blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-327) | content_id (string, len 40) | detected_licenses (sequence, len 0-91) | license_type (2 classes) | repo_name (string, len 5-134) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (46 classes) | visit_date (timestamp[us], 2016-08-02 22:44:29 to 2023-09-06 08:39:28) | revision_date (timestamp[us], 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | committer_date (timestamp[us], 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | github_id (int64, 19.4k to 671M, nullable) | star_events_count (int64, 0 to 40k) | fork_events_count (int64, 0 to 32.4k) | gha_license_id (14 classes) | gha_event_created_at (timestamp[us], 2012-06-21 16:39:19 to 2023-09-14 21:52:42, nullable) | gha_created_at (timestamp[us], 2008-05-25 01:21:32 to 2023-06-28 13:19:12, nullable) | gha_language (60 classes) | src_encoding (24 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7 to 9.18M) | extension (20 classes) | filename (string, len 1-141) | content (string, len 7 to 9.18M)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c1c93a016c5ad7461fd63c45276ddee22ff5720 | c741b8b9982799de6406714257cbf20c6491ade7 | /app.R | 2b8e03b11cb394b7d6261fc07bdebf4f6e2e90f0 | [] | no_license | moloscripts/KMPDC | f1d5ed4759c5049d4abb81d6db9dad7d869ceec1 | 954452a736a43da1fd3b68a8cf4ba4b954dd7414 | refs/heads/main | 2023-06-20T13:14:24.103385 | 2021-07-21T20:19:36 | 2021-07-21T20:19:36 | 388,144,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,102 | r | app.R |
# App developed by Data Cube Solutions
# contactdatacube@gmail.com / molo.andrew@gmail.com
# Data is reproducible
# https://stackoverflow.com/questions/54914541/global-r-dont-start/66802176#66802176
library(easypackages)
libraries("shiny","shinydashboard","tidyverse","lubridate", "plotly","Rcpp","shinyjs","rsconnect")
# theme_set(theme_minimal())
# Visuals ####
# Dataset
medicalPractitioners <- read.csv("Data/MedicalPractitioners.csv")
medicalPractitioners$RegDate <- dmy(medicalPractitioners$RegDate)
medicalPractitioners <- medicalPractitioners %>%
select(RegDate, SPECIALTY, SUB_SPECIALTY, Qualification.Count, TOWN) %>%
rename(
`Registration Date` = RegDate,
Specialty = SPECIALTY,
`Sub Specialty` = SUB_SPECIALTY,
`Number of Qualifications` = Qualification.Count,
Town = TOWN) %>%
dplyr::mutate(Year = lubridate::year(`Registration Date`))
medicalPractitioners$`Year Range` = cut(medicalPractitioners$Year, c(1970, 1980, 1990, 2000, 2010, 2020, 2025))
levels(medicalPractitioners$`Year Range`) = c("1970-1980", "1981-1990", "1991-2000","2001-2010", "2011-2020","2021")
medicalPractitioners$`Year Range` <- factor(medicalPractitioners$`Year Range`, ordered = T, levels = c("1970-1980", "1981-1990", "1991-2000","2001-2010", "2011-2020","2021"))
# Convert the remaining character columns to factors
to_factor <- c("Specialty","Sub Specialty","Number of Qualifications","Town")
for (col in to_factor) {
medicalPractitioners[[col]] <- as.factor(as.character(medicalPractitioners[[col]]))
}
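# Equivalent one-liner (sketch) for the loop above:
# medicalPractitioners[to_factor] <- lapply(medicalPractitioners[to_factor], factor)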
## Plots ####
## Count of Medical Practitioners ####
medicalPractitioners %>%
group_by(`Year Range`) %>%
summarise(count = n()) %>%
ggplot(aes(`Year Range`, count)) +
geom_line(aes(group = 1),color="#aa2b1d", size=1) +
geom_point(size=4, color="#28527a") +
labs(title = "Count of Medical Practitioners in Kenya",
subtitle = "Data from 1978 to 2020",
caption = "Source: medicalboard.co.ke",
x="") +
theme(
plot.title = element_text(color = "#23689b", size = 20, face = "bold",hjust = 0.5),
plot.subtitle = element_text(color = "#161d6f", size = 13, face = "bold",hjust = 0.5),
plot.caption = element_text(color = "#0f1123", size = 10, face = "italic"),
axis.text.x = element_text(face = "bold", size = 12),
axis.text.y = element_text(face = "bold", size = 12)
) +
geom_label(aes(label=count),
nudge_x = 0.1,
nudge_y = 0.2,
size=5)
####
MPQualificationsDF <- medicalPractitioners %>%
group_by(`Year Range`, `Number of Qualifications`) %>%
summarise(count = n()) %>%
ggplot(aes(`Year Range`, count, group=`Number of Qualifications`)) +
geom_line(aes(color=`Number of Qualifications`), size=1) +
geom_point(aes(color=`Number of Qualifications`), size=5) +
labs(title = "Count of Qualifications of Medical Practitioners",
subtitle = "Data from 1978 to 2020",
caption = "Source:https://medicalboard.co.ke/DashBoard.php ",
x="") +
theme(
plot.title = element_text(color = "#23689b", size = 20, face = "bold",hjust = 0.5),
plot.subtitle = element_text(color = "#161d6f", size = 13, face = "bold",hjust = 0.5),
plot.caption = element_text(color = "#0f1123", size = 10, face = "italic"),
axis.text.x = element_text(face = "bold", size = 12),
axis.text.y = element_text(face = "bold", size = 12),
legend.title = element_blank(),
legend.position = "top"
) +
geom_label(aes(label=count),
nudge_x = 0.15,
nudge_y = 4,
size=4)
## Top Specialties in the last decade ####
SpecialtiesDF <- medicalPractitioners %>%
count(Specialty, sort = T) %>%
filter(n>20)
ggplot(SpecialtiesDF, aes(reorder(Specialty, n), n)) +
geom_col(aes(fill=Specialty)) +
coord_flip() +
labs(title = "Top Specialties of Medical Practitioners",
subtitle = "Data from 1978 to 2020",
caption = "Source:https://medicalboard.co.ke/DashBoard.php ",
x="", y="count") +
theme(
plot.title = element_text(color = "#23689b", size = 20, face = "bold",hjust = 0.5),
plot.subtitle = element_text(color = "#161d6f", size = 13, face = "bold",hjust = 0.5),
plot.caption = element_text(color = "#0f1123", size = 10, face = "italic"),
axis.text.x = element_text(face = "bold", size = 12),
axis.text.y = element_text(face = "bold", size = 12),
legend.title = element_blank(),
legend.position = "none"
) +
geom_label(aes(label=n),
nudge_x = 0.15,
nudge_y = 0.9,
size=4)
# Shiny Dashboard Framework ####
town <- unique(medicalPractitioners$Town)
# Define UI for the dashboard
ui <- fluidPage(
dashboardPage(skin = "yellow",
dashboardHeader(
title = "KMPDC Data Dashboard",
titleWidth = 250
),
dashboardSidebar(
sidebarMenu(
sidebarSearchForm(textId = "Search", buttonId = "searchTown",
label = "Search Town")
)
),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
),
fluidRow(
tabBox(width = 12, height = NULL, selected = "Count of Practitioners",
tabPanel("Count of Practitioners", plotlyOutput("practitioners_count")),
tabPanel("Number of Qualifications per Medical Practitioners", plotlyOutput("qualifications_count")),
tabPanel("Top Specialties of Medical Practitioners", plotlyOutput("specialityCount"))
)
)
) # End of dashboardBody()
) # End of dashboardPage()
)
server <- function(input, output) {
  # Reactive town filter (note: the outputs below re-filter the data directly;
  # they could call filtered_data() instead)
  filtered_data <- reactive({
medicalPractitioners %>%
filter(Town %in% toupper(input$Search))
})
# Server Output - Count of Practitioners ####
output$practitioners_count <- renderPlotly({
filter(medicalPractitioners, Town==toupper(input$Search)) %>%
group_by(`Year Range`) %>%
summarise(count = n()) %>%
ggplot(aes(`Year Range`, count)) +
geom_line(aes(group = 1),color="#E9896A", size=1) +
geom_point(size=4, color="#00bfc4") +
labs(
subtitle = "Data from 1978 to 2020",
caption = "Source: medicalboard.co.ke",
x="") +
theme(
# plot.title = element_text(color = "#23689b", size = 13, face = "bold",hjust = 0.5),
plot.subtitle = element_text(color = "#161d6f", size = 9, face = "bold",hjust = 0.5),
plot.caption = element_text(color = "#0f1123", size = 10, face = "italic"),
axis.text.x = element_text(face = "bold", size = 8),
axis.text.y = element_text(face = "bold", size = 8)
) +
geom_label(aes(label=count),
nudge_x = 0.1,
nudge_y = 0.2,
                 size=5)
})
# Server Output - Count of Qualifications ####
output$qualifications_count <- renderPlotly({
filter(medicalPractitioners, Town==toupper(input$Search)) %>%
group_by(`Year Range`, `Number of Qualifications`) %>%
summarise(count = n()) %>%
ggplot(aes(`Year Range`, count, group=`Number of Qualifications`)) +
geom_line(aes(color=`Number of Qualifications`), size=1) +
geom_point(aes(color=`Number of Qualifications`), size=3) +
labs(
subtitle = "Data from 1978 to 2020",
caption = "Source:https://medicalboard.co.ke/DashBoard.php ",
x="") +
theme(
# plot.title = element_text(color = "#23689b", size = 20, face = "bold",hjust = 0.5),
plot.subtitle = element_text(color = "#161d6f", size = 13, face = "bold",hjust = 0.5),
plot.caption = element_text(color = "#0f1123", size = 10, face = "italic"),
axis.text.x = element_text(face = "bold", size = 8),
axis.text.y = element_text(face = "bold", size = 8),
legend.title = element_blank(),
legend.position = "top"
) +
geom_label(aes(label=count),
nudge_x = 0.15,
nudge_y = 4,
                 size=2)
})
# Server Output - Top Specialties ####
output$specialityCount <- renderPlotly({
filter(medicalPractitioners, Town==toupper(input$Search)) %>%
count(Specialty, sort = T) %>%
filter(n>20) %>%
ggplot(aes(reorder(Specialty, n), n)) +
geom_col(aes(fill=Specialty)) +
coord_flip() +
labs(
subtitle = "Data from 1978 to 2020",
caption = "Source:https://medicalboard.co.ke/DashBoard.php ",
x="", y="count") +
theme(
# plot.title = element_text(color = "#23689b", size = 20, face = "bold",hjust = 0.5),
plot.subtitle = element_text(color = "#161d6f", size = 13, face = "bold",hjust = 0.5),
plot.caption = element_text(color = "#0f1123", size = 10, face = "italic"),
axis.text.x = element_text(face = "bold", size = 8),
axis.text.y = element_text(face = "bold", size = 8),
legend.title = element_blank(),
legend.position = "none"
) +
geom_label(aes(label=n),
nudge_x = 0.15,
nudge_y = 0.9,
                 size=2)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
6b16888e3ef2d4849a212550fb940df10461b779 | 29585dff702209dd446c0ab52ceea046c58e384e | /aroma.core/R/GLAD.EXTS.R | e65394bb7bf22e348abe58e8bb8ad03ea08097da | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,566 | r | GLAD.EXTS.R | setMethodS3("extractCopyNumberRegions", "profileCGH", function(object, ...) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pv <- object$profileValues;
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Allocate result table
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Identify unique regions
uRegions <- unique(pv$Region);
nbrOfRegions <- length(uRegions);
# Columns
colClasses <- c(chromosome="character", start="integer",
stop="integer", mean="double", nbrOfLoci="integer",
call="character");
df <- dataFrame(colClasses, nrow=nbrOfRegions);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Extract each region
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for (rr in seq_along(uRegions)) {
# Get the region ID
region <- uRegions[rr];
# Get the first and last position of each region
idx <- which(region == pv$Region);
idx <- idx[c(1,length(idx))];
idx1 <- idx[1];
# Chromosome
df[rr,"chromosome"] <- pv$Chromosome[idx1];
# (start, stop, length)
df[rr,c("start", "stop")] <- as.integer(pv$PosBase[idx]);
    # Number of loci
df[rr,"nbrOfLoci"] <- as.integer(diff(idx)+1);
# Smoothing
df[rr,"mean"] <- pv$Smoothing[idx1];
# Call
df[rr,"call"] <- c("loss", "neutral", "gain")[pv$ZoneGNL[idx1]+2];
}
CopyNumberRegions(
chromosome=df$chromosome,
start=df$start,
stop=df$stop,
mean=df$mean,
count=df$nbrOfLoci,
call=df$call
);
}) # extractCopyNumberRegions()
setMethodS3("extractRawCopyNumbers", "profileCGH", function(object, ...) {
pv <- object$profileValues;
chromosome <- unique(pv$Chromosome);
chromosome <- Arguments$getIndex(chromosome);
RawCopyNumbers(cn=pv$LogRatio, x=pv$PosBase, chromosome=chromosome);
})
setMethodS3("drawCnRegions", "profileCGH", function(this, ...) {
cnr <- extractCopyNumberRegions(this, ...);
drawLevels(cnr, ...);
})
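# Usage sketch (hypothetical objects; assumes 'fit' is a single-chromosome
# 'profileCGH' result from GLAD::glad() or daglad()):
# cnr <- extractCopyNumberRegions(fit)  # CopyNumberRegions object
# raw <- extractRawCopyNumbers(fit)     # RawCopyNumbers object
# plot(raw); drawLevels(cnr)            # overlay segment means on the raw log-ratios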
# Patch for plotProfile() of class profileCGH so that 'ylim' argument works.
# Added also par(cex=0.8) - see code.
setMethodS3("drawCytoband", "profileCGH", function(fit, chromosome=NULL, cytobandLabels=TRUE, colCytoBand=c("white", "darkblue"), colCentro="red", unit=6, ...) {
requireWithMemory("GLAD") || throw("Package not loaded: GLAD"); # data("cytoband")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'fit':
if (!"PosBase" %in% names(fit$profileValues))
throw("Argument 'fit' does not contain a 'PosBase' field.");
# Argument 'chromosome':
if (is.null(chromosome)) {
chromosome <- unique(fit$profileValues$Chromosome);
if (length(chromosome) > 1) {
throw("Argument 'chromosome' must not be NULL if 'fit' contains more than one chromosome: ", paste(chromosome, collapse=", "));
}
}
if (length(chromosome) > 1) {
throw("Argument 'chromosome' must not contain more than one chromosome: ", paste(chromosome, collapse=", "));
}
xScale <- 1/(10^unit);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get chromosome lengths
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load data
# To please R CMD check on R v2.6.0
cytoband <- NULL; rm(list="cytoband");
data("cytoband", envir=sys.frame(sys.nframe())); # Package 'GLAD'
genomeInfo <- aggregate(cytoband$End,
by=list(Chromosome=cytoband$Chromosome, ChrNumeric=cytoband$ChrNumeric),
FUN=max, na.rm=TRUE);
names(genomeInfo) <- c("Chromosome", "ChrNumeric", "Length");
genomeInfo$Chromosome <- as.character(genomeInfo$Chromosome);
genomeInfo$ChrNumeric <- as.integer(as.character(genomeInfo$ChrNumeric));
LabelChr <- data.frame(Chromosome=chromosome);
LabelChr <- merge(LabelChr, genomeInfo[, c("ChrNumeric", "Length")],
by.x="Chromosome", by.y="ChrNumeric", all.x=TRUE);
LabelChr$Length <- 0;
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the cytoband details for the chromosome of interest
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Drop column 'Chromosome'
## Gives a NOTE in R CMD check R v2.6.0, which is nothing, but we'll
## use a workaround to get a clean result. /HB 2007-06-12
Chromosome <- NULL; rm(list="Chromosome"); # dummy
cytobandNew <- subset(cytoband, select=-Chromosome);
cytobandNew <- merge(LabelChr, cytobandNew, by.x="Chromosome",
by.y="ChrNumeric");
# Rescale x positions according to units
cytobandNew$Start <- xScale*cytobandNew$Start;
cytobandNew$End <- xScale*cytobandNew$End;
# Where should the cytoband be added and how wide should it be?
usr <- par("usr");
dy <- diff(usr[3:4]);
drawCytoband2(cytobandNew, chromosome=chromosome,
labels=cytobandLabels, y=usr[4]+0.02*dy, height=0.03*dy,
colCytoBand=colCytoBand, colCentro=colCentro);
}, private=TRUE) # drawCytoband()
############################################################################
# HISTORY:
# 2010-02-19
# o Moved drawCytoband2() to its own file, because it no longer requires
# the GLAD package.
# 2009-05-14
# o Moved extractRawCopyNumbers() for profileCGH from aroma.affymetrix.
# o Moved extractCopyNumberRegions() for profileCGH from aroma.affymetrix.
# 2009-05-10
# o Moved to aroma.core v1.0.6. Source files: profileCGH.drawCnRegions.R
# and profileCGH.drawCytoband.R.
# 2008-05-21
# o Now extractRawCopyNumbers() adds 'chromosome' to the returned object.
# 2007-09-04
# o Now data("cytoband") is loaded to the local environment.
# 2007-08-22
# o Update plotProfile2() to utilizes drawCnRegions().
# 2007-08-20
# o Added drawCnRegions().
# 2007-06-11
# o Added explicit call to GLAD::myPalette() to please R CMD check R v2.6.0.
# 2007-01-03
# o Made the highlighting "arrow" for the centromere smaller.
# 2006-12-20
# o It is now possible to specify 'xlim' as well as 'ylim'.
# o Reimplemented, because the cytoband was not displayed correctly.
############################################################################
|
73f382bfbc2b6109f8325b9698b35265143d41f2 | 34228464eec9a4c0d741ff754691f39aae95b1a2 | /TCGA_adeno/LP_Test_adeno.R | 74ff9003709c9afa3a35e06a18b2a50a6812263f | [] | no_license | yingstat/MOAB | 53dd05154dd42fc14c3defb8f80713e22690cee5 | 36aba9de4e5f462973ed15782bdbc1c5482304e6 | refs/heads/master | 2023-02-21T10:01:49.539590 | 2021-01-21T18:02:28 | 2021-01-21T18:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,555 | r | LP_Test_adeno.R | source('MOAB_main/Benchmark_HD_LD.R')
Raw=readRDS('TCGA_adeno/data/AllX')
PointLab=readRDS('TCGA_adeno/data/nodelabs')
#########
#tSNE
########
#read in data
tEmbed=readRDS('TCGA_adeno/Embeddings/tSNE')
kVals=c(10,25,50,75,100,200,300)
kMat_tSNE=matrix(0,nrow=length(kVals),ncol=30)
for(i in 1:length(kVals)){
print('k we are on')
print(i)
Res=Benchmark_HD_LD(tEmbed,Raw,PointLab,kVals[i],0)
print(mean(Res))
kMat_tSNE[i,]=Res
}
rownames(kMat_tSNE)=kVals
#######################
#UMAP
#######################
tEmbed=readRDS('TCGA_adeno/Embeddings/UMAP')
kVals=c(10,25,50,75,100,200,300)
kMat_UMAP=matrix(0,nrow=length(kVals),ncol=30)
for(i in 1:length(kVals)){
print('k we are on')
print(i)
Res=Benchmark_HD_LD(tEmbed,Raw,PointLab,kVals[i],0)
print(mean(Res))
kMat_UMAP[i,]=Res
}
rownames(kMat_UMAP)=kVals
####################
#Large Vis###########
####################
tEmbed=readRDS('TCGA_adeno/Embeddings/LargeVis')
kVals=c(10,25,50,75,100,200,300)
#take transpose only for tEmbed
tEmbed=t(tEmbed)
kMat_LV=matrix(0,nrow=length(kVals),ncol=30)
for(i in 1:length(kVals)){
print('k we are on')
print(i)
Res=Benchmark_HD_LD(tEmbed,Raw,PointLab,kVals[i],0)
print(mean(Res))
kMat_LV[i,]=Res
}
rownames(kMat_LV)=kVals
#############################
#PCA
##############################
tEmbed=readRDS('TCGA_adeno/Embeddings/PCA')
kVals=c(10,25,50,75,100,200,300)
#no transpose needed for the PCA embedding
kMat_PCA=matrix(0,nrow=length(kVals),ncol=30)
for(i in 1:length(kVals)){
print('k we are on')
print(i)
Res=Benchmark_HD_LD(tEmbed,Raw,PointLab,kVals[i],0)
print(mean(Res))
kMat_PCA[i,]=Res
}
rownames(kMat_PCA)=kVals
######################
#TriMap########
#####################
print('on trimap')
tEmbed=readRDS('TCGA_adeno/Embeddings/TriMap')
kVals=c(10,25,50,75,100,200,300)
#no transpose needed for the TriMap embedding
kMat_TriMap=matrix(0,nrow=length(kVals),ncol=30)
for(i in 1:length(kVals)){
print('k we are on')
print(i)
Res=Benchmark_HD_LD(tEmbed,Raw,PointLab,kVals[i],0)
print(mean(Res))
kMat_TriMap[i,]=Res
}
rownames(kMat_TriMap)=kVals
##############################
#Lamp
###############################
print('on lamp')
tEmbed=readRDS('TCGA_adeno/Embeddings/Lamp')
kVals=c(10,25,50,75,100,200,300)
#no transpose needed for the Lamp embedding
kMat_Lamp=matrix(0,nrow=length(kVals),ncol=30)
for(i in 1:length(kVals)){
print('k we are on')
print(i)
Res=Benchmark_HD_LD(tEmbed,Raw,PointLab,kVals[i],0)
print(mean(Res))
kMat_Lamp[i,]=Res
}
rownames(kMat_Lamp)=kVals
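#the five blocks above repeat the same loop; a helper (sketch, assuming the
#same Benchmark_HD_LD signature) would remove the duplication:
# run_lp <- function(path, transpose = FALSE) {
#   emb <- readRDS(path)
#   if (transpose) emb <- t(emb)
#   res <- t(sapply(kVals, function(k) Benchmark_HD_LD(emb, Raw, PointLab, k, 0)))
#   rownames(res) <- kVals
#   res
# }
# kMat_tSNE <- run_lp('TCGA_adeno/Embeddings/tSNE')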
#collect the results
library('ggplot2')
library('reshape2')
library('plyr')
FullDF=rbind(rowMeans(kMat_tSNE),rowMeans(kMat_UMAP),rowMeans(kMat_LV),rowMeans(kMat_TriMap),rowMeans(kMat_PCA),rowMeans(kMat_Lamp))
rownames(FullDF)=c('tSNE','UMAP','LargeVis','TriMap','PCA','Lamp')
#optional plotting
#valVec=c('gray50','darkviolet','darkolivegreen3','deeppink1','deepskyblue4','darkorange2','darkgoldenrod','black','darkblue')
#valVec=valVec[-c(5,7,9)]
#FullDF=melt(FullDF)
#names(FullDF)=c('Method','k','score')
#p<-ggplot(DT3, aes(x=k, y=Score, group=Method)) +
# geom_line(aes(color=Method),lwd=1.3)+
# geom_point(aes(color=Method),size=1.5)+scale_color_manual(values=valVec)
#p=p+theme_minimal()+theme(text = element_text(size=22))+xlab('k (# of nearest neighbors)')+ylab('LP Score')+scale_color_manual(values = valVec)+ggtitle('')
#p=p+theme_classic()+theme(axis.text.y = element_text(size=22),axis.text.x=element_text(size=22))+theme(legend.position='none')
#p=p+theme(axis.title.y = element_text(size=22),axis.title.x=element_text(size=22))
#ggsave('~/adeno_LP.pdf',p,width=5,height=5)
|
db345b229e2d8744796de5d89ac5cf17a9c6c200 | 4e3ef191ae5415eb0655f6d9fe8349f1aa54e80c | /R/draw.gdp_comps.R | 5d861c48a1f524c04b1aa8e9c4437a0245bb0fcf | [] | no_license | fernote7/BETS | 2e5624a4a5210cdeb583cb664548989bfec7edb3 | 5ba58b5e1997f6f47cdba1a686d9a3a1b95624ad | refs/heads/master | 2021-01-22T17:23:21.115413 | 2017-09-05T18:09:58 | 2017-09-05T18:09:58 | 86,753,812 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,907 | r | draw.gdp_comps.R | #' @title Create a chart of the GDP components
#'
#' @description Creates a pair of donut charts showing the composition of the nominal yearly GDP (GDP Monitor, FGV/IBRE)
#'
#' @return A plotly object containing the chart.
#' @importFrom zoo as.Date as.yearqtr
#' @importFrom forecast ma
#' @importFrom utils read.csv
#' @importFrom stats aggregate
#' @import plotly
#' @importFrom seasonal seas
#' @author Talitha Speranza \email{talitha.speranza@fgv.br}
draw.gdp_comps = function(){
gdp_comp = paste0(system.file(package="BETS"), "/mon_pib_comps.csv")
data <- ts(read.csv2(gdp_comp, stringsAsFactors = F)[,-1],start = c(2000,1), frequency = 12)
data <- aggregate(data)
year2 = end(data)[1]
year1 = end(data)[1]-1
data <- window(data, start = year1)
data[,5] = data[,5] - data[,6]
data = data[,c(-6,-1)]
data = t(data)
rownames(data) = c("Hous.<br>Exp.", "Gov.<br>Exp.","GFFK","NX")
#s = apply(data[,-1], 1, function(x){sum(x)})
# cbind(data[,1],s)
colors <- c('rgb(211,94,96)', 'rgb(128,133,133)', 'rgb(144,103,167)', 'rgb(171,104,87)', 'rgb(114,147,203)')
a <- list(
x = 0.18,
y = 0.5,
text = paste0("<b>", year1,"</b>"),
xref = "paper",
yref = "paper",
showarrow = F,
font = list(size = 18)
)
b <- list(
x = 0.82,
y = 0.5,
text = paste0("<b>", year2,"</b>"),
xref = "paper",
yref = "paper",
showarrow = F,
font = list(size = 18)
)
m <- list(
t = 50,
pad = 1
)
p <- plot_ly(width = 700, height = 450) %>%
add_pie(labels = rownames(data), values = data[,1],
textposition = 'inside',
textinfo = "label+percent",
insidetextfont = list(color = '#FFFFFF', size = 16),
marker = list(colors = colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = F,
hole = 0.4,
domain = list(x = c(0, 0.45), y = c(0, 1))) %>%
add_pie(labels = rownames(data), values = data[,2],
textposition = 'inside',
textinfo = "label+percent",
insidetextfont = list(color = '#FFFFFF', size = 16),
marker = list(colors = colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = F,
hole = 0.4,
domain = list(x = c(0.55, 1), y = c(0, 1))) %>%
layout(title = '<b>GDP COMPONENTS</b><br><span style = "font-size:17">Nominal Yearly GDP - GDP Monitor (FGV/IBRE)</span>',
annotations = list(a,b),
titlefont = list(size = 19),
margin = m,
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
return(p)
} |
d2054922f6e8215c9f559022c5cc5663439864ef | 6e41a0025ac0cbf86cc30c06f0ea74f7bc31c418 | /man/trans.Rd | 6c571416ec0cb9077e5606d83162e5eac2ff2da7 | [] | no_license | MrLehna/HMM | 87acad48a97c9ecf114a0acc087fb7b05a3e2084 | 6cb42f8aa98c8a0c6c325c51c150638fe6cc4a04 | refs/heads/master | 2020-03-25T15:39:59.234492 | 2018-08-07T14:31:25 | 2018-08-07T14:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,382 | rd | trans.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Transformation.R
\name{trans}
\alias{trans}
\title{Transformation function - DM}
\usage{
trans(factor, m)
}
\arguments{
\item{factor}{see details}
\item{m}{number of Likelihoods}
}
\value{
returns a matrix with the sigma, Gamma and theta matrices bound together (column-wise)
}
\description{
Due to our single optimisation problem we have to build constraints for Gamma and
Sigma to fulfill the requirements of probabilities.
}
\details{
In the direct maximisation the nlme() minimisation function cannot directly implement
the constraints on the parameter values. This function ensures that the estimated parameters
of the direct optimisation still fulfill their requirements: the probabilities lie
between zero and one, and the rows of Gamma (as well as sigma) sum up to one.
For this transformation the probit model is used.
Thus with the input factor containing the elements that determine Sigma and Gamma we need
the following number of elements for each parameter:
Sigma vector (1 x m) - (m-1) elements required
Gamma matrix (m x m) - m(m-1) elements required
Theta vector (1 x m) - m elements required
By this definition the input factor vector has to contain the elements for Sigma, Gamma and Theta
in that order and has to have the dimension: (m+1)(m-1) + m
}
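% Illustrative dimension check (added sketch, not from the package; m = 3 states):
%   m <- 3
%   (m - 1) + m * (m - 1) + m   # sigma + Gamma + theta elements = 11
%   (m + 1) * (m - 1) + m       # the factored form gives the same total = 11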
|
936f4b5a92d96a11e2a0826faaf63b7aa4996175 | 402956c1f9adfd625ee01113c7eeb7ec745ff6e9 | /exemple.R | 981f278009043fb23408369876f6df6499620f44 | [] | no_license | slevu/rapport_ehpad | 83bed175e26c674bf036806cca4fc425bdcabaf2 | f9938032aa7fe207f4ead57a13c91c4a3c86d148 | refs/heads/main | 2023-02-28T08:55:33.563882 | 2021-02-10T15:46:28 | 2021-02-10T15:46:28 | 337,700,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,033 | r | exemple.R | ## example: reports by region
## Stephane - 10/2/21
##---- mock individual data ----
nregfr <- 18 ## france
ndptfr <- 101
netab <- 4
k <- 3 # number of regions
region <- LETTERS[1:k]
set.seed(123)
{
ndpt_par_region <- rpois(k, ndptfr/nregfr)
reg <- rep(region, ndpt_par_region)
dep <- paste0(reg, sequence(ndpt_par_region))
etab <- paste0("e",1:netab)
## all etab in all dpt
c12 <- expand.grid(dep = dep, etab = etab, stringsAsFactors = FALSE)
## some dpt have missing etab type
nr <- nrow(c12)
c12 <- c12[- sample(1:nr, floor(nr/10)),]
df0 <- data.frame(
region = sub("[0-9]", "", c12[,"dep"]),
c12,
x = rpois(nrow(c12),3),
y = sample(c(NA, 1:9),nrow(c12), replace = TRUE),
stringsAsFactors = FALSE
)
}
saveRDS(df0, "mockdataset_ehpad.rds")
##---- summarize function ----
ftab <- function(df){
aggregate(df[, c("x","y")],
list(typ_etab = df$etab),
function(x) sum(x, na.rm = TRUE)
)
}
##---- table France -----
tabfr <- ftab(df0)
##---- table regions ----
ltab <- lapply(setNames(unique(df0$region), unique(df0$region)), function(r){
## by region
dfr <- df0[df0$region == r,]
tabr <- list(ftab(dfr))
names(tabr) <- r
## by dpt
ltabd <- lapply(setNames(unique(dfr$dep), unique(dfr$dep)), function(d){
dfd <- dfr[dfr$dep == d,]
tabd <- ftab(dfd)
})
c(tabr, ltabd)
})
# names(ltab)
saveRDS(ltab, "ltab.rds")
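## access pattern (sketch): each element of ltab is itself a named list whose
## first entry is the region-level table and the rest are the department tables
# ltab[["A"]][["A"]]   # region A summary
# ltab[["A"]][["A1"]]  # department A1 summary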
##---- unit report ----
unit_report <- function(reg = "A", fmt = "html", verbose = FALSE){
OFN <- paste0(paste0(Sys.Date(), "_"), reg, ".", fmt)
rmarkdown::render(input = "exemple_rapport.Rmd",
output_format = paste0(fmt, "_document"),
params = list(region = reg),
encoding = "UTF-8",
output_file = OFN,
quiet = TRUE)
if (verbose) print(paste("Output saved to", OFN))
}
# unit_report(reg = "B", verbose = TRUE)
# unit_report(fmt = "pdf")
##---- all reports ----
dummy <- lapply(names(ltab), unit_report)
|
84a4fde7a765d9ead5283ef1c16096341123f3ab | 89fd142ff8c81c3740b175aec923e3caadac859a | /man/crear_subgrafo_inducido.Rd | 2fa5d34154331f0a623b3de37b93c7b74afb5a4a | [] | no_license | guilleloro/Grafos | 941420c277cba8068678cd29cacf909559be3026 | fdf3c7d4d22b0f34eb5708b6500c6d0eeef36f20 | refs/heads/master | 2022-07-18T16:03:25.223685 | 2020-05-19T15:31:53 | 2020-05-19T15:31:53 | 265,262,325 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | true | 514 | rd | crear_subgrafo_inducido.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Seguridad.R
\name{crear_subgrafo_inducido}
\alias{crear_subgrafo_inducido}
\title{Induced subgraph.}
\usage{
crear_subgrafo_inducido(grafo, lonMuestra)
}
\arguments{
\item{grafo}{graph to be sampled.}
\item{lonMuestra}{size of the initial set of sampled vertices.}
}
\value{
the sampled subgraph.
}
\description{
Function that creates a sample of a graph using the induced sampling technique.
}
|
4a8304d05b48b7a2e89d872deff160da92556c03 | 3dfeddf016a3754b920c2bad2bf3287faad047c4 | /man/getGitCommit.Rd | 3fc06ef244ca36d74297d1116b2eadb2291817f2 | [] | no_license | acguidoum/Rpolyhedra | 459592b06fbcafff61776fce588fa1d1f1429bdc | 19a80f920fcc511e8d37811ad1827796de722ae9 | refs/heads/master | 2020-03-28T07:01:52.547368 | 2018-07-10T16:16:38 | 2018-07-10T16:16:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 296 | rd | getGitCommit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db-lib.R
\name{getGitCommit}
\alias{getGitCommit}
\title{getGitCommit
get the last git commit sha}
\usage{
getGitCommit()
}
\value{
String with git commit sha
}
\description{
getGitCommit
get the last git commit sha
}
|
bbf7ee7aa920ef8462672f8aef9d1a87c49ea5de | 5867e13f269018253bd376566b04d33d9e1dab3c | /raster_classwork_1.R | 679ddb9d7a14b8ff7f5b17cf2c995d7a78094ec3 | [] | no_license | manidhill0n/Maninder_eagles | af0caa0d566819138cf02ce6d1079e338d954387 | b1a09cb425a8777f229f0cac1eb8f38af7d34f7f | refs/heads/master | 2020-12-24T07:05:28.757772 | 2019-06-19T09:47:10 | 2019-06-19T09:47:10 | 73,380,119 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 376 | r | raster_classwork_1.R | #dataframe to raster
install.packages("raster")
length(df$measure1)
library(raster)
r2<-raster(nrows=100,ncols=100)
r2
r2[]<-df$measure2[1:1000]
plot(r2)
r2
r1<-raster(nrows=100,ncols=100)
r1
r1[]<-df$measure2[1:1000]
plot(r1)
r1
r12<-stack(r1,r2)
r12
plot(r12[[1]])
r12[[1]]
r12$new<-r12[[1]]*r12[[2]]^2
r12
rset<-raster(nrows=30,ncols=30)
rset
df12<-r12[]
head(df12)
|
eacd297580b7ffd7cf72231cc30dca4ae689aecd | c0bcd0b5f2d1abd72de5e775aedebfd407a1a4a3 | /recommeder-system-getting-started/content-based-algorithm/functions.R | 1a3e529f87c508ce75bd7b9bc937d79c5cbce17c | [] | no_license | zembrzuski/machine-learning-andrewng | 707421b12aef715ae8121666b68d1c6f459f0b29 | 520fc38e83468865ec158026a10c73bdfb015dc7 | refs/heads/master | 2021-01-10T01:42:46.852296 | 2015-12-08T18:36:41 | 2015-12-08T18:36:41 | 45,475,686 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,577 | r | functions.R | # it works
addBiasColumn <- function(X) {
cbind(rep(1, nrow(X)), X)
}
addZeroColumn <- function(X) {
cbind(rep(0, nrow(X)), X)
}
# cost: half the squared error, counted over rated (observed) entries only
costFunction <- function(X, thetas, y, rating) {
prediction <- X %*% t(thetas)
errorMatrix <- (prediction - y)^2 * rating
1/2 * sum(errorMatrix)
}
# gradient of the cost with respect to thetas
gradient <- function(X, thetas, y, rating) {
prediction <- X %*% t(thetas)
t(((prediction - y) * rating)) %*% X
}
# trying regularization now
costFunctionWithRegularization <- function(X, thetas, y, rating, lambda) {
prediction <- X %*% t(thetas)
errorMatrix <- (prediction - y)^2 * rating
regularization <- sum(thetas[,-1]^2)
1/2 * sum(errorMatrix) + lambda/2*regularization
}
gradientWithRegularization <- function(X, thetas, y, rating, lambda) {
prediction <- X %*% t(thetas)
algo <- t(((prediction - y) * rating)) %*% X
algo + lambda * addZeroColumn(thetas[,-1])
}
# no regularization
gradientDescent <- function(X, thetas, y, rating, alfa, nIter) {
m <- nrow(X)
cost <- rep(NA, nIter)
for(i in 1:nIter) {
cost[i] <- costFunction(X, thetas, y, rating)
thetas <- thetas - alfa * gradient(X, thetas, y, rating)
}
plot(cost)
print(cost[nIter])
thetas
}
gradientDescentWithRegularization <- function(X, thetas, y, rating, alfa, nIter, lambda) {
m <- nrow(X)
cost <- rep(NA, nIter)
for(i in 1:nIter) {
    cost[i] <- costFunctionWithRegularization(X, thetas, y, rating, lambda)
thetas <- thetas - alfa * gradientWithRegularization(X, thetas, y, rating, lambda)
}
plot(cost)
print(cost[nIter])
thetas
}
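# usage sketch with synthetic data (all sizes and names below are illustrative):
# nItems <- 50; nUsers <- 8; nFeat <- 4
# X      <- addBiasColumn(matrix(rnorm(nItems * nFeat), nItems, nFeat))
# thetas <- matrix(rnorm(nUsers * (nFeat + 1)), nUsers, nFeat + 1)
# y      <- matrix(sample(1:5, nItems * nUsers, replace = TRUE), nItems, nUsers)
# rating <- matrix(rbinom(nItems * nUsers, 1, 0.7), nItems, nUsers)  # 1 = observed
# fit <- gradientDescentWithRegularization(X, thetas, y, rating,
#                                          alfa = 1e-3, nIter = 500, lambda = 1)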
|
83f825ccce66f249b5daae5996c9349be935a2c6 | bbc167551a93d2a6d7ea43f00fc901ff967a8c62 | /man/load.exp.GEO.Rd | 824ed0b5d6c3d31b6cedcf76fad653ee53a0b097 | [
"Apache-2.0"
] | permissive | jyyulab/NetBID | 0c4c212cddd0180b96506e741350e6b7cfcacfce | 86d62097eda88a6185b494491efdd8b49902e0c3 | refs/heads/master | 2023-04-30T04:16:40.752026 | 2023-02-28T02:57:04 | 2023-02-28T02:57:04 | 118,371,500 | 34 | 10 | Apache-2.0 | 2022-08-23T13:44:58 | 2018-01-21T20:33:57 | R | UTF-8 | R | false | true | 1,559 | rd | load.exp.GEO.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{load.exp.GEO}
\alias{load.exp.GEO}
\title{Download Gene Expression Series From GEO Database with Platform Specified}
\usage{
load.exp.GEO(
out.dir = NULL,
GSE = NULL,
GPL = NULL,
getGPL = TRUE,
update = FALSE
)
}
\arguments{
\item{out.dir}{character, the file path used to save the GSE RData. If the data already exists, it will be loaded from this path.}
\item{GSE}{character, the GEO Series Accession ID.}
\item{GPL}{character, the GEO Platform Accession ID.}
\item{getGPL}{logical, if TRUE, the corresponding GPL file will be downloaded. Default is TRUE.}
\item{update}{logical, if TRUE, the previously stored Gene ExpressionSet RData will be updated. Default is FALSE.}
}
\value{
Returns an ExpressionSet class object.
}
\description{
\code{load.exp.GEO} downloads a user-specified Gene Expression Series (GSE file), along with its platform, from the GEO database.
It returns an ExpressionSet class object and saves it as RData. If the GSE RData already exists, it will be loaded directly.
It also allows users to update a previously saved Gene Expression Series RData.
}
\examples{
\dontrun{
# Download GSE116028, which was performed on the GPL6480 platform,
# from GEO and save it to the current directory
# Assign this ExpressionSet object to net_eset
net_eset <- load.exp.GEO(out.dir='./',
GSE='GSE116028',
GPL='GPL6480',
getGPL=TRUE,
update=FALSE)
}
}
|
b044bd92d43866936200f6ebb03c612c327370c9 | 0b02833c0833f945d379408ab2c9c77afe9a0216 | /AUC Final.R | 7189154bbd83ac8e7e706144fb4375497f582c49 | [] | no_license | Karagul/Regression-with-Mortgage-Data | 393287644d8636dc8464887cc28e04b45de699c2 | 2bff880466c3813c62bf3360382d14fe14197a07 | refs/heads/master | 2020-06-14T02:58:36.477350 | 2018-11-14T22:18:33 | 2018-11-14T22:18:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 741 | r | AUC Final.R | glm.out <- c(glm11.out, glm12.out, glm13.out, glm14.out, glm15.out)
yp1 <- predict(glm11.out, propt1, type="response")
yp2 <- predict(glm12.out, propt2, type="response")
yp3 <- predict(glm13.out, scoret2, type="response")
yp4 <- predict(glm14.out, scoret3, type="response")
yp5 <- predict(glm15.out, scoret4, type="response")
yp <- c(yp1, yp2, yp3, yp4, yp5)
#yp <- predict(glm.out, testing2_D2, type="response")
# Hand-rolled ROC/AUC: y = 0/1 outcomes, s = predicted scores
roc <- function(y, s)
{
  yav <- rep(tapply(y, s, mean), table(s))  # mean outcome within each tied score, in score order
  rocx <- cumsum(yav)                       # running total of positives
  rocy <- cumsum(1 - yav)                   # running total of negatives
  area <- sum(yav * (rocy - 0.5 * (1 - yav)))  # accumulated area with a correction for ties
  x1 <- c(0, rocx)/sum(y)                   # normalise both axes to [0,1]
  y1 <- c(0, rocy)/sum(1 - y)
  auc <- area/(sum(y) * sum(1 - y))
  print(auc)
  plot(x1,y1,"l")
}
roc(testing2_D2$def_in_24_months_F, yp)
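# Cross-check sketch (assumes the ROCR package used elsewhere in this project):
# pred <- ROCR::prediction(yp, testing2_D2$def_in_24_months_F)
# unlist(ROCR::performance(pred, "auc")@y.values)  # should match the printed AUC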
|
fe6859c93fd14a8f76e962e88243965c49953870 | b5bc79574cf46cd80962d101fdb3189b054f7e50 | /plot3.R | 8f15049543b9707e7c68b52295f1b1fb919a1bdd | [] | no_license | zakia5/EDA | 7ccc25cbe44eb1055407179375f51859eaeb804d | c601bc64e78b80f7505c2ddba2b5bc7521ab3a89 | refs/heads/master | 2021-01-10T00:58:44.068044 | 2016-01-29T18:08:00 | 2016-01-29T18:08:00 | 50,677,820 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 953 | r | plot3.R | library(data.table)
## reading data
inputFile <- "household_power_consumption.txt"
data <- read.table(inputFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".",na.strings='?')
work_data <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# assign to variables
datetime <- strptime(paste(work_data$Date, work_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
subMetering1 <- as.numeric(work_data$Sub_metering_1)
subMetering2 <- as.numeric(work_data$Sub_metering_2)
subMetering3 <- as.numeric(work_data$Sub_metering_3)
# open device
png(filename='plot3.png',width=480,height=480,units='px')
##plot data
plot(datetime, subMetering1, type="l", ylab="Energy sub metering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# add legend
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty='solid', col=c("black", "red", "blue"),bty = "n")
# close device
x<-dev.off()
|
e3501653150517302c0231d19884ee37e14ea741 | 43666c1680c3b982c3db9b0de759bb1f380f899f | /UN/UN.R | 8b05951a577ea4b989b66c37b9f934d47cf97118 | [] | no_license | RobHarrand/datadotworld | 4490c24ab8c69d2b01c2ea4bb61e3ad25e6b0f9b | 8ced56e2fb28a64f87d1501c9f35dc7c619a1048 | refs/heads/master | 2020-03-11T08:03:13.721701 | 2018-04-17T08:42:39 | 2018-04-17T08:42:39 | 129,873,592 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,238 | r | UN.R | library(plotly)
GDI = read.csv('GDI_tidied.csv', stringsAsFactors = T, header = T)
HDI = read.csv('HDI_tidied.csv', stringsAsFactors = T, header = T)
MPI = read.csv('MPI_tidied.csv', stringsAsFactors = T, header = T)
MPI$Population_in_MDP_k = gsub(",", ".", MPI$Population_in_MDP_k)
MPI$Population_in_MDP_k = as.numeric(MPI$Population_in_MDP_k)
all = merge(GDI, HDI, by.x = 'Country', by.y = 'Country')
all = merge(all, MPI, by.x = 'Country', by.y = 'Country', all.x = T)
all$Year_of_survey = gsub("/$|M|D|N|P", "", as.character(all$MPI_year_and_survey)) # drop trailing "/" and the survey-type codes (M/D/N/P), keeping the year
all$Year_of_survey = as.factor(all$Year_of_survey)
Sys.setenv("plotly_username"="robh")
Sys.setenv("plotly_api_key"="ILTpvNQjVCzCgH99TgYW")
p = plot_ly(x = all$Human_Development_Index,
y = all$MPI_index,
alpha= 0.70,
mode = "markers",
type = "scatter",
showlegend = F,
color = all$Country,
size = I(sqrt(all$Population_in_MDP_k)*2),
hoverinfo = "text",
text = paste(toupper(all$Country),
"</br>Human Development Index: ",
all$Human_Development_Index,
"</br>Multi-dimensional Poverty Index: ",
all$MPI_index,
"</br>Life expectency at birth: ",
all$Life_expectancy_at_birth,
"</br>Population in Multidimensional Poverty (thousands): ",
all$Population_in_MDP_k,
"</br>MPI Year of survey: ",
all$Year_of_survey)) %>%
layout(title ="HDI vs MPI",
annotations = list(text = paste("Point size proportional to </br> Population in Multi-dimensional Poverty </br> (thousands)"),
x = 0.7,
y = 0.5,
showarrow = F,
font = list(size = 12,
color = 'white')),
titlefont = t,
plot_bgcolor='black',
xaxis = list(title = "Human Development Index",
titlefont = t),
yaxis = list(title = "Multi-dimensional Poverty Index",
titlefont = t))
plotly_POST(p, sharing = 'public')
p
|
a1f4660ee057a58e5d1ffa8846e70dd451e0bf50 | 5adc0dfe6cae8f90cc20cd149bf03852b0396e34 | /tests/testthat/test_calculation_nutrient.R | 761e1e0e80dbc218368bc3099471ac949b810ad9 | [
"MIT"
] | permissive | AGROFIMS/ragrofims | 43664011980affa495c949586bde192d08d4b48e | bc560a62c19c30bbc75615a19a4b9f8a235f7ddf | refs/heads/master | 2023-02-21T08:49:34.989861 | 2021-01-20T16:22:48 | 2021-01-20T16:22:48 | 277,626,238 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,935 | r | test_calculation_nutrient.R |
library(ragapi)
library(ragrofims)
context("test of calculation nutrient in products")
test_that("Test Calculation of Nutrient Pipeline with empty table API v0291", {
out <- get_agrofims_fertproducts(expsiteId= 6,
format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version = "/0291/r"
)
fertilizer <- get_fertproducts_crop(fertproducts = out, crop = "Field")
fertilizer <- calc_nutamount(fertilizer)
testthat::expect_equal(ncol(fertilizer), 0)
testthat::expect_equal(nrow(fertilizer), 0)
})
test_that("Test calculation of nutrient amount - Empty table with API v0291", {
out <- get_agrofims_fertproducts(expsiteId= 6,
format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version = "/0291/r"
)
fertilizer <- calc_nutamount(fertilizer = out)
testthat::expect_equal(ncol(fertilizer), 0)
testthat::expect_equal(nrow(fertilizer), 0)
})
test_that("Test calculation of nutrient amount - with Other Crop- ID=8 - API v0291", {
out <- get_agrofims_fertproducts(expsiteId=8,
format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version = "/0291/r"
)
fertilizer <- ragrofims::get_fertproducts_crop(out, crop = "omar benites")
fertilizer <- calc_nutamount(fertilizer = fertilizer)
testthat::expect_equal(ncol(fertilizer), 17)
testthat::expect_equal(nrow(fertilizer), 3)
})
test_that("Test calculation of nutrient amount - fertilizer product - Crop Barley ID=18 - API v0291", {
out <- get_agrofims_fertproducts(expsiteId=18,
format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version = "/0291/r"
)
fertilizer <- get_fertproducts_crop(fertproducts = out, crop = "Cassava")
fertilizer <- calc_nutamount(fertilizer = fertilizer)
testthat::expect_equal(ncol(fertilizer), 17)
testthat::expect_equal(nrow(fertilizer), 2)
})
test_that("Test calculation of nutrient amount - with no products - ID=9 - API v0291", {
out <- get_agrofims_fertproducts(expsiteId=9,
format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version = "/0291/r"
)
fertilizer <- calc_nutamount(fertilizer = out)
testthat::expect_equal(ncol(fertilizer), 17)
testthat::expect_equal(nrow(fertilizer), 4)
})
|
875032b0f221c34d2a6d807115c8283ef2b2ba1a | 98b8bb57f4f1b632f99cd01237ae8b116b939e9f | /Basic and Statistics in R.R | 4db4e5672a951b14855dfe99080245cfc5796c24 | [] | no_license | UD125/Basic-and-Statistics-in-R | 404a1af2096f6469053e0889d3e2c3ddfd08a94f | 0e08f517c3b970a8d433acee7c47d53c19aca56e | refs/heads/master | 2021-07-13T22:48:03.859694 | 2017-10-14T11:49:52 | 2017-10-14T11:49:52 | 106,922,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,087 | r | Basic and Statistics in R.R | Name <- c("a","b","c","d","e","f","g","h","i","j")
Age <- c(22,43,12,17,29,5,51,56,9,44)
Sex <- c("M","F","M","M","M","F","F","M","F","F")
data1 <- data.frame(Name,Age,Sex,stringsAsFactors = FALSE)
data1
data1$Sex <- as.factor(data1$Sex)
str(data1)
which.min(data1$Age)
which.max(data1$Age)
cumsum(data1$Age)
cumprod(data1$Age)
prod(data1$Age)
x <- seq(1,100)
above10 <- function(x){
a<- x>10
x[a]
}
above10(x)
x1 <- seq(1,1000)
above50 <- function(x1){
b <- x1>50
x1[b]
}
above50(x1)
above50(x)
#functions
above <- function(x,n){
Use<- x>n
x[Use]
}
above(x,80)
above(x1,600)
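#a default argument (sketch) makes the one-off helpers above redundant:
# above_d <- function(x, n = 10) x[x > n]
# above_d(x)       # uses the default n = 10
# above_d(x1, 50)  # same as above50(x1)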
y1 <- seq(1,1000)
above60 <- function(x1,y1){
b <- x1>50
c <- y1>60
  # return both filtered vectors (a list keeps them separate)
  return(list(x1[b], y1[c]))
}
above60(x1,y1)
#Simple Function
A <- function(x,y){
x+y
}
A(2,3)
#Complex Function
A1 <- function(x,y){
z1 <- x+y
z2 <- 2*x+y
z3 <- x+2*y
z4 <- x^2+y^2
return(c(z1,z2,z3,z4))
}
A1(2,3)
ans <- A1(2,3)
A1 <- function(x,y){
z1 <- x+y
z2 <- 2*x+y
z3 <- x+2*y
z4 <- x^2+y^2
print(c(z1,z2,z3,z4))
}
#Control Structures in R
#For Loop & Nested for loops
#While loop
#Repeat loop
#Statistics in R
for(i in 1:100){
print(i)
}
#1
x <- c("a","b","c","d","e")
x
for(letters in x){
print(letters)
}
#Print the first four letters of x
for(i in 1:4){
print(x[i])
}
#Print the years from 2000 to 2020 through for loop
print(paste("The year is",2010))
for(i in 2000:2020){
print(paste("The year is",i))
}
#or
Year <- seq(2000,2020)
for(i in Year){
print(paste("The year is",i))
}
#Print the years from 2000 to 2020 through for loop , if they are only leap years
for(i in Year){
  if(i%%4){next} # skip years not divisible by 4 (simplified rule; ignores century exceptions)
print(paste("The Leap year is",i))
}
#--------------------------------------Nested For Loops-----------------------------
z <- matrix(1:16,4,4)
seq_len(nrow(z))
#Extracting the elements of matrix through Nested for loop
for(i in seq_len(nrow(z))) {
for(j in seq_len(ncol(z)))
print(z[i,j])
}
for(i in 1:5){
for(j in 1:2)
print(i*j)
}
#---------------------------------While Loop-------------------------------
#While loop begins with testing a condition.
#If it is true then they execute the loop body
#Once the loop body is executed ,
#the condition is tested again and so forth.
count <- 0
while(count < 10) {
  print(count)
  count <- count + 1
}
## A while loop can result in an infinite loop if not written
## properly, so it should be used with care
y <- 0
i <- 100
while(i <= 200) {
i = i+(i*.085)
y = y+1
print(i)
print(y)
}
192.0604+192.0604*0.085 #After this loop condition ends
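#the iteration count can also be computed directly (sketch): each pass grows i
#by 8.5%, so ceiling(log(200/100)/log(1.085)) gives the 9 passes seen above
ceiling(log(2)/log(1.085))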
#----------------------------------------Repeat Loop-----------------------------
##Repeat loop initiates an infinite loop; these are not commonly used in
##statistical applications but they do have their uses. The only way to exit
##a repeat loop is to call break
i <- 2
repeat {
print(i)
i = i + 1
if(i > 4)
break
}
## If break is omitted, it will be an infinite loop
#-----------------------------Statistics in R---------------------------------
install.packages("nortest")
library(nortest)
#T - test
ClassA = c(18,22,21,17,20,17,23,20,22,21)
ClassB = c(16,20,14,21,20,18,13,15,17,21)
length(ClassA)
length(ClassB)
mean(ClassA)
mean(ClassB)
median(ClassA)
median(ClassB)
boxplot(ClassA,ClassB)
summary(ClassA)
summary(ClassB)
#----------Two-tailed T-test-------------
#H0: There is no difference between the means
#H1: The means of the two groups are not the same
t.test(ClassA,ClassB) #We reject the null H0 as the p-value = 0.03798
#When the p-value is less than 0.05 we reject H0 at the 95% confidence level
#Confidence Level :90% , p-value: 0.1
#Confidence Level :95% , p-value: 0.05
#Confidence Level :99% , p-value: 0.01
#Confidence Level :99.9% , p-value: 0.001
#----------One-tailed T-test----------
##H0 : There is no difference
##H1 : The difference is less than 0 # i.e. the mean of ClassA is less than that of ClassB
t.test(ClassA,ClassB,alternative = "less",var.equal = T)
##H0 : There is no difference
##H1 : The difference is greater than 0
t.test(ClassA,ClassB,alternative = "greater",var.equal = T)
#-----------------------Analysis of Variance (ANOVA)----------------
smoke <- c(38,42,14,41,41,16,36,39,18,32,36,15,28,33,17)
income <- c(1,1,1,2,2,2,3,3,3,4,4,4,5,5,5)
age <- c(1,2,3,1,2,3,1,2,3,1,2,3,1,2,3)
data4 <- data.frame(smoke,income,age)
##Here we want to test whether the score of smoke is different across
##categories of income or age
#Test with age var (as a factor, so ANOVA treats it as categorical)
fit <- aov(smoke ~ factor(age) , data = data4)
summary(fit)
#Test with income var (as a factor, so ANOVA treats it as categorical)
fit <- aov(smoke ~ factor(income) , data = data4)
summary(fit)
##To Test
#Categorical - Continuous Variables: we use ANOVA
#Categorical - Categorical Variables: we use Chi-square
#Continuous - Continuous Variables: we use Correlation
#---------------------------Chi-square Test of Independence---------------
##When both variables are categorical in nature
##Two random variables x and y are called independent
##if the probability distribution of one variable is not affected by the other.
library(MASS)
sur <- survey
head(survey)
dim(sur)
levels(sur$Smoke)
levels(sur$Exer)
tbl = table(sur$Smoke,sur$Exer)#Contingency table of Smoke and Exer
tbl
#Test the hypothesis whether the student smoking habit is independent of their
#exercise level at .05 level of significance
chisq.test(tbl)
#Since the p-value is > .05, we fail to reject H0: the smoking habit is independent
#of the exercise level
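#A quick assumption check (sketch): the chi-square approximation is doubtful
#when many expected counts fall below 5 (chisq.test() warns in that case)
chisq.test(tbl)$expected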
#---------------------------------Normality Test------------------------------
install.packages("nortest",dependencies = TRUE)
library(nortest)
head(mtcars)
hist(mtcars$mpg)
barplot(mtcars$mpg)
##Superimpose a Normal Curve
x <- mtcars$mpg
m <- median(x)
x
m
std <- sqrt(var(x))
std
hist(x,density = 20,breaks = 20,prob = TRUE, xlab = "x-variable",
ylim = c(0,0.15),main = "Normal curve over histogram")
curve(dnorm(x,mean = m,sd=std),col="darkblue",lwd=2,add = TRUE)
ad.test(x)
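#Cross-check (sketch): shapiro.test() is the base-R normality test; like
#ad.test(), its H0 is normality, so p < 0.05 would reject normality
shapiro.test(x)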
#---------------Treatment of Missing Values :Example with substituting with the mean of the series
A <- data.frame(a=1:10,b=11:20)
A[A$b<14,"b"]=NA
A
A1<-A
as.data.frame(colSums(is.na(A)))
#Imputing the values with mean value of the series
A1[is.na(A1$b),"b"] = mean(A1$b,na.rm = T)
A1
#Imputing the values with the median value of the series (on a fresh copy of A)
A2 <- A
A2[is.na(A2$b),"b"] = median(A2$b,na.rm = T)
A2
#------------------SQL in R using package SQLDF-----------------------
Name <- c("a","b","c","d","e","f","g","h","i","j")
Age <- c(22,43,12,17,29,5,51,56,9,44)
Sex <- c("M","M","F","M","M","M","F","M","F","M")
data1 <- data.frame(Name,Age,Sex,stringsAsFactors = FALSE)
data1
summary(data1)
data1$Sex <- as.factor(data1$Sex)
install.packages("sqldf",dependencies = TRUE)
library(sqldf)
#-----Selecting all the variables-----
S1 <- sqldf("select *
from data1") # * - refers to all var in the dataset
S1
#-----Selecting Variables based on Names--
S2 <- sqldf("select Name, Age
from data1")
S2
#------Creating Alias Names from the column----
S3 <- sqldf("select name as Full_name, age as Total_age
from data1")
S3
#------------Subsetting Data with Condition----
S4 <- sqldf("select *
from data1
where Age > 20") #where condition
S4
#------------Subsetting Data with AND condition-----
S5 <- sqldf('select *
from data1
where Age > 20 AND Sex == "M" ')
S5
#------------Subsetting Data with OR condition------
S6 <- sqldf("select *
from data1
where Age > 20 OR Sex == 'F'")
S6
#-----------Create new column with condition------
S7 <- sqldf('select *,
Age+10 as Age_new,
Age-avg(Age) as Age_old
from data1')
S7
#-----------Create new columns with fixed offsets----
S8 <- sqldf('select *,
Age+10 as Age_new,
Age-10 as Age_old
from data1')
S8
#-----------Descriptive Stat in SQL------
S9 <- sqldf('select
min(age) as min_age,
max(age) as max_age,
avg(age) as avg_age,
count(age) as count_age,
sum(age) as sum_age
from data1')
S9
#-----------Descriptive Stat in SQL - segregated by sex ----
S10 <- sqldf('select Sex,
min(age) as min_age,
max(age) as max_age,
avg(age) as avg_age,
count(age) as count_age,
sum(age) as sum_age
from data1
group by Sex')
S10
#--------Descriptive Stat in SQL - segregated by sex - descending sorted by sex
S11 <- sqldf('select Sex,
min(age) as min_age,
max(age) as max_age,
avg(age) as avg_age,
count(age) as count_age,
sum(age) as sum_age
from data1
group by Sex
order by Sex desc')
S11
#---------Else if statement-----
S12 <- sqldf("select *,
case
when Age <= 20 then 'A'
when Age > 20 AND Age <= 40 then 'B'
else 'C'
end classify
from data1
order by classify desc")
S12
S13 <- sqldf("select *,
case
when Name == 'b' then 'Correct'
else 'Incorrect'
end new
from data1")
S13
#----------Filtering Data-------
S14 <- sqldf('select *
from data1
where Age< 40 and Sex == "M"')
S14
#-------- Use "having" (not "where") with group by to filter on summary stats
#The where clause doesn't work on aggregated columns; we use 'having'
#on those columns.
#Usage of Having statement
S15 <- sqldf("select Sex,Age,
max(age) as max_age,
min(age) as min_age
from data1
group by Sex
having max_age - min_age >5")
S15
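#Contrast (sketch): the same filter in a where clause fails, because where is
#evaluated before group by, while having runs after aggregation
# sqldf("select Sex, max(age) as max_age from data1
#        where max(age) - min(age) > 5 group by Sex")  # error: misuse of aggregate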
#---------Inserting a new field--------
S16 <- sqldf("select *, square(age) sqr_age, sqrt(age) sqrt_age
from data1")
S16
#--------Exporting data to csv-------
write.csv(S16,"SQLinR.csv")
getwd()
S17 <- sqldf("select *
from data1
where Age in (29,51,56)")
S17
S18 <- sqldf("select *
from data1
where Name in ('a','d','e','h')")
S18
|
274b013d1fbd0705a3be1eb94ebe2a4d4c218494 | 0fc75bfea69ffe1908941c4d1f2a9f6e7b0056c0 | /nomsplit.R | a4eeacdfa4c8a3de44f468f3a16127e64b655b34 | [] | no_license | citizen-monitoring/citizen-monitoring.github.io | 1100f3abb2c9555e5870c2e63331b731e45ac4bc | 2ae7a1c8e8af216c66ff75c82dd60efac91f7d00 | refs/heads/master | 2021-01-10T13:43:06.225014 | 2016-02-04T00:54:41 | 2016-02-04T00:54:41 | 49,733,098 | 0 | 2 | null | 2016-01-18T04:15:48 | 2016-01-15T17:10:20 | R | UTF-8 | R | false | false | 1,087 | r | nomsplit.R | # Split an input data set based on whether the respondent was nominated or not
# Function: nomsplit
nomsplit = function(x){
  library(dplyr) # filter() used below is dplyr's verb
nomYes = filter(x, x$Nominated == "yes") # separate nominated respondents
nomNo = filter(x, x$Nominated == "no") # separate non-nominated respondents
final = list(nomYes, nomNo) # creates a list with the two outputs (Y set and N set)
return(final) # returns the list of 2 objects, with Y nominations being [[1]] and N being [[2]]
}
# Program to run the function and separate the "yes" and "no" outputs.
testnom = nomsplit(ugandasms) # run the function nomsplit, save to a variable "testnom" which will be a list of 2
# The function has returned a list of 2 objects. Now we need to separate those objects into separate tables. The double brackets allow us to do this.
# There's probably a cleaner way to do this within the function that I don't know.
nomYes = testnom[[1]] # separate out the "yes" object from the list, save to a new object
nomNo = testnom[[2]] # separate out the "no" object from the list, save to a new object |
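# A base-R equivalent (sketch): split() performs the same partition in one call,
# returning a named list keyed by the factor levels.
# bysplit = split(ugandasms, ugandasms$Nominated)
# nomYes = bysplit[["yes"]]; nomNo = bysplit[["no"]]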
fbcf067c0f36727d95984b299b6cd65ef63215e9 | d3b774668f6e577cefdeea4dd2be1326ee4b5aee | /man/checkarg_file_name.Rd | 832136b4b89db4a7a0111d6aaa72b8ca75a25d09 | [
"MIT"
] | permissive | ropensci/qualtRics | 50e68a3dd3f184ee14f19126bd7783b4b9bd61d1 | c721563fa2fcb734c1ad9c4d8ccd80bbefbed15d | refs/heads/main | 2023-08-31T01:00:05.366989 | 2023-06-23T18:55:13 | 2023-06-23T18:55:13 | 70,817,337 | 188 | 64 | NOASSERTION | 2023-09-07T19:38:56 | 2016-10-13T14:51:26 | R | UTF-8 | R | false | true | 325 | rd | checkarg_file_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checks.R
\name{checkarg_file_name}
\alias{checkarg_file_name}
\title{Check if survey file specified in file_name exists}
\usage{
checkarg_file_name(file_name)
}
\description{
Check if survey file specified in file_name exists
}
\keyword{internal}
|
680600f0f42c6e655331ab89a3e53aadd92e6859 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Introductory_Statistics_by_Douglas_S_Shafer_And_Zhiyi_Zhang/CH2/EX2.10/Ex2_10.R | 7f0e8f16493d9951bc1f62d2da98dd52759add96 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 210 | r | Ex2_10.R | #Page 58
dataset_1<-c(40,38,42,40,39,39,43,40,39,40)
dataset_2<-c(46,37,40,33,42,36,40,47,34,45)
findrange=function(v){
range=max(v)-min(v)
print(range)
}
findrange(dataset_1)
findrange(dataset_2)
|
bc6ce74dea21f7c3f999ceb5318b4897c716e4fe | eeb4249594b67f0564e8563ab83ecf641ef3ed8f | /man/seq_scan_sim.Rd | ee5579c7d0cf69b0caf4fdd97a969714bdcf3b43 | [] | no_license | cran/smerc | 6e81aaa86f2405364f2368079a07f949317848fc | aab00112b726a9392395b1937f3f92e1bbd3cb3e | refs/heads/master | 2023-07-23T05:53:39.316070 | 2023-07-15T19:30:02 | 2023-07-15T20:30:28 | 48,088,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,016 | rd | seq_scan_sim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seq_scan_sim.R
\name{seq_scan_sim}
\alias{seq_scan_sim}
\title{Perform scan test on simulated data sequentially}
\usage{
seq_scan_sim(
nsim = 1,
nn,
ty,
ex,
type = "poisson",
ein = NULL,
eout = NULL,
tpop = NULL,
popin = NULL,
popout = NULL,
cl = NULL,
simdist = "multinomial",
pop = NULL,
min.cases = 0,
ldup = NULL,
lseq_zones
)
}
\arguments{
\item{nsim}{A positive integer indicating the number of
simulations to perform.}
\item{nn}{A list of nearest neighbors produced by \code{\link{nnpop}}.}
\item{ty}{The total number of cases in the study area.}
\item{ex}{The expected number of cases for each region.
The default is calculated under the constant risk
hypothesis.}
\item{type}{The type of scan statistic to compute. The
default is \code{"poisson"}. The other choice
is \code{"binomial"}.}
\item{ein}{The expected number of cases in the zone.
Conventionally, this is the estimated overall disease
risk across the study area, multiplied by the total
population size of the zone.}
\item{eout}{The expected number of cases outside the
zone. This should be \code{ty - ein} and is computed
automatically if not provided.}
\item{tpop}{The total population in the study area.}
\item{popin}{The total population in the zone.}
\item{popout}{The population outside the zone. This
should be \code{tpop - popin} and is computed
automatically if not provided.}
\item{cl}{
A cluster object created by \code{\link{makeCluster}},
or an integer to indicate number of child-processes
(integer values are ignored on Windows) for parallel evaluations
(see Details on performance).
It can also be \code{"future"} to use a future backend (see Details),
\code{NULL} (default) refers to sequential evaluation.
}
\item{simdist}{Character string indicating the simulation
distribution. The default is \code{"multinomial"}, which
conditions on the total number of cases observed. The
other options are \code{"poisson"} and \code{"binomial"}}
\item{pop}{The population size associated with each
region.}
\item{min.cases}{The minimum number of cases required for
a cluster. The default is 2.}
\item{ldup}{A logical vector indicating positions of duplicated zones. Not
intended for user use.}
\item{lseq_zones}{A list of logical vectors specifying the sequence of
relevant zones based on ubpop constraints}
}
\value{
A list with the maximum statistic for each population upperbound for
each simulated data set. Each element will have a vector of maximums for
each simulated data set corresponding to the sequence of ubpop values. The
list will have \code{nsim} elements.
}
\description{
\code{seq_scan_sim} efficiently performs \code{\link{scan.test}} on a
simulated data set. The function is meant to be used internally by the
\code{\link{optimal_ubpop}} function in the smerc package.
}
\keyword{internal}
|
5d320757e2c7398ec0593c8627583b1bea9a2eca 66e04f24259a07363ad8da7cd47872f75abbaea0 /Intro to SQL for Data Science/Chapter 1-Selecting columns/4.R aa1ebe9d241bae23967c97eba30f74a54c97c1ca ["MIT"] | permissive | artileda/Datacamp-Data-Scientist-with-R-2019 | 19d64729a691880228f5a18994ad7b58d3e7b40e | a8b3f8f64cc5756add7ec5cae0e332101cb00bd9 | refs/heads/master | 2022-02-24T04:18:28.860980 | 2019-08-28T04:35:32 | 2019-08-28T04:35:32 | 325,043,594 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 495 | r | 4.R | # Onboarding | Bullet Exercises
# Another new feature we're introducing is the bullet exercise, which allows you to easily practice a new concept through repetition. Check it out below!
#
# Instructions 1/3
# 1
# Submit the query in the editor! Don't worry, you'll learn how it works soon.
SELECT 'SQL'
AS result;
#Now change 'SQL' to 'SQL is' and click Submit!
SELECT 'SQL is'
AS result;
#Finally, change 'SQL is' to 'SQL is cool!' and click Submit!
SELECT 'SQL is cool!'
AS result;
|
f8e5419b174e5248488e7864d40d9864ad99ca0f | 7492f6d8c4b9f2504ecd35003072a27f2b39d5ff | /project3.r | adf0075bb2ed3fb7e29d18c620ce213387c7d399 | [] | no_license | linleiwen/Credit-Card-Fraud-Detection | 4d172e107acd9432bd869af07b80829e8339782d | 831ceea64ff95fda7face32a9c9d5b89372b5a66 | refs/heads/master | 2021-05-14T00:44:03.975457 | 2018-01-17T15:15:25 | 2018-01-17T15:15:25 | 116,546,088 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,699 | r | project3.r | library(randomForest)
library(caret)
library(ROCR)
library(DMwR)# SMOTE more positive cases
library(data.table)
library(zoo)
library(parallel)
library(ggplot2)
library(dplyr)
detectCores()
df <- fread("creditcard.csv")
#### Exploratory analysis
prop.table(table(df$Class))
summary(df)
sum(is.na(df)) ##check na
set.seed(1003)
ggplot(df, aes(x=V3)) + geom_density(aes(group=Class, colour=Class, fill=Class), alpha=0.3)
#### Data pre-processing
## 'normalize' the data
transform_columns <- c("V","Amount")
transformed_column <- df[ ,grepl(paste(transform_columns, collapse = "|"),names(df)),with = FALSE]
transformed_column_processed <- predict(preProcess(transformed_column, method = c("BoxCox","scale")),transformed_column)
df_new <- data.table(cbind(transformed_column_processed,Class = df$Class))
df_new[,Class:=as.factor(Class)]
set.seed(1003)
#### split into Training and Test dataset
training_index <- createDataPartition(df_new$Class, p=0.7,list=FALSE)
training <- df_new[training_index,]
test<- df_new[-training_index,]
####smote
# optional SMOTE rebalancing pass -- R has no block comments ('''),  so uncomment these lines to oversample the minority class:
# table(training$Class)
# training <- SMOTE(Class ~ ., training, perc.over = 57600, perc.under=100) ## inflate "1" by x percentage, reduce "0" as y percentage
# prop.table(table(training$Class))
# table(training$Class)
### Logistic regression
logit <- glm(Class ~ ., data = training, family = "binomial")
logit_pred <- predict(logit, test, type = "response")
logit_prediction <- prediction(logit_pred,test$Class)
logit_recall <- performance(logit_prediction,"prec","rec") ##precision vs recall
logit_roc <- performance(logit_prediction,"tpr","fpr") ## TP rate vs FP rate
logit_auc <- performance(logit_prediction,"auc")
##kernel SVM
library(e1071)
ksvm.model = svm(formula = Class ~ .,
data = training,
type = 'C-classification', ## for classification
kernel = 'radial',probability=TRUE) ## Gaussian not linear
KSVM_pred = predict(ksvm.model, test, probability=TRUE)
KSVM_prediction = prediction(attr(KSVM_pred,"probabilities")[,2],test$Class)
KSVM_recall <- performance(KSVM_prediction,"prec","rec") ##precision vs recall
KSVM_roc <- performance(KSVM_prediction,"tpr","fpr") ## TP rate vs FP rate
KSVM_auc <- performance(KSVM_prediction,"auc")
### Random forest
rf.model <- randomForest(Class ~ ., data = training,ntree = 200, nodesize = 20)
rf_pred <- predict(rf.model, test,type="prob")
rf_prediction <- prediction(rf_pred[,2],test$Class)
rf_recall <- performance(rf_prediction,"prec","rec")
rf_roc <- performance(rf_prediction,"tpr","fpr")
rf_auc <- performance(rf_prediction,"auc")
### Bagging Trees
ctrl <- trainControl(method = "cv", number = 10)
tb_model <- train(Class ~ ., data = training, method = "treebag",
trControl = ctrl)
tb_pred <- predict(tb_model$finalModel, test, type = "prob")
tb_prediction <- prediction(tb_pred[,2],test$Class)
tb_recall <- performance(tb_prediction,"prec","rec")
tb_roc <- performance(tb_prediction,"tpr","fpr")
tb_auc <- performance(tb_prediction,"auc")
## xgboost
library(xgboost)
training[,Class:=as.integer(Class)-1]
test[,Class:=as.integer(Class)-1]
classifier = xgboost(data = as.matrix(training[,-30]), label = training$Class, nrounds = 100)
## as.matrix: xgboost only accepts a numeric matrix; nrounds: number of boosting iterations
# Predicting the Test set results
xgb_pred = predict(classifier, newdata = as.matrix(test[,-30]))
xgb_prediction <- prediction(xgb_pred,test$Class)
xgb_recall <- performance(xgb_prediction,"prec","rec") ##precision vs recall
xgb_roc <- performance(xgb_prediction,"tpr","fpr") ## TP rate vs FP rate
xgb_auc <- performance(xgb_prediction,"auc")
##plot result
plot(logit_recall,col='pink')
plot(rf_recall, add = TRUE, col = 'red')
plot(tb_recall, add = TRUE, col = 'green')
plot(xgb_recall, add = TRUE, col = 'black')
plot(KSVM_recall, add = TRUE, col = 'blue')
#### Functions to calculate 'area under the pr curve'
auprc <- function(pr_curve) {
x <- as.numeric(unlist(pr_curve@x.values))
y <- as.numeric(unlist(pr_curve@y.values))
y[is.nan(y)] <- 1
id <- order(x)
result <- sum(diff(x[id])*rollmean(y[id],2))
return(result)
}
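# auprc integrates the precision-recall curve with the trapezoidal rule: each
# diff(x) step is weighted by the mean of the two adjacent y values (rollmean(y, 2)).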
auprc_results <- data.frame(logit=auprc(logit_recall)
, rf = auprc(rf_recall)
, tb = auprc(tb_recall)
, xgb = auprc(xgb_recall)
,KSVM = auprc(KSVM_recall) )
# run the script twice -- once with the SMOTE block enabled, once without -- keeping one result object per pass
non_smote_aucpre = auprc_results
#smote_aucpre = auprc_results
non_smote_aucpre
aucroc_results <- data.frame(logit=as.numeric(attr(logit_auc,"y.values"))
, rf = as.numeric(attr(rf_auc,"y.values"))
, tb = as.numeric(attr(tb_auc,"y.values"))
, xgb = as.numeric(attr(xgb_auc,"y.values"))
,KSVM = as.numeric(attr(KSVM_auc,"y.values")) )
#non_smote_aucroc = aucroc_results
smote_aucroc = aucroc_results
temp = t(data.frame(rbind(non_smote_aucpre,smote_aucpre,non_smote_aucroc,smote_aucroc),row.names = c("non_smote_aucpre","smote_aucpre","non_smote_aucroc","smote_aucroc")))
library(reshape2) # melt() on a matrix with varnames comes from reshape2
temp = melt(temp,varnames = c("model","type"))
ggplot(data = temp, aes(x = type, y = value, colour = model, group = model))+geom_line(size = 1)
## plot ROC and precision-recall curves (evalmod/autoplot below come from the precrec package)
library(precrec)
### Logistic regression
sscurves1 <- evalmod(scores = logit_pred, labels = test$Class)
autoplot(sscurves1)
##kernel SVM
sscurves2 <- evalmod(scores = attr(KSVM_pred,"probabilities")[,2], labels = test$Class)
autoplot(sscurves2)
##rf
sscurves3 <- evalmod(scores = rf_pred[,2], labels = test$Class)
autoplot(sscurves3)
##tb
sscurves4 <- evalmod(scores = tb_pred[,2], labels = test$Class)
autoplot(sscurves4)
##xgboost
sscurves5 <- evalmod(scores = xgb_pred, labels = test$Class)
autoplot(sscurves5)
x = list(scores = list(list(logit_pred,attr(KSVM_pred,"probabilities")[,2],rf_pred[,2],tb_pred[,2],xgb_pred)),labels = test$Class, modnames = c("random","poor_er","good_er","excel","excel"), dsids = c(1,1,1,1,1))
mdat <- mmdata(x[["scores"]], x["labels"],modnames = c("Logistic regression ","kernel SVM ","random forest","tree bagging","xgboost"))
## Generate an mscurve object that contains ROC and Precision-Recall curves
mscurves <- evalmod(mdat)
## ROC and Precision-Recall curves
autoplot(mscurves)
sum(rf_pred[,2]>=0.00172)
cm = table(test$Class, rf_pred[,2]>=0.05)
cm/sum(cm)
(cm[1,1]+cm[2,2])/sum(cm)
sum(test$Class)/sum(cm)
save.image("project3.Rdata")
#load("project3.Rdata")
|
24127f85cca60791a831b3bf45e68b6b7143e062 280fae7f01002ddc95c0e7ec617740a58752403d /R/readCPEAT.R e784900f0f42c6e655331ab89a3e53aadd92e6859 ["BSD-2-Clause"] | permissive | ktoddbrown/soilDataR | 87bb4ed675959f3fbd75024dd7b014e1966148dd | 44ab9e6ac00e49ea0106508de8ead356d9e39fa5 | refs/heads/master | 2021-04-30T07:27:34.349030 | 2018-11-09T20:07:20 | 2018-11-09T20:07:20 | 92,432,342 | 3 | 11 | null | null | null | null | UTF-8 | R | false | false | 9,398 | r | readCPEAT.R | #' CPEAT project reads
#'
#' This reads in the specified records of the CPEAT project. Currently under development, not all
#' the metadata is parsed
#'
#' @param dataDir identify the download directory
#' @param workLocal logical; if TRUE, only files already present in the download directory are used and no downloads are attempted
#'
#' @return a list with three data frames: \code{site} (the parsed site metadata),
#' \code{sample} (the sample records), and \code{files} (the download table)
#' @export
#'
readCPEAT <- function(dataDir, workLocal = FALSE){
downloadDOI <- read.csv(text=gsub(' ', '', gsub(' core ', '_c', 'URL,Author,Site_core,orgFile,extra
https://doi.org/10.1594/PANGAEA.890471,Garneau,Aero,Aero.csv,
https://doi.org/10.1594/PANGAEA.890528,Yu,Altay core 1,Altay.csv,
https://doi.org/10.1594/PANGAEA.890198,Nichols,Bear core 1,Bear.csv,
https://doi.org/10.1594/PANGAEA.890472,Charman,Burnt Village core 1,Burnt_Village.csv,
https://doi.org/10.1594/PANGAEA.890473,Lavoie,Covey Hill,Covey_Hill. csv,
https://doi.org/10.1594/PANGAEA.890474,MacDonald,D127 core 1,D127.csv,
https://doi.org/10.1594/PANGAEA.890475,MacDonald,E110 core 1,E110.csv,
https://doi.org/10.1594/PANGAEA.890345,Sannel,Ennadai core 1,Ennadai.csv,
https://doi.org/10.1594/PANGAEA.890529,Anderson,Glen Carron core 1,Glen_Carron.csv,
https://doi.org/10.1594/PANGAEA.890530,Anderson,Glen Torridon core 1,Glen_Torridon.csv,
https://doi.org/10.1594/PANGAEA.890346,Yu,Goldeye fen,Goldeye.csv,
https://doi.org/10.1594/PANGAEA.890397,Packalen,HL02,HL02.csv,
https://doi.org/10.1594/PANGAEA.890531,Large,Hongyuan core HYLK1,Hongyuan.csv,
https://doi.org/10.1594/PANGAEA.890199,Jones,Horse Trail core 1,Horse_Trail.csv,
https://doi.org/10.1594/PANGAEA.890398,Holmquist,JBL1,JBL1.csv,
https://doi.org/10.1594/PANGAEA.890399,Holmquist,JBL2,JBL2.csv,
https://doi.org/10.1594/PANGAEA.890400,Holmquist,JBL3,JBL3.csv,
https://doi.org/10.1594/PANGAEA.890401,Holmquist,JBL4,JBL4.csv,
https://doi.org/10.1594/PANGAEA.890402,Holmquist,JBL5,JBL5.csv,
https://doi.org/10.1594/PANGAEA.890403,Holmquist,JBL7,JBL7.csv,
https://doi.org/10.1594/PANGAEA.890404,Holmquist,JBL8,JBL8.csv,
https://doi.org/10.1594/PANGAEA.890405,Camill,Joey core 12,Joey.csv,
https://doi.org/10.1594/PANGAEA.890406,Camill,Joey core 15,Joey.csv,
https://doi.org/10.1594/PANGAEA.890407,Camill,Joey core 17,Joey.csv,
https://doi.org/10.1594/PANGAEA.890408,Camill,Joey core 2,Joey.csv,
https://doi.org/10.1594/PANGAEA.890409,Camill,Joey core 5,Joey.csv,
https://doi.org/10.1594/PANGAEA.890410,Camill,Joey core 7,Joey.csv,
https://doi.org/10.1594/PANGAEA.890532,Loisel,KAM core C1,KAM12-C1.csv,
https://doi.org/10.1594/PANGAEA.890533,Bochicchio,KAM core C4,KAM12-C4.csv,
https://doi.org/10.1594/PANGAEA.890200,Yu,Kenai Gasfield,Kenai_Gasfield.csv,
https://doi.org/10.1594/PANGAEA.890411,Packalen,KJ2-3,KJ2-3.csv,
https://doi.org/10.1594/PANGAEA.890412,Lamarre,KUJU,KUJU.csv,
https://doi.org/10.1594/PANGAEA.890527,Borren,Kvartal core Zh0,86-Kvartal.csv,
https://doi.org/10.1594/PANGAEA.890413,Garneau,La Grande core L2T2C2-2,La_Grande2.csv,
https://doi.org/10.1594/PANGAEA.890414,Garneau,La Grande core L3T1C2,La_Grande3.csv,
https://doi.org/10.1594/PANGAEA.890476,van Bellen,Lac Le Caron ,Lac_Le_Caron.csv,
https://doi.org/10.1594/PANGAEA.890415,Camill,Lake 396 core 3,Lake396.csv,
https://doi.org/10.1594/PANGAEA.890416,Camill,Lake 785 core 4,Lake785.csv,
https://doi.org/10.1594/PANGAEA.890477,Magnan,Lebel core 1,Lebel.csv,
https://doi.org/10.1594/PANGAEA.890478,Mathijssen,Lompolojankka core 1,Lompolojankka.csv,
https://doi.org/10.1594/PANGAEA.890347,Yu,Mariana core 1,Mariana.csv,
https://doi.org/10.1594/PANGAEA.890348,Yu,Mariana core 2,Mariana.csv,
https://doi.org/10.1594/PANGAEA.890349,Yu,Mariana core 3,Mariana.csv,
https://doi.org/10.1594/PANGAEA.890350,Robinson,Martin core 1,Martin.csv,
https://doi.org/10.1594/PANGAEA.890479,van Bellen,Mosaik core Central,Mosaik.csv,
https://doi.org/10.1594/PANGAEA.890201,Yu,No Name Creek,No_Name_Creek.csv,
https://doi.org/10.1594/PANGAEA.890186,Yu,Nuikluk core 10-1,Nuikluk.csv,
https://doi.org/10.1594/PANGAEA.890202,Yu,Nuikluk core 10-2,Nuikluk.csv,
https://doi.org/10.1594/PANGAEA.890203,Tarnocai,NW-BG core 10,NW-BG.csv,
https://doi.org/10.1594/PANGAEA.890204,Tarnocai,NW-BG core 2,NW-BG.csv,
https://doi.org/10.1594/PANGAEA.890205,Tarnocai,NW-BG core 3,NW-BG.csv,
https://doi.org/10.1594/PANGAEA.890206,Tarnocai,NW-BG core 8,NW-BG.csv,
https://doi.org/10.1594/PANGAEA.890480,Garneau,Ours core 1,Ours.csv,
https://doi.org/10.1594/PANGAEA.890481,Garneau,Ours core 4,Ours.csv,
https://doi.org/10.1594/PANGAEA.890482,Garneau,Ours core 5,Ours.csv,
https://doi.org/10.1594/PANGAEA.890351,Yu,Patuanak core 1,Patuanak.csv,
https://doi.org/10.1594/PANGAEA.890208,Loisel,Petersville,Petersville.csv,
https://doi.org/10.1594/PANGAEA.890534,Charman,Petite Bog core 1,Petite_Bog.csv,
https://doi.org/10.1594/PANGAEA.890483,Magnan,Plaine core 1,Plaine.csv,
https://doi.org/10.1594/PANGAEA.890484,Oksanen,Rogovaya core 2,Rogovaya.csv,
https://doi.org/10.1594/PANGAEA.890485,Oksanen,Rogovaya core 3,Rogovaya.csv,
https://doi.org/10.1594/PANGAEA.890486,Makila,Saarisuo core B800,Saarisuo.csv,
https://doi.org/10.1594/PANGAEA.890352,Sannel,Selwyn Lake,Selwyn.csv,
https://doi.org/10.1594/PANGAEA.890417,Camill,Shuttle core 2,Shuttle.csv,
https://doi.org/10.1594/PANGAEA.890535,MacDonald,SIB06 core 1,SIB06.csv,
https://doi.org/10.1594/PANGAEA.890487,Charman,Sidney core 1,Sidney.csv,
https://doi.org/10.1594/PANGAEA.890488,Mathijssen,Siikavena core 1,Siikavena.csv,
https://doi.org/10.1594/PANGAEA.890353,Kuhry,Slave Lake ,Slave.csv,
https://doi.org/10.1594/PANGAEA.890489,van Bellen,Sterne,Sterne.csv,
https://doi.org/10.1594/PANGAEA.890490,Kokfelt,Stordalen,Stordalen.csv,
https://doi.org/10.1594/PANGAEA.890354,Yu,Sundance core 2,Sundance.csv,
https://doi.org/10.1594/PANGAEA.890355,Yu,Sundance core 3,Sundance.csv,
https://doi.org/10.1594/PANGAEA.889936,Jones,Swanson Fen,Swanson.csv,
https://doi.org/10.1594/PANGAEA.890356,Tarnocai,T1 core 1,T1.csv,
https://doi.org/10.1594/PANGAEA.890418,Camill,Unit core 4,Unit.csv,
https://doi.org/10.1594/PANGAEA.890357,Yu,Upper Pinto,Upper_Pinto.csv,
https://doi.org/10.1594/PANGAEA.890536,Oksanen,Usinsk core USI1,Usinsk.csv,
https://doi.org/10.1594/PANGAEA.890358,Yu,Utikuma core 1,Utikuma.csv,
https://doi.org/10.1594/PANGAEA.890537,MacDonald,V34 core 1,V34.csv,
https://doi.org/10.1594/PANGAEA.890538,Borren,Vasyugan core V21,Vasyugan.csv,
https://doi.org/10.1594/PANGAEA.890539,Bunbury,VC04-06 core 1,VC04-06.csv,
https://doi.org/10.1594/PANGAEA.890540,Zhao,Zoige core 1,Zoige.csv,')),
stringsAsFactors = FALSE) %>%
dplyr::select(-extra) %>%
dplyr::mutate(downloadURL = glue::trim(paste(gsub('doi.org', 'doi.pangaea.de', URL),
'?format=textfile', sep='')),
localFile = file.path(dataDir, paste0(Site_core, '.tab')))
if(any(!file.exists(downloadDOI$localFile)) & !workLocal){
#print(downloadDOI$localFile[!file.exists(downloadDOI$localFile)])
download.file(downloadDOI$downloadURL[!file.exists(downloadDOI$localFile)],
downloadDOI$localFile[!file.exists(downloadDOI$localFile)])
}
if(workLocal){
downloadDOI <- downloadDOI %>%
filter(file.exists(localFile))
}
allData <- plyr::ddply(downloadDOI, c('Site_core'), function(xx){
#print(xx$localFile)
readText <- read_file(xx$localFile)
#header <- regmatches(readText, regexpr('/\\* .*\n\\*/', readText))
return(read_tsv(gsub('/\\* .*\n\\*/\n', '', readText)))
})
allheader <- plyr::ddply(downloadDOI, c('Site_core'), function(xx){
#print(xx$localFile)
readText <- read_file(xx$localFile)
header <- regmatches(readText, regexpr('/\\* .*\n\\*/', readText))
ans <- unlist(strsplit(x=header, split='\n.+:\t', perl=TRUE))
ans <- ans[-1] #pop off the header
names(ans) <- gsub('\\n|\\t|:', '',
                       unlist(regmatches(header, gregexpr('\n.+:\t', header, perl=TRUE))))
return(as.data.frame(as.list(ans)))
})
metaData <- allheader %>%
dplyr::mutate(Size = as.numeric(gsub(' data points\n\\*/', '', Size))) %>%
dplyr::mutate_at(vars(License), as.factor) %>%
tidyr::separate(Coverage, c('lat_lab', 'lat', 'lon_lab', 'lon',
'min_depth_lab', 'min_depth', 'max_depth_lab', 'max_depth'),
sep='(: )|( \\* )|(\n\t)') %>%
#dplyr::select(-lat_lab, -lon_lab, -min_depth_lab, -max_depth_lab) %>%
dplyr::mutate(min_depth = gsub(' m$', '', min_depth),
max_depth = gsub(' m$', '', max_depth)) %>%
dplyr::mutate_at(vars(min_depth, max_depth, lat, lon), as.numeric) %>%
dplyr::group_by_all()
#TODO this needs to be parsed
# %>%
# do((function(xx){
# #print(xx$Event.s.)
# xxtemp <- gsub('COMMENT:','', as.character(xx$Event.s.))
# #xxtemp <- gsub('Details.+:', '', xxtemp, perl = TRUE)
# xxtemp <- strsplit(unlist(strsplit(as.character(xxtemp), '( \\* )|(; )|( : )|(, )')), ': ')
# xxtemp[[1]] <- c('name', xxtemp[[1]][1])
# xxtemp <- lapply(xxtemp, function(yy){
# if(length(yy) > 2){return(yy[length(yy)-1:0])} else {return(yy)}})
#
# xxtemp2 <- t(as.data.frame(xxtemp))
# xxtemp3 <- data.frame(as.list(setNames(xxtemp2[,2], xxtemp2[,1])))
# print(xxtemp3)
# return(xxtemp3)
# })(.))
return(list(site=metaData, sample=allData, files=downloadDOI))
#tester <- pangaear::pg_data('10.1594/PANGAEA.890471')
} |
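## Illustrative usage (hypothetical directory; the first call downloads ~80 PANGAEA tables):
## cpeat <- readCPEAT(dataDir = "data/CPEAT")
## str(cpeat$site); head(cpeat$sample)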
5dbbff784f4c7b5b340c7aee86345b4366c759aa | 5db2138d26423f514ac44162f52dfae296697f96 | /bubble.gsadf and Copula.r | 5638be37fc474cdd95bb9b3efc848c2f9b2b5674 | [] | no_license | hpompom/financal-bubble | 94f4764a94537420745afec68e9fc3fe03353f10 | 692a8c46d3746f86167e1b32033b668e2adfd7fa | refs/heads/master | 2020-07-09T15:20:07.650556 | 2019-08-28T07:26:45 | 2019-08-28T07:26:45 | 204,008,670 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,453 | r | bubble.gsadf and Copula.r | library(readxl)
library(ggplot2)
library(exuber)
library(MultipleBubbles)
library(forecast)
library(fBasics)
library(VineCopula)
library(rugarch)
library(FinTS)
library(urca)
library(TSA)
library(xts)
library(pastecs)
library(tseries)
library(car)
library(lmtest)
library(PerformanceAnalytics) # load package
par(family='STKaiti') # use a CJK font so the Chinese plot labels display correctly
# `re` and `adf` are constructed further below; run these two correlation plots after building them
chart.Correlation(re, histogram=TRUE)
chart.Correlation(adf, histogram=TRUE)
####
set.seed(1000) ## set the random seed
####
logre<-function(a){
  re<-c() ## log returns
for(i in 1:(length(a)-1)){
re<-c(re,log(a[i+1]/a[i]))
}
return(re)
}
####
tj<-function(a){
mean1<-mean(a)
sd1<-sd(a)
kur1<- kurtosis(a)
ske1<- skewness(a)
sha1 <- shapiro.test(a)$p.value
gg <- data.frame(mean1,sd1,kur1,ske1,sha1)
return(gg)
}
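# tj() returns mean, sd, kurtosis, skewness and the Shapiro-Wilk p-value; it is not
# called below, but e.g. lapply(re, tj) would tabulate these once `re` is built.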
#####
#####
jmall <- read_excel("~/Desktop/桌面/建模/数据/亚太指数.xlsx",
sheet = "剔除后", col_types = c("date",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))#载入数据
jmall<-as.data.frame(jmall)
jmallnotime<-jmall[,-1] # drop the date column
###################################
# series prefixes: rb=Japan, hg=South Korea, xg=Hong Kong, yd=India, sh=Shanghai, sz=Shenzhen, ml=Malaysia, flb=Philippines, tg=Thailand, ynn=Indonesia, xjp=Singapore, yn=Vietnam
rbre<-logre(jmall[,2]);hgre<-logre(jmall[,3]);xgre<-logre(jmall[,4]);ydre<-logre(jmall[,5]);shre<-logre(jmall[,6]);szre<-logre(jmall[,7])
mlre<-logre(jmall[,8]);flbre<-logre(jmall[,9]);tgre <- logre(jmall[,10]);ynnre<-logre(jmall[,11]);xjpre <- logre(jmall[,12]);ynre <- logre(jmall[,13])
###################################
jmadf<-radf(jmallnotime,lag=1) # BSADF statistics
jmcv<-mc_cv(length(jmallnotime[,1]),minw = 64) # Monte Carlo critical values
autoplot(jmadf,cv=jmcv,select = TRUE) # plot
saveadf<-data.frame(jmadf$bsadf,jmcv$bsadf_cv[(1:872),2],jmall[(66:length(jmall[,1])),1])
write.table(saveadf,file = "~/desktop/saveadf.csv",sep=",")
## descriptive statistics
re<-data.frame(rbre,hgre,xgre,ydre,szre,shre,mlre,flbre,tgre,ynnre,xjpre,ynre)
boxre<-data.frame(收益率=c(rbre,hgre,xgre,ydre,shre,szre,mlre,flbre,tgre,ynnre,xjpre,ynre),class=c(rep("日经225",times=length(rbre))
,rep("韩国综指",times=length(rbre)),rep("恒生指数",times=length(rbre)),rep("孟买30",times=length(rbre)),rep("深圳成指",times=length(rbre))
,rep("上证指数",times=length(rbre)),rep("吉隆坡指数",times=length(rbre)),rep("马尼拉指数",times=length(rbre)),rep("泰国指数",times=length(rbre))
,rep("雅加达指数",times=length(rbre)),rep("新加坡STI",times=length(rbre)),rep("胡志明指数",times=length(rbre))))
# boxplot of the returns
names(boxre)<-c("收益率","地区")
boxre$地区<-factor(boxre$地区)
ggplot(data=boxre,aes(x=地区,y=收益率))+
theme(text = element_text(family = 'STKaiti'))+
geom_boxplot()
### extract the BSADF series
rbadf<-jmadf$bsadf[,1]
hgadf<-jmadf$bsadf[,2]
xgadf<-jmadf$bsadf[,3]
ydadf<-jmadf$bsadf[,4]
shadf<-jmadf$bsadf[,5]
szadf<-jmadf$bsadf[,6]
mladf<-jmadf$bsadf[,7]
flbadf<-jmadf$bsadf[,8]
tgadf<-jmadf$bsadf[,9]
ynnadf<-jmadf$bsadf[,10]
xjpadf<-jmadf$bsadf[,11]
ynadf<-jmadf$bsadf[,12]
boxadf<-data.frame(收益率=c(rbadf,hgadf,xgadf,ydadf,shadf,szadf,mladf,flbadf,tgadf,ynnadf,xjpadf,ynadf),class=c(rep("日经225",times=length(rbadf))
,rep("韩国综指",times=length(rbadf)),rep("恒生指数",times=length(rbadf)),rep("孟买30",times=length(rbadf)),rep("深圳成指",times=length(rbadf))
,rep("上证指数",times=length(rbadf)),rep("吉隆坡指数",times=length(rbadf)),rep("马尼拉指数",times=length(rbadf)),rep("泰国指数",times=length(rbadf))
,rep("雅加达指数",times=length(rbadf)),rep("新加坡STI",times=length(rbadf)),rep("胡志明指数",times=length(rbadf))))
adf<-data.frame(rbadf,hgadf,xgadf,ydadf,shadf,szadf,mladf,flbadf,tgadf,ynnadf,xjpadf,ynadf)
colnames(re)<-c("日经225","韩国综指","恒生指数","孟买30","深圳成指","上证指数","吉隆坡指数","马尼拉指数","泰国指数","雅加达指数","新加坡STI","胡志明指数"
)
colnames(adf)<-c("日经225bsadf","韩国综指bsadf","恒生指数bsadf","孟买30bsadf","上证指数bsadf"
,"深圳成指bsadf","吉隆坡指数bsadf","马尼拉指数bsadf","泰国综指bsadf"
,"雅加达指数bsadf","新加坡STIbsadf","胡志明指数bsadf")
# boxplot of the BSADF statistics
names(boxadf)<-c("指数BSADF","地区")
boxadf$地区<-factor(boxadf$地区)
ggplot(data=boxadf,aes(x=地区,y=指数BSADF))+
theme(text = element_text(family = 'STKaiti'))+
geom_boxplot()
# ARCH-effect tests
#BSADF
ArchTest(rbadf)
ArchTest(hgadf)
ArchTest(xgadf)
ArchTest(ydadf)
ArchTest(shadf)
ArchTest(szadf)
###
# stationarity (ADF) tests for the return series
summary(ur.df(rbre,type = c("trend")))
summary(ur.df(hgre,type = c("trend")))
summary(ur.df(ydre,type = c("trend")))
summary(ur.df(xgre,type = c("trend")))
summary(ur.df(shre,type = c("trend")))
summary(ur.df(szre,type = c("trend")))
summary(ur.df(mlre,type = c("trend")))
summary(ur.df(flbre,type = c("trend")))
summary(ur.df(tgre,type = c("trend")))
summary(ur.df(ynnre,type = c("trend")))
summary(ur.df(ynre,type = c("trend")))
summary(ur.df(xjpre,type = c("trend")))
### ACF and PACF of the returns
acf(rbre)
pacf(rbre)
acf(hgre)
pacf(hgre)
acf(xgre)
pacf(xgre)
acf(ydre)
pacf(ydre)
acf(shre)
pacf(shre)
acf(szre)
pacf(szre)
acf(mlre)
pacf(mlre)
acf(flbre)
pacf(flbre)
acf(tgre)
pacf(tgre)
acf(ynnre)
pacf(ynnre)
acf(ynre)
pacf(ynre)
acf(xjpre)
pacf(xjpre)
### fit mean-variance models
######################## mean equation - variance equation - error distribution
gjr_sstd0_1.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(0,1)),
distribution.model = "sstd" )
gjr_sstd0_3.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(0,3)),
distribution.model = "sstd" )
gjr_sstd2_2.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(2,2)),
distribution.model = "sstd" )
gjr_sstd3_1.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(3,1)),
distribution.model = "sstd" )
gjr_sstd1_0.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(1,0)),
distribution.model = "sstd" )
gjr_sstd0_2.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(0,2)),
distribution.model = "sstd" )
gjr_sstd1_2.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(1,2)),
distribution.model = "sstd" )
gjr_sstd1_1.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(1,1)),
distribution.model = "sstd" )
gjr_sstd6_0.spec = ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(6,0)),
distribution.model = "sstd" )
## mean and volatility modeling
##################################################################
arma_gjr_rbre<-ugarchfit(spec=gjr_sstd3_1.spec,data = rbre)# Japan
arma_gjr_hgre<-ugarchfit(spec=gjr_sstd0_1.spec,data = hgre)# South Korea
arma_gjr_xgre<-ugarchfit(spec=gjr_sstd0_1.spec,data = xgre)# Hong Kong
arma_gjr_ydre<-ugarchfit(spec=gjr_sstd0_3.spec,data = ydre)# India
arma_gjr_shre<-ugarchfit(spec=gjr_sstd0_1.spec,data = shre)# Shanghai
arma_gjr_szre<-ugarchfit(spec=gjr_sstd2_2.spec,data = szre)# Shenzhen
arma_gjr_mlre<-ugarchfit(spec=gjr_sstd1_0.spec,data = mlre)# Malaysia
arma_gjr_flbre<-ugarchfit(spec=gjr_sstd0_2.spec,data = flbre)# Philippines
arma_gjr_tgre<-ugarchfit(spec=gjr_sstd2_2.spec,data = tgre)# Thailand
arma_gjr_ynnre<-ugarchfit(spec=gjr_sstd1_2.spec,data = ynnre)# Indonesia
arma_gjr_ynre<-ugarchfit(spec=gjr_sstd2_2.spec,data = ynre)# Vietnam
arma_gjr_xjpre<-ugarchfit(spec=gjr_sstd6_0.spec,data = xjpre)# Singapore
##################################################################
#standardized residuals
residual_rbre<-residuals(arma_gjr_rbre,standardize=TRUE)
residual_hgre<-residuals(arma_gjr_hgre,standardize=TRUE)
residual_xgre<-residuals(arma_gjr_xgre,standardize=TRUE)
residual_ydre<-residuals(arma_gjr_ydre,standardize=TRUE)
residual_shre<-residuals(arma_gjr_shre,standardize=TRUE)
residual_szre<-residuals(arma_gjr_szre,standardize=TRUE)
residual_mlre<-residuals(arma_gjr_mlre,standardize=TRUE)
residual_flbre<-residuals(arma_gjr_flbre,standardize=TRUE)
residual_tgre<-residuals(arma_gjr_tgre,standardize=TRUE)
residual_ynnre<-residuals(arma_gjr_ynnre,standardize=TRUE)
residual_ynre<-residuals(arma_gjr_ynre,standardize=TRUE)
residual_xjpre<-residuals(arma_gjr_xjpre,standardize=TRUE)
## data wrangling: unwrap the xts objects into plain numeric vectors
residual_rbre<-data.frame(residual_rbre)[,1]
residual_hgre<-data.frame(residual_hgre)[,1]
residual_xgre<-data.frame(residual_xgre)[,1]
residual_ydre<-data.frame(residual_ydre)[,1]
residual_shre<-data.frame(residual_shre)[,1]
residual_szre<-data.frame(residual_szre)[,1]
residual_mlre<-data.frame(residual_mlre)[,1]
residual_flbre<-data.frame(residual_flbre)[,1]
residual_ynnre<-data.frame(residual_ynnre)[,1]
residual_ynre<-data.frame(residual_ynre)[,1]
residual_xjpre<-data.frame(residual_xjpre)[,1]
residual_tgre<-data.frame(residual_tgre)[,1]
######################### ARCH-effect tests on the standardized residuals ########################
residualall<-data.frame(residual_rbre,residual_hgre,
                        residual_xgre,residual_ydre,
                        residual_shre,residual_szre,
                        residual_mlre,residual_flbre,
                        residual_tgre,residual_ynnre,
                        residual_ynre,residual_xjpre)
apply(residualall, 2, ArchTest)
###################################################################
##### K-S tests of the standardized residuals against the uniform(0,1) distribution
ks.test(residual_rbre,runif(1000))
ks.test(residual_hgre,runif(1000))
ks.test(residual_xgre,runif(1000))
ks.test(residual_ydre,runif(1000))
ks.test(residual_shre,runif(1000))
ks.test(residual_szre,runif(1000))
ks.test(residual_mlre,runif(1000))
ks.test(residual_flbre,runif(1000))
ks.test(residual_tgre,runif(1000))
ks.test(residual_ynnre,runif(1000))
ks.test(residual_ynre,runif(1000))
ks.test(residual_xjpre,runif(1000))
###### residual series after the probability integral transform; uniform(0,1) tests
#### probability integral transform
residual_rbre_01<-pobs(residual_rbre)#1 Japan
residual_hgre_01<-pobs(residual_hgre)#2 South Korea
residual_xgre_01<-pobs(residual_xgre)#3 Hong Kong
residual_ydre_01<-pobs(residual_ydre)#4 India
residual_shre_01<-pobs(residual_shre)#5 Shanghai
residual_szre_01<-pobs(residual_szre)#6 Shenzhen
residual_mlre_01<-pobs(residual_mlre)#7 Malaysia
residual_flbre_01<-pobs(residual_flbre)#8 Philippines
residual_tgre_01<-pobs(residual_tgre)#9 Thailand
residual_ynnre_01<-pobs(residual_ynnre)#10 Indonesia
residual_ynre_01<-pobs(residual_ynre)#11 Vietnam
residual_xjpre_01<-pobs(residual_xjpre)#12 Singapore
######## test against the uniform(0,1) distribution
ks.test(residual_rbre_01,runif(1000))
ks.test(residual_hgre_01,runif(1000))
ks.test(residual_xgre_01,runif(1000))
ks.test(residual_ydre_01,runif(1000))
ks.test(residual_shre_01,runif(1000))
ks.test(residual_szre_01,runif(1000))
ks.test(residual_mlre_01,runif(1000))
ks.test(residual_flbre_01,runif(1000))
ks.test(residual_tgre_01,runif(1000))
ks.test(residual_ynnre_01,runif(1000))
ks.test(residual_ynre_01,runif(1000))
ks.test(residual_xjpre_01,runif(1000))
###### fit an R-vine copula to the return residuals
recopula<-data.frame(residual_rbre_01,residual_hgre_01,
residual_xgre_01,residual_ydre_01,
residual_shre_01,residual_szre_01,
residual_mlre_01,residual_flbre_01,
residual_tgre_01,residual_ynnre_01,
residual_ynre_01,residual_xjpre_01)
RVM_re<-RVineStructureSelect(recopula,familyset = NA,type=0,progress = TRUE)
plot(RVM_re)
###### fit the copula to the BSADF series
######
acf(rbadf)
pacf(rbadf)
acf(hgadf)
pacf(hgadf)
acf(xgadf)
pacf(xgadf)
acf(ydadf)
pacf(ydadf)
acf(shadf)
pacf(shadf)
acf(szadf)
pacf(szadf)
acf(mladf)
pacf(mladf)
acf(flbadf)
pacf(flbadf)
acf(tgadf)
pacf(tgadf)
acf(ynnadf)
pacf(ynnadf)
acf(ynadf)
pacf(ynadf)
acf(xjpadf)
pacf(xjpadf)
####ar(1)
arma_rbadf<-arma(rbadf,order = c(1,0))
arma_hgadf<-arma(hgadf,order = c(1,0))
arma_xgadf<-arma(xgadf,order = c(1,0))
arma_ydadf<-arma(ydadf,order = c(1,0))
arma_shadf<-arma(shadf,order = c(1,0))
arma_szadf<-arma(szadf,order = c(1,0))
arma_mladf<-arma(mladf,order = c(1,0))
arma_xjpadf<-arma(xjpadf,order = c(1,0))
arma_ynnadf<-arma(ynnadf,order = c(1,0))
arma_ynadf<-arma(ynadf,order = c(1,0))
arma_tgadf<-arma(tgadf,order = c(1,0))
arma_flbadf<-arma(flbadf,order = c(1,0))
#####
## ARCH-effect tests
ArchTest(residuals(arma_rbadf))
ArchTest(residuals(arma_hgadf))
ArchTest(residuals(arma_xgadf))
ArchTest(residuals(arma_ydadf))
ArchTest(residuals(arma_shadf))
ArchTest(residuals(arma_szadf))
ArchTest(residuals(arma_mladf))
ArchTest(residuals(arma_ynnadf))
ArchTest(residuals(arma_ynadf))
ArchTest(residuals(arma_tgadf))
ArchTest(residuals(arma_xjpadf))
ArchTest(residuals(arma_flbadf))
#### ARCH effects are present, i.e. the series are heteroskedastic
# since GJR-GARCH reduces to plain GARCH when gamma = 0, we model with GJR-GARCH
gjr_t.spec<-ugarchspec(variance.model = list(model="gjrGARCH",
garchOrder=c(1,1)), mean.model = list(armaOrder=c(1,0)),
distribution.model = "std" )
### tried norm, std and ged error distributions; Student-t (std) was selected
ar_gjr_rbadf<-ugarchfit(spec=gjr_t.spec,data=rbadf)
ar_gjr_hgadf<-ugarchfit(spec=gjr_t.spec,data=hgadf)
ar_gjr_xgadf<-ugarchfit(spec=gjr_t.spec,data=xgadf)
ar_gjr_ydadf<-ugarchfit(spec=gjr_t.spec,data=ydadf)
ar_gjr_shadf<-ugarchfit(spec=gjr_t.spec,data=shadf)
ar_gjr_szadf<-ugarchfit(spec=gjr_t.spec,data=szadf)
ar_gjr_mladf<-ugarchfit(spec=gjr_t.spec,data=mladf)
ar_gjr_tgadf<-ugarchfit(spec=gjr_t.spec,data=tgadf)
ar_gjr_ynnadf<-ugarchfit(spec=gjr_t.spec,data=ynnadf)
ar_gjr_ynadf<-ugarchfit(spec=gjr_t.spec,data=ynadf)
ar_gjr_xjpadf<-ugarchfit(spec=gjr_t.spec,data=xjpadf)
ar_gjr_flbadf<-ugarchfit(spec=gjr_t.spec,data=flbadf)
#### standardized residuals
# extract the standardized residuals
residual_rbadf<-residuals(ar_gjr_rbadf,standardize=TRUE)
residual_hgadf<-residuals(ar_gjr_hgadf,standardize=TRUE)
residual_xgadf<-residuals(ar_gjr_xgadf,standardize=TRUE)
residual_ydadf<-residuals(ar_gjr_ydadf,standardize=TRUE)
residual_shadf<-residuals(ar_gjr_shadf,standardize=TRUE)
residual_szadf<-residuals(ar_gjr_szadf,standardize=TRUE)
residual_mladf<-residuals(ar_gjr_mladf,standardize=TRUE)
residual_tgadf<-residuals(ar_gjr_tgadf,standardize=TRUE)
residual_ynnadf<-residuals(ar_gjr_ynnadf,standardize=TRUE)
residual_ynadf<-residuals(ar_gjr_ynadf,standardize=TRUE)
residual_xjpadf<-residuals(ar_gjr_xjpadf,standardize=TRUE)
residual_flbadf<-residuals(ar_gjr_flbadf,standardize=TRUE)
## data wrangling: unwrap the xts objects into plain numeric vectors
residual_rbadf<-data.frame(residual_rbadf)[,1]
residual_hgadf<-data.frame(residual_hgadf)[,1]
residual_xgadf<-data.frame(residual_xgadf)[,1]
residual_ydadf<-data.frame(residual_ydadf)[,1]
residual_shadf<-data.frame(residual_shadf)[,1]
residual_szadf<-data.frame(residual_szadf)[,1]
residual_tgadf<-data.frame(residual_tgadf)[,1]
residual_xjpadf<-data.frame(residual_xjpadf)[,1]
residual_ynadf<-data.frame(residual_ynadf)[,1]
residual_ynnadf<-data.frame(residual_ynnadf)[,1]
residual_mladf<-data.frame(residual_mladf)[,1]
residual_flbadf<-data.frame(residual_flbadf)[,1]
############
install.packages("mFilter")
#### probability integral transform
rbadf_01<-pobs(residual_rbadf)#1 Japan
hgadf_01<-pobs(residual_hgadf)#2 South Korea
xgadf_01<-pobs(residual_xgadf)#3 Hong Kong
ydadf_01<-pobs(residual_ydadf)#4 India
shadf_01<-pobs(residual_shadf)#5 Shanghai
szadf_01<-pobs(residual_szadf)#6 Shenzhen
mladf_01<-pobs(residual_mladf)#7 Malaysia
flbadf_01<-pobs(residual_flbadf)#8 Philippines
tgadf_01<-pobs(residual_tgadf)#9 Thailand
ynnadf_01<-pobs(residual_ynnadf)#10 Indonesia
ynadf_01<-pobs(residual_ynadf)#11 Vietnam
xjpadf_01<-pobs(residual_xjpadf)#12 Singapore
## Kolmogorov-Smirnov tests
ks.test(rbadf_01,runif(1000))
ks.test(hgadf_01,runif(1000))
ks.test(xgadf_01,runif(1000))
ks.test(ydadf_01,runif(1000))
ks.test(shadf_01,runif(1000))
ks.test(szadf_01,runif(1000))
ks.test(mladf_01,runif(1000))
ks.test(flbadf_01,runif(1000))
ks.test(tgadf_01,runif(1000))
ks.test(ynnadf_01,runif(1000))
ks.test(ynadf_01,runif(1000))
ks.test(xjpadf_01,runif(1000))
###### fit the copula to the BSADF residuals
adfcopula<-data.frame(rbadf_01,hgadf_01,
xgadf_01,ydadf_01,
shadf_01,szadf_01,
mladf_01,flbadf_01,
tgadf_01,ynnadf_01,
ynadf_01,xjpadf_01)
# note: the vine below is fit to the PIT of the raw BSADF series (adf0_1), not to adfcopula above
adf<-data.frame(rbadf,hgadf,xgadf,ydadf,shadf,szadf,mladf,flbadf,tgadf,ynnadf,ynadf,xjpadf)
adf0_1<-apply(adf,2,pobs)
ks<-function(a){
ks.test(a,runif(10000))
}
apply(adf0_1, 2, ks)
###
RVM_adf<-RVineStructureSelect(adf0_1,familyset = NA,type=0,progress = TRUE)
|
253df967031156270df9eaa381f994db6c7f0165 | 9da1a7d2f925c855ea096e8d334cb3960e168feb | /man/process_reads-function.Rd | 6a80200d32be4f0a6cda7a0845417d03b5e6c57c | [] | no_license | rpolicastro/gostripes | 6f31621da63fd9930fb55cd5be7e841a1235661a | 285c58efaf8a298dc05f36d7a7a31844258d1859 | refs/heads/master | 2020-12-21T05:01:31.697569 | 2020-12-10T19:09:13 | 2020-12-10T19:09:13 | 236,315,120 | 3 | 1 | null | 2020-12-10T19:09:15 | 2020-01-26T13:21:52 | R | UTF-8 | R | false | true | 2,151 | rd | process_reads-function.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fastq_processing.R
\name{process_reads}
\alias{process_reads}
\title{Process Reads}
\usage{
process_reads(go_obj, outdir, contamination_fasta, cores = 1)
}
\arguments{
\item{go_obj}{gostripes object}
\item{outdir}{output directory for filtered and trimmed reads}
\item{contamination_fasta}{fasta file containing contaminants to remove, such as rRNA}
\item{cores}{Number of CPU cores/threads to use}
}
\value{
gostripes object and processed fastq files
}
\description{
This function will perform quality control and the appropriate processing steps for STRIPE-seq data.
First, proper R1 read structure is checked by looking for 'NNNNNNNNTATAGGG'.
This corresponds to the 8 base UMI, the spacer (TATA), and the ribo-Gs (GGG) used for template switching.
Second, UMI-tools is used to trim and stash the 8 base UMI into the FASTQ read name.
Third, the remaining spacer and ribo-Gs are trimmed.
Finally, TagDust2 is used to remove any contaminant reads, such as rRNA.
}
\details{
There will be several output files generated in the specified \strong{outdir}.
'stashed_*' FASTQ files are create after the UMI is added to the read name.
'trimmed_*' FASTQ files have the spacer and ribo-Gs trimmed off.
'decon_*' FASTQ files have the contaminants removed,
and are the files used in read alignment.
The \strong{contamination_fasta} file must be a properly formatted FASTA file.
It is recommended for this file to contain at least rRNA reads,
as they tend to be the most common and confounding contaminant.
Try to limit the number of contaminants as a large number
of entries may cause slow performance on this step.
}
\examples{
R1_fastq <- system.file("extdata", "S288C_R1.fastq", package = "gostripes")
R2_fastq <- system.file("extdata", "S288C_R2.fastq", package = "gostripes")
rRNA <- system.file("extdata", "Sc_rRNA.fasta", package = "gostripes")
sample_sheet <- tibble::tibble(
"sample_name" = "stripeseq", "replicate_ID" = 1,
"R1_read" = R1_fastq, "R2_read" = R2_fastq
)
go_object <- gostripes(sample_sheet) \%>\%
process_reads("./scratch/cleaned_fastq", rRNA)
}
|
085e01728c333201ab89d0056e16e9df67da76c8 | ebab6c7b1d1192822a4d7a32262c8d716180e33d | /plot1.R | 8e0b12557675954d41f291cca1c88b4608d72d9e | [] | no_license | lgy-hz/Exploratory-Data-Analysis-Project-1 | 5d719e4ef29d9a45696fc403f24c8333436651df | 98eaddf07047fd37444887e1655cb87757c0dfbb | refs/heads/master | 2016-09-03T01:38:18.688467 | 2015-08-09T07:51:55 | 2015-08-09T07:51:55 | 40,429,454 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 482 | r | plot1.R |
file<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file, destfile= "data.zip",method="curl")
unzip("data.zip")
a<- read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings= "?",stringsAsFactors = FALSE)
b<-a[a$Date== "1/2/2007",]
b<-rbind(b,a[a$Date== "2/2/2007",])
png("plot1.png",width = 480, height = 480)
with(b,hist(Global_active_power,col="red",main= "Global Active Power"))
dev.off()
|
189fd823843d943c1e9f6de1b2e2e8b646644542 | 6f0e911ea8753d23bc8fbdfe15758768586838fa | /R/normalize_functions.R | cbaec715185aed26dd4adf0d8e527e44cf0bfc62 | [] | no_license | kdaily/MEMA | 0223092dd491eea7383b374a11dfb15ee43a7893 | 7d22734de9caf673406576d2a5e63810996011e9 | refs/heads/master | 2020-04-13T11:40:19.329909 | 2015-08-02T21:57:06 | 2015-08-02T21:57:06 | 42,480,633 | 0 | 0 | null | 2015-09-14T22:28:18 | 2015-09-14T22:28:17 | R | UTF-8 | R | false | false | 3,058 | r | normalize_functions.R | #Normalization functions for processing MEMAs
#' Normalize the proliferation ratio signal to the collagen 1 values
#' @param x a dataframe or datatable with column names ProliferationRatio
#' and ShortName. ShortName must include at least one entry of COL1 or COL I.
#' @return A numeric vector of the ProliferationRatio values divided by the median collagen
#' 1 proliferation value
#' @export
normProfToCol1 <- function(x){
col1Median <- median(x$ProliferationRatio[x$ShortName %in% c("COL1", "COL I")],na.rm = TRUE)
normedProliferation <- x$ProliferationRatio/col1Median
}
#' Normalize to a base MEP
#'
#' Normalizes one channel of values for all MEPs in a multi-well plate to one
#' base MEP.
#'
#' @param DT A \code{data.table} that includes a numeric value column to be
#' normalized, a \code{ECMpAnnotID} column that has the printed ECM names and a
#' \code{Growth.Factors} or \code{LigandAnnotID} column that has the growth factor names.
#' @param value The name of the column of values to be normalized
#' @param baseECM A regular expression for the name or names of the printed ECM(s) to be normalized against
#' @param baseGF A regular expression for the name or names of the soluble growth factors to be normalized against
#' @return A numeric vector of the normalized values
#'
#' @section Details: \code{normWellsWithinPlate} normalizes the value column of
#' all MEPs by dividing by the median value of the replicates of the MEP that
#' is the pairing of baseECM with baseGF.
#' @export
normWellsWithinPlate <- function(DT, value, baseECM, baseGF) {
if(!c("ECMpAnnotID") %in% colnames(DT)) stop(paste("DT must contain a ECMpAnnotID column."))
if(!c(value) %in% colnames(DT)) stop(paste("DT must contain a", value, "column."))
if("LigandAnnotID" %in% colnames(DT)){
valueMedian <- median(unlist(DT[(grepl(baseECM, DT$ECMpAnnotID) & grepl(baseGF,DT$LigandAnnotID)),value, with=FALSE]), na.rm = TRUE)
} else if (c("Growth.Factors") %in% colnames(DT)) {
valueMedian <- median(unlist(DT[(grepl(baseECM, DT$ECMpAnnotID) & grepl(baseGF,DT$Growth.Factors)),value, with=FALSE]), na.rm = TRUE)
} else stop (paste("DT must contain a Growth.Factors or LigandAnnotID column."))
normedValues <- DT[,value,with=FALSE]/valueMedian
return(normedValues)
}
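# Illustrative call (hypothetical data; "FBS" stands in for a real ligand/growth factor name):
# DT$NormedValue <- normWellsWithinPlate(DT, value = "ProliferationRatio",
#                                        baseECM = "COL1", baseGF = "FBS")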
#' Create a median normalized loess model of an array
#'
#'@param data A dataframe with ArrayRow, ArrayColumn and signal intensity columns
#'@param value The column name of the signal intensity column
#'@param span The span value passed to loess. Values between 0 and 1 determine the
#'proportion of the population to be included in the loess neighborhood.
#'@return a vector of median normalized loess values of the signal
#'@export
loessModel <- function(data, value, span){
dataModel <- loess(as.formula(paste0(value," ~ ArrayRow+ArrayColumn")), data,span=span)
dataPredicted <- predict(dataModel)
predictedMedian <- median(dataPredicted, na.rm = TRUE)
dataNormed <- dataPredicted/predictedMedian
}
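# Illustrative call (hypothetical signal column): loessModel(spotData, value = "Intensity", span = 0.1)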
|
56d45b71505c634e5ea4d68973b7414a15356a6d | f9099901637a813dfb3d39ed8f9dc06dddb1e8f5 | /R/00_testing.R | 620fbb019937209f4bc9e512b52a7d081691d1e5 | [] | no_license | jessdiallo/testing | 8f4a1c72983103ca154269defc256cac53479e9f | e28e8aabeda236e777809504a2de86129fb3c778 | refs/heads/main | 2023-02-25T20:32:57.994338 | 2021-01-23T20:09:16 | 2021-01-23T20:09:16 | 328,758,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 95 | r | 00_testing.R | # this is a test script
## addition
1 + 2
## multiplication
1 * 2
## subtraction
1 - 2
|
3ae5cd551484b56d72dc14363053071759ae384b | 08d26deb861c447fa86dc7f4d58ac2b17e1905d3 | /cachematrix.R | 2adbcdbc38629c22b8f147459298c886b527c533 | [] | no_license | youngokkim/ProgrammingAssignment2 | e06e4290a7e8076fc84eafd2f28ed9582e7b5a1b | 5cf57df0c3f21edef6682d7e39779ef92c8cddee | refs/heads/master | 2021-01-01T19:08:12.705885 | 2017-07-28T03:45:34 | 2017-07-28T03:45:34 | 98,514,334 | 0 | 0 | null | 2017-07-27T08:47:58 | 2017-07-27T08:47:58 | null | UTF-8 | R | false | false | 1,699 | r | cachematrix.R | ## Assignments Part2 for R-Programming Week3
## @written by Youngok Kim, joylife052@gmail.com
## makeCacheMatrix creates a special "matrix", which is really a list containing
## a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse matrix
## 4. get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
    setsolve <- function(inv) m <<- inv
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve calculates the inverse matrix of the special "matrix" created
## with the makeCacheMatrix() function. However, it first checks to see if the
## inverse matrix has already been calculated. If so, it gets the inverse matrix
## from the cache and skips the computation. Otherwise, it checks that the data is a square
## matrix and that its inverse exists. If so, it calculates the inverse matrix of the data
## and sets the value of the inverse matrix in the cache via the setsolve()
## function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
if(nrow(data) == ncol(data)) { # check square matrix, or not
if(det(data) != 0) { # if exist inverse matrix
m <- solve(data, ...)
x$setsolve(m)
} else{
message("does not exist inverse matrix")
}
} else {
message("not square matrix")
}
m
}
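## Illustrative usage:
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse and caches it
## cacheSolve(cm)   # second call prints "getting cached data"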
|
47826e56f8b3eb1babc15aa1e0417de58afc436e | aafb44d8881e86da345ee0438f73e200b840a778 | /R/select.sample.group.R | abc58650a1cccdfeb7e76307dd19bad586486c5e | [] | no_license | cran/RPPanalyzer | 57738f8309a4b42115bc7119eaf5936f6dd72c47 | 7c6bfda1bfd4989a8afffed015f18f64a696c489 | refs/heads/master | 2023-08-31T15:52:38.080682 | 2023-08-28T12:30:02 | 2023-08-28T13:30:37 | 17,682,834 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,584 | r | select.sample.group.R | #'
#' The function selects subgroups of samples based on different parameters which are listed in the sampledescription
#' @param params: a list of sampledescription column names and associated values
#' @param combine: logical value indicating if the union (combine is TRUE) or the intersection (combine is FALSE) should be considered. The default value is FALSE.
#'
`select.sample.group` <-
function(x,params=list(tissue=c("T", "N")), combine=F) {
# check if every params are column names of the sampledescription
if(!all(names(params) %in% colnames(x[[4]]))) {
stop("Following parameters are not contained in the sampledescription: ", paste(names(params)[!(names(params) %in% colnames(x[[4]]))],collapse=" "), "\n")
}
# create an initial index vector depending if the
# the boolean indices should be combined (logical OR) or intersect (logical AND)
if(combine) {
dat.lines <- rep(F, nrow(x[[4]]))
}
else {
dat.lines <- rep(T, nrow(x[[4]]))
}
# iterate over all given column names
for (p in names(params)) {
temp.lines <- x[[4]][,p] %in% params[[p]]
if (combine) {
dat.lines <- dat.lines | temp.lines
}
else {
dat.lines <- dat.lines & temp.lines
}
}
  # use the index vector to filter the matrices with the expression values and the variances
x[[1]] <- x[[1]][dat.lines,]
x[[2]] <- x[[2]][dat.lines,]
x[[4]] <- x[[4]][dat.lines,]
# return the filtered matrix
return(x)
}
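# Illustrative call (hypothetical RPPanalyzer list whose sampledescription has a "tissue" column):
# tumors <- select.sample.group(x, params = list(tissue = c("T")), combine = FALSE)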
|
c1a27d93d500658159f10b4509dfc5033ea1e132 | 7be81350dd4f0e33d675ba5ac316cf96774a6fed | /clasesGustavo/TareasHogar/Tarea20210917/111_rpart_default.r | 424eecb89d6a78c6f60b1c63a68eba1a59af00b0 | [] | no_license | gerbeldo/labo2021 | aa3ccb20501099de6ca3bb5f05afdddc1fa63764 | 97796e15fd77ab2b19fcd731f42378980ad659d7 | refs/heads/master | 2023-08-21T15:53:31.767994 | 2021-10-20T14:46:53 | 2021-10-20T14:46:53 | 400,648,848 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 963 | r | 111_rpart_default.r | #limpio la memoria
rm(list=ls()) #remove all objects
gc() #garbage collection
#Basic decision tree with the rpart library
require("data.table")
require("rpart")
setwd("~/buckets/b1/") #Establezco el Working Directory
#cargo los datos de 202011 que es donde voy a ENTRENAR el modelo
dtrain <- fread("./datasetsOri/paquete_premium_202011.csv")
#fit the model
modeloA <- rpart("clase_ternaria ~ .",
data= dtrain,
xval= 0,
cp= -1 )
#apply modeloA to the 202101 data
dapply <- fread("./datasetsOri/paquete_premium_202101.csv")
prediccionA <- predict( modeloA, dapply , type = "prob")
dapply[ , prob_baja2 := prediccionA[, "BAJA+2"] ]
dapply[ , Predicted := as.numeric(prob_baja2 > 0.025) ]
entregaA <- dapply[ , list( numero_de_cliente, Predicted) ]
fwrite( entregaA, file="./kaggle/111_rpart_default.csv", sep="," )
#now the prediction must be uploaded to Kaggle to see how it scores
|
a554cc0b8a4549b693a7c1f1ab94cd95fd690b7e | ec5db8e0e525c5198b59a14ac0c7ac00864fde28 | /scripts/R-scripts/basic_smooth-norm.R | b3e723b70a004cdecb2b834ac5197f16e2c1bad6 | [] | no_license | ChromatinCardiff/DanielPassProcessingPipelines | bdc1a02f6ec0fb99387db85574cdeefe7c247525 | c3b02255a69f81c888f5cba14fb1b031ce99dc2e | refs/heads/master | 2021-06-02T15:13:18.958935 | 2020-01-29T01:07:27 | 2020-01-29T01:07:27 | 32,514,237 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 531 | r | basic_smooth-norm.R | library(ggplot2)
library(zoo)
require(scales)
library(plyr)
x <- read.table("~/Projects/ALD/ALD_histogram.txt", header=FALSE)
summary(x)
fn <- function(x) x/sum(x)
### Normalise column V3
x.nor <- ddply(x, "V1", transform, V3norm=fn(V3))
### SMOOTHING
# note: ddply() sorts by V1, so this assumes x is already ordered by V1 (otherwise the rows misalign)
x$av <- ave(x.nor$V3norm, x.nor$V1, FUN= function(x) rollmean(x, k=30, fill=NA))
x$lav <-log10(x$av)
summary(x)
p <- ggplot(data=x, aes(V2, lav, colour=V1))
p +
geom_line() +
scale_x_continuous(breaks = pretty_breaks(n=12)) +
scale_colour_brewer(palette="Dark2")
|
d8d45b6184c293666345f9476004e74c9cf0b52b 9c57c741b59f615f7c786da87338402eca05f8e6 /2-Absent_bones.R 77810f137f1f72be23bd0a93f52a2bbe7ab97778 ["MIT"] | permissive | AgneseLan/ontogeny-asymmetry-dolphin | 4861b193346bdd793c112b00230c5616cafcc55d | db9e69d4cef5de94aba15b422eee38dae300307f | refs/heads/main | 2023-04-06T20:11:37.967266 | 2022-07-19T16:29:28 | 2022-07-19T16:29:28 | 451,504,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,315 | r | 2-Absent_bones.R | 
#===========================================================#
# #
# CURVES AND POINTS ANALYSES - ODONTOCETE FAMILIES #
# #
#===========================================================#
#CH.2 - Assigning coordinates to landmarks of absent bones
#Code adapted from Ellen Coombs
#LOAD LIBRARIES ----
#always do this first!!
library(tidyverse)
library(Morpho)
library(geomorph)
library(Rvcg)
library(paleomorph)
library(EMMLi)
library(qgraph)
library(ape)
library(geiger)
library(abind)
library("devtools")
library(SURGE)
library(magick)
#ABSENT BONES ----
#Add the data for absent bones for specific species
#Both curves and fixed LMs
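#Approach: every fixed landmark and curve semilandmark of an absent bone is collapsed onto one
#anchor landmark of a neighbouring bone (see the substitution loops below, e.g. point 6 for the left nasal)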
###SET WD to root from console!! -->
#Import LMs list - curves listed in curve_table
LM_table <- read_csv("Data/LMs.csv")
#Import sets of absent curves and LMs and open file to check what bones have absent data
absent_curves <- read.csv("Data/absent_curves.csv")
absent_LMs <- read.csv("Data/absent_LMs.csv")
View(absent_curves)
View(absent_LMs)
##Absent curves
#Look for bones with absent curves in curve list - column names
# colnames(absent_curves)
curve_nasal_l <- my_curves$Curves[which(curve_table$bone%in%c("nasal_l"))]%>%unlist(.)%>%unique(.)%>%sort(.)
curve_nasal_r <- my_curves$Curves[which(curve_table$bone%in%c("nasal_r"))]%>%unlist(.)%>%unique(.)%>%sort(.)
curve_interparietal <- my_curves$Curves[which(curve_table$bone%in%c("interparietal"))]%>%unlist(.)%>%unique(.)%>%sort(.)
curve_basioccipital_ll <- my_curves$Curves[which(curve_table$bone%in%c("basioccipital_ll"))]%>%unlist(.)%>%unique(.)%>%sort(.)
curve_basioccipital_lr <- my_curves$Curves[which(curve_table$bone%in%c("basioccipital_lr"))]%>%unlist(.)%>%unique(.)%>%sort(.)
#Create new object for absent curves
absentcurve <- slidedlms
absentcurve[curve_nasal_l,,4] #specimen number on the end, test if it worked - check specimen number from absent_curves file
#Loop to substitute coordinates for semilandmarks in absent bone curves
#Put first landmark of curve in matrix for each bone
#Left nasal
for (i in 1:nrow(absent_curves)){
if( !is.na(absent_curves$nasal_l[i]))
absentcurve[curve_nasal_l,c(1:3),i] <- matrix(absentcurve[6,c(1:3),i], nrow = length(curve_nasal_l), ncol=3, byrow=TRUE)
}
#Right nasal
for (i in 1:nrow(absent_curves)){
if( !is.na(absent_curves$nasal_r[i]))
absentcurve[curve_nasal_r,c(1:3),i] <- matrix(absentcurve[15,c(1:3),i], nrow = length(curve_nasal_r), ncol=3, byrow=TRUE)
}
#Interparietal
for (i in 1:nrow(absent_curves)){
if( !is.na(absent_curves$interparietal[i]))
absentcurve[curve_interparietal,c(1:3),i] <- matrix(absentcurve[57,c(1:3),i], nrow = length(curve_interparietal), ncol=3, byrow=TRUE)
}
#Basioccipital lateral left
for (i in 1:nrow(absent_curves)){
if( !is.na(absent_curves$basioccipital_ll[i]))
absentcurve[curve_basioccipital_ll,c(1:3),i] <- matrix(absentcurve[56,c(1:3),i], nrow = length(curve_basioccipital_ll), ncol=3, byrow=TRUE)
}
#Basioccipital lateral right
for (i in 1:nrow(absent_curves)){
if( !is.na(absent_curves$basioccipital_lr[i]))
absentcurve[curve_basioccipital_lr,c(1:3),i] <- matrix(absentcurve[54,c(1:3),i], nrow = length(curve_basioccipital_lr), ncol=3, byrow=TRUE)
}
absentcurve[curve_nasal_l,,4] #check if it worked with specimen number with missing curve
##Absent LMs
#Look for absent bones first
# colnames(absent_LMs)
LMs_nasal_l <- LM_table$lm[which(LM_table$bone%in%c("nasal_l"))]
LMs_nasal_r <- LM_table$lm[which(LM_table$bone%in%c("nasal_r"))]
LMs_interparietal <- LM_table$lm[which(LM_table$bone%in%c("interparietal"))]
#Create new object for absent LMs
absentLM <- absentcurve
absentLM[LMs_nasal_l,,4] #specimen number on the end, test if it worked - check specimen number from absent_LMs file
#Loop to substitute coordinates for landmarks in absent bones
#Left nasal
for (i in 1:nrow(absent_LMs)){
if( !is.na(absent_LMs$nasal_l[i]))
    absentLM[LMs_nasal_l,c(1:3),i] <- matrix(absentLM[6,c(1:3),i], nrow = length(LMs_nasal_l), ncol=3, byrow=TRUE) #the first index (here 6) is the landmark used to fill in the absent bone
}
#Right nasal
for (i in 1:nrow(absent_LMs)){
if( !is.na(absent_LMs$nasal_r[i]))
absentLM[LMs_nasal_r,c(1:3),i] <- matrix(absentLM[15,c(1:3),i], nrow = length(LMs_nasal_r), ncol=3, byrow=TRUE)
}
#Interparietal
for (i in 1:nrow(absent_LMs)){
if( !is.na(absent_LMs$interparietal[i]))
absentLM[LMs_interparietal,c(1:3),i] <- matrix(absentLM[57,c(1:3),i], nrow = length(LMs_interparietal), ncol=3, byrow=TRUE)
}
absentLM[LMs_nasal_l,,4] #specimen number on the end, test if it worked - check specimen number from absent_LMs file
absentLM[curve_nasal_l,,4] #check curves still ok
#Create new object for analyses with all missing data, include only shape data
final_dataset <- absentLM
#Check plotting of absent bones
#Look up number for specimens with absent bones in absent_curves and absent_LMs
###SET WD to ply from console!! -->
#Plot
checkLM(final_dataset, path="", pt.size = 15, suffix=".ply", render = "s", begin = 50, point = "p")
#List of points and curves for different bones - useful for plots
nasals <- c(LMs_nasal_l, LMs_nasal_r, curve_nasal_l, curve_nasal_r)
supraoccipital <- c(LM_table$lm[which(LM_table$bone%in%c("supraoccipital"))],
my_curves$Curves[which(curve_table$bone%in%c("supraoccipital"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
basioccipital <- c(LM_table$lm[which(LM_table$bone%in%c("basioccipital", "basioccipital_lr", "basioccipital_ll"))],
my_curves$Curves[which(curve_table$bone%in%c("basioccipital", "basioccipital_ll", "basioccipital_lr"))]) %>%
unlist(.)%>%unique(.)%>%sort(.)
maxilla <- c(LM_table$lm[which(LM_table$bone%in%c("maxilla"))],
my_curves$Curves[which(curve_table$bone%in%c("maxilla"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
premaxilla <- c(LM_table$lm[which(LM_table$bone%in%c("premaxilla"))],
my_curves$Curves[which(curve_table$bone%in%c("premaxilla"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
condyles <- c(LM_table$lm[which(LM_table$bone%in%c("condyle"))],
my_curves$Curves[which(curve_table$bone%in%c("condyle"))])%>% unlist(.)%>%unique(.)%>%sort(.)
orbit <- c(LM_table$lm[which(LM_table$bone%in%c("frontal", "jugal"))],
my_curves$Curves[which(curve_table$bone%in%c("frontal"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
squamosal <- c(LM_table$lm[which(LM_table$bone%in%c("squamosal"))],
my_curves$Curves[which(curve_table$bone%in%c("squamosal"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
palatine <- c(LM_table$lm[which(LM_table$bone%in%c("palatine"))],
my_curves$Curves[which(curve_table$bone%in%c("palatine"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
interparietal <- c(LMs_interparietal, curve_interparietal)
exoccipital <- c(LM_table$lm[which(LM_table$bone%in%c("exoccipital"))],
my_curves$Curves[which(curve_table$bone%in%c("exoccipital"))]) %>% unlist(.)%>%unique(.)%>%sort(.)
#Check
spheres3d(final_dataset[nasals,,30], radius=1, color = "red")
spheres3d(final_dataset[-nasals,,30], radius=1, color = "grey")
#Save coordinates to file
save(final_dataset, file = "~/final_dataset.RData")
######
#Next - ch. 3 - GPA and PCA
|
80a82b1c55b4c6addda7ac380ad450f177622cdb | ed370813a204a903d8c5f951e50c89080a30f725 | /tests/test-all.R | 7c9528a7ee70de84d5aa92448f5140a6247239e1 | [] | no_license | jayemerson/STV | f174b56529596d02c77ccaf85533a833da0d97d3 | 32940b4e746ded9e69f5f9d35233555337f76e63 | refs/heads/master | 2021-07-18T10:39:34.498545 | 2021-02-01T00:12:06 | 2021-02-01T00:12:06 | 91,092,742 | 3 | 5 | null | 2018-02-17T23:03:11 | 2017-05-12T13:09:50 | R | UTF-8 | R | false | false | 36 | r | test-all.R | library(testthat)
test_check("STV")
|
8c4362b8c3bf1d0b43f7e04794292ff3c76e3bdd | 2b7696de761986e7c295da36201f06fca701f059 | /man/hs5_hs2.Rd | 2b3b1adcc8b1646d5b1710b9cc4c8e9acd8d91f5 | [] | no_license | cran/concordance | 130b5cadccfce9cc5ef98432fc2f938c75eebd93 | b8d1e592399f05941ce24a4afd96007b8dae0ec5 | refs/heads/master | 2021-05-04T11:23:30.586684 | 2020-04-24T15:10:08 | 2020-04-24T15:10:08 | 49,413,285 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 648 | rd | hs5_hs2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hs5_hs2}
\alias{hs5_hs2}
\title{HS5-HS2 Concordance}
\format{
A data frame with 5388 rows and 6 variables:
\describe{
\item{HS5_6d}{6-digit HS5 Code}
\item{HS5_4d}{4-digit HS5 Code}
\item{HS5_2d}{2-digit HS5 Code}
\item{HS2_6d}{6-digit HS2 Code}
\item{HS2_4d}{4-digit HS2 Code}
\item{HS2_2d}{2-digit HS2 Code}
}
}
\source{
\url{https://unstats.un.org/unsd/trade/classifications/correspondence-tables.asp}
}
\usage{
hs5_hs2
}
\description{
A dataset containing concordances between HS5 and HS2 classification.
}
\keyword{datasets}
|
83bd07beffaecda944fb316fc2f057bd5c1b44d4 808e37074a3652ea10ae384f4747bd9b2e3607fd /man/df_cea_psa.Rd 1e92c421062e28217d4895134d055f7c17b63c96 ["MIT"] | permissive | fthielen/ce16_modelling_course | 248a9eab42d32009e9b417a0fe44e339bf410717 | 62bb04618abfd5ff603b128885b68cda8dc52d9d | refs/heads/master | 2023-05-05T17:17:50.542522 | 2021-05-17T10:03:49 | 2021-05-17T10:03:49 | 368,138,798 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 801 | rd | df_cea_psa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_cea_psa.R
\docType{data}
\name{df_cea_psa}
\alias{df_cea_psa}
\title{Cost-effectiveness results from probabilistic analysis}
\format{A \code{data.frame} with with 2 rows, ane per strategy and
5 variables:
\describe{
\item{Strategy}{Strategy name}
\item{Cost}{Cost per strategy}
\item{Effect}{QALYs per strategy}
\item{Inc_Cost}{Incremental cost}
\item{Inc_Effect}{Incremental QALYs}
\item{ICER}{Incremental cost-effectivenes ratio (ICER)}
\item{Status}{Domination status. ND, not dominated (i.e., on the
cost-effectivenes efficiency frontier); D, strongly dominated; d,
dominated by extension}
}}
\usage{
df_cea_psa
}
\description{
A dataset with cost and effectiveness outputs for each strategy.
}
\keyword{datasets}
|
024032019b326a61867393dc582a10fac9d45dc0 | eb3d7cbbb4ded421fa211384f5ee1df251646577 | /R/geos-misc.R | 62fa966af2a6543f896bc3934f49c6a4585ebdb2 | [] | no_license | SymbolixAU/geom | f2ab70e698881079ec5c09ac7c6ee7d74420bf87 | 45a913ddb3942807635547ce471a08d0e5f3af62 | refs/heads/master | 2021-05-19T19:02:29.902255 | 2020-04-01T04:52:55 | 2020-04-01T04:52:55 | 252,074,507 | 0 | 0 | null | 2020-04-01T04:46:56 | 2020-04-01T04:46:55 | null | UTF-8 | R | false | false | 837 | r | geos-misc.R |
#' Area, length, and distance
#'
#' @inheritParams geos_intersection
#'
#' @return
#' - [geos_area()] computes areas for polygons, or returns 0 otherwise.
#' - [geos_length()] computes the length of the boundary for polygons, or the length
#' of the line for linestrings.
#' - [geos_distance()] returns the smallest possible distance between the two
#' geometries.
#'
#' @export
#'
#' @examples
#' geos_area(geo_wkt("POLYGON ((0 0, 10 0, 0 10, 0 0))"))
#' geos_length(geo_wkt("POLYGON ((0 0, 10 0, 0 10, 0 0))"))
#' geos_distance(
#' geo_wkt("POLYGON ((0 0, 10 0, 0 10, 0 0))"),
#' geo_wkt("POINT (10 10)")
#' )
#'
geos_area <- function(x) {
cpp_area(x)
}
#' @rdname geos_area
#' @export
geos_length <- function(x) {
cpp_length(x)
}
#' @rdname geos_area
#' @export
geos_distance <- function(x, y) {
cpp_distance(x, y)
}
|
aa053186bde1c6e149a3a02c9305dae93c13dcfa | 28dcec41c7bf186f4ecee1ec5a20903c8839b65d | /NOBUILD/Sandbox/segall.R | fc845ddd59c939b1f0e91be0c7e2332c76c65502 | [] | no_license | abarbour/deform | f0cc416521fd5984636a86f23c5381962d9f2238 | 0a54d77b8d19d8efb8e944fe2e107aab0c6eb830 | refs/heads/master | 2022-03-20T06:36:41.132055 | 2022-02-07T21:57:23 | 2022-02-07T21:57:23 | 25,497,729 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,859 | r | segall.R | library(kook)
library(plyr)
library(TeachingDemos)
library(deform)
.x. <- sort(unique(c(-7:-3, seq(-3.90,3.90,by=0.15), 3:7)))
su <- surface_displacement(.x.*1e3, C.=1e13, z_src=0.7e3)
sut <- with(su, Tilt(x, z=uz))
sue <- with(su, Uniaxial_extension(x, X=ux))
F1 <- function(){
plot(c(NA,diff(ux)/diff(x),NA) ~ c(NA,x), su, type="s")
abline(v=0,h=0,col="grey")
}
F1()
F2 <- function(){
plot(uz ~ x, su, col=NA, ylim=c(-1,1)*20, xlim=c(-1,1)*7*1e3)
abline(v=0,h=0,col="grey")
lines(uz ~ x, su, type="l", pch=16, cex=1, lwd=2, col="grey")
text(2e3, -5, expression(U[Z]))
lines(ux ~ x, su, type="l", pch=16, col="blue", cex=1, lwd=2)
text(0, 6, expression(U[X]), col="blue", pos=2)
points(uxz.mag ~ x, su, col="red", pch=16, cex=0.6)
text(-3e3, 8, expression(abs(U[XZ])), col="red")
suppressWarnings(my.symbols(x=su$x, y=su$uxz.mag,
ms.arrows,
adj=0, col="red", inches=0.8, add=TRUE, angle=su$uxz.ang*pi/180))
lines(ztilt*1e2 ~ x, sut, type="h", lwd=5, col="lightgreen")
text(-1.1e3, 2, expression("Tilt" == 2%*%dU[Z]/dx), col="dark green", pos=2)
lines(ztilt*1e2 ~ x, sut, lwd=3,col="dark green")
lines(dXdx*1e2 ~ x, sue, type="h", lwd=4, col="grey60")
text(5e2, 2, expression(E[ee] == dU[X]/dx), pos=4)
lines(dXdx*1e2 ~ x, sue, lwd=3)
}
F2()
# Figs 7,8
mxx <- 50
.x.km. <- sort(unique(c((-1*mxx):-3, seq(-2.90,2.90,by=0.1), 3:mxx)))
.z.km. <- sort(unique(c(seq(0,3,by=0.25),seq(3,12,by=0.75))))
yr <- 365*86400
.time. <- seq(2,10,by=2)*10*yr
.Vdot. <- 2e6/yr # volume rate m^3/yr to m^3/s
.D. <- 1e3 # depth of burial
.L. <- 10e3 # length (Vdot/L is the average rate of fluid extraction per unit length)
.B. <- 0.6 # Skemptons coeff
.c. <- 0.1 # hydraulic diffusivity m^2/s
.Sources.x. <- 1e3*c(0)
.TwoSources.x. <- 1e3*c(0,20)
# for mass computation
.t. <- 100 # thickness
.phi. <- 0.2 #porosity
# for pressure computation
.mu. <- 5.6 #GPa -- shear modulus
# single source
zz2 <- timevarying_surface_displacement(.x.km.*1e3, .time., .Vdot., .B., .L., .D., .c., Pt.Sources.x=.Sources.x.)
F3 <- function(){
#matplot(.time./yr, t(zz2)*1e3, type="l", main="Subsidence, mm, Segall 1985, Fig 8B")
matplot(.x.km., zz2*1e3, type="l", col="black", main="Subsidence, mm, Segall 1985, Fig 8B", sub=Sys.time())
}
try(F3())
# multiple sources
zz2 <- timevarying_surface_displacement(.x.km.*1e3, .time., c(1,0.5)*.Vdot., .B., .L., .D., .c., Pt.Sources.x=.TwoSources.x.)
try(F3())
zz2t <- apply(zz2, 2, function(.z.) matrix(Tilt(.x.km.*1e3, z=.z.)$ztilt))
F3t <- function(){
#matplot(.time./yr, t(zz2t), type="l")
matplot(.x.km., zz2t*1e6, type="l", col="black", main="Tilt")
}
try(F3t())
zz3 <- timevarying_fluidmass(.x.km.*1e3, .time., .Vdot., .L., .t., .c., phi.=.phi.)
F4 <- function(){
#matplot(.time./yr, t(zz3)*1e2, type="l")
matplot(.x.km., zz3*1e2, type="l", col="black", main="t.v. Fluid mass change")
}
try(F4())
redo <- FALSE
if (!exists("zzp") | redo) zzp <- timevarying_porepressure(.x.km.*1e3, .z.km.*1e3, .time., .Vdot.*c(1,2), .B., .L., .D., .c., .t., .mu., Pt.Sources.x=.TwoSources.x.)
F5 <- function(do.log=FALSE){
#matplot(.time./yr, t(zz3)*1e2, type="l")
X<- zzp[,,length(.time.)]
if (do.log) X <- log10(abs(X))
matplot(x=.x.km., X, col=NA, main="t.v. Pore pressure")
aaply(zzp, 3, .fun = function(X) {
if (do.log) X <- log10(abs(X))
matplot(x=.x.km., X, type="l", add=TRUE)
return("x")
})
invisible()
}
#try(F5())
F5c <- function(){
layout(matrix(seq_len(dim(zzp)[3]), nrow=1))
aaply(zzp, 3, .fun = function(X) {
image(x=.x.km., y=.z.km., X, ylim=c(6,0), col = brewerRamp())
contour(x=.x.km., y=.z.km., X, ylim=c(6,0), add = TRUE)
abline(v=.TwoSources.x./1e3, col="grey", lwd=2)
abline(h=(.D.+c(-1*.t.,.t.)/2)/1e3, col="grey", lwd=2)
return("x")
})
invisible()
layout(matrix(1))
}
try(F5c())
|
6b103889658aef26588ee3c6cb8c946b905a1476 | 89491fef8c724a2500434f220780f3300017ff38 | /inst/pruebas/demoIRIS.R | 60083c2badbbcd44d18d6d65fa3e0c8340f5b1c3 | [] | no_license | cran/FKBL | ccafa5c7acbc14abad415b641d7d3e29004a658b | ec6c9300a8c01950db07ff57a93940b98936ed48 | refs/heads/master | 2016-09-05T20:16:08.923166 | 2007-03-31T00:00:00 | 2007-03-31T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 101 | r | demoIRIS.R |
source("script.R")
IRIS=read.table("../data/IRIS.tab")
#data(IRIS)
salida<-EXPERIMENT(IRIS) # run the experiment on the IRIS data
salida$e # inspect the "e" component of the returned object
ebcc8cb2ac23f50564310136e7043b632f2e8da7 | 571e295f9ad4ca5762f9ca05bbc4a51b29e97d7b | /STRING/stringSERVER.R | 81f89329fb11db9e4fe4427e1f65586fcc375701 | [] | no_license | estayless/comparison-matrices-admin | 8a2aab4086692d87dae26d2e4974786cd937fcae | d5531054f7f16f98863e4c1b2caed176fea84d67 | refs/heads/master | 2023-02-15T08:58:42.416078 | 2020-12-30T10:18:15 | 2020-12-30T10:18:15 | 325,518,749 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,212 | r | stringSERVER.R | # linksDetailed<-reactive({
# req(input$linksDetailed)
# df<-read.delim(input$linksDetailed$datapath, header = TRUE, sep = "")
# print(df)
# })
# # THIS WILL BE THE TRANSLATION USING PROTEIN INFO
# proteinInfo<-reactive({
# req(input$proteinInfo)
# df<-read.delim(input$proteinInfo$datapath, header = TRUE, sep = "\t")
# print(df)
# })
executeStringPopulation<-observeEvent(input$exeStringPop,{
# out<-tryCatch({stringPopulation(input$userSTRING, input$passwordSTRING, linksDetailed(), proteinInfo(), martData="hsapiens_gene_ensembl")},
# error = function(err) {
# errorCatch$val<-as.character(err)
# message("Error: Authentication failed.")
# },
# finally = invalidateLater(1))
print(input$linksDetailed)
print(input$proteinInfo)
showModal(modalDialog("Reading files", footer=NULL))
linksDetailed<-read.delim(as.character(input$linksDetailed$datapath), header = TRUE, sep = "")
proteinInfo<-read.delim(as.character(input$proteinInfo$datapath), header = TRUE, sep = "\t")
removeModal()
stringPopulation(input$userSTRING, input$passwordSTRING, linksDetailed, proteinInfo, martData="hsapiens_gene_ensembl")
}) |
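# A sketch of error handling for the stringPopulation() call above, modeled
# on the commented-out tryCatch block at the top of this file (this would
# wrap the call inside the observeEvent; showNotification is shiny's
# notification helper):
# tryCatch(
#   stringPopulation(input$userSTRING, input$passwordSTRING,
#                    linksDetailed, proteinInfo,
#                    martData = "hsapiens_gene_ensembl"),
#   error = function(err) {
#     showNotification(paste("Error:", conditionMessage(err)), type = "error")
#   }
# )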
fddb2c08fb24e9c8d505e0ac4bcb8d5f7f978e27 | 2764167b5743be62adadc491ec7dfde210e0703d | /man/BASICTOPOMAP.Rd | e2f32e9e2b91429b7cf2f9d6aa9e187942a42c0d | [] | no_license | cran/GEOmap | 528a4cbe293211d324405037eb280b415e65f62e | 0149894022496cee8237868b0bb693d00ef01e41 | refs/heads/master | 2023-08-18T14:47:52.021469 | 2023-08-13T12:40:21 | 2023-08-13T13:30:31 | 17,713,753 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,013 | rd | BASICTOPOMAP.Rd | \name{BASICTOPOMAP}
\alias{BASICTOPOMAP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Basic Topogrpahy Map}
\description{
Basic Topogrpahy Map
}
\usage{
BASICTOPOMAP(xo, yo, DOIMG, DOCONT, UZ, AZ, IZ, perim, PLAT, PLON,
PROJ = PROJ, pnts = NULL, GRIDcol = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{xo}{vector of x-coordinates}
\item{yo}{vector of y-coordinates}
\item{DOIMG}{logical, add image}
\item{DOCONT}{logical, add contours}
\item{UZ}{matrix of image values under sea level}
\item{AZ}{matrix of image values above sea level}
\item{IZ}{matrix of image values}
\item{perim}{perimeter vectors}
\item{PLAT}{latitudes for tic-marks}
\item{PLON}{longitude for tic-marks}
\item{PROJ}{projection list}
\item{pnts}{points to add to plot}
\item{GRIDcol}{color for grid}
}
\details{
The image matrices are expected to be processed (interpolated and split into above/below sea level) prior to calling this function; see the example below.
}
\value{
Graphical side effects.
}
\author{Jonathan M. Lees<jonathan.lees.edu>}
\seealso{DOTOPOMAPI, GEOTOPO}
\examples{
\dontrun{
library(geomapdata)
library(MBA) ## for interpolation
####### set up topo data
data(fujitopo)
##### set up map data
data('japmap', package='geomapdata' )
### target region
PLOC= list(LON=c(138.3152, 139.0214),
LAT=c(35.09047, 35.57324))
PLOC$x =PLOC$LON
PLOC$y =PLOC$LAT
#### set up projection
PROJ = setPROJ(type=2, LAT0=mean(PLOC$y) , LON0=mean(PLOC$x) )
########## select data from the topo data internal to the target
topotemp = list(lon=fujitopo$lon, lat= fujitopo$lat, z=fujitopo$z)
#### project target
A = GLOB.XY(PLOC$LAT , PLOC$LON , PROJ)
####### select topo
selectionflag = topotemp$lat>+PLOC$LAT[1] & topotemp$lat<=PLOC$LAT[2] &
topotemp$lon>+PLOC$LON[1] & topotemp$lon<=PLOC$LON[2]
### project topo data
B = GLOB.XY( topotemp$lat[selectionflag] ,topotemp$lon[selectionflag] , PROJ)
### set up out put matrix:
### xo = seq(from=range(A$x)[1], to=range(A$x)[2], length=200)
### yo = seq(from=range(A$y)[1], to=range(A$y)[2], length=200)
####### interpolation using akima
### IZ = interp(x=B$x , y=B$y, z=topotemp$z[selectionflag] , xo=xo, yo=yo)
DF = cbind(x=B$x , y=B$y , z=topotemp$z[selectionflag])
IZ = mba.surf(DF, 200, 200, extend=TRUE)$xyz.est
xo = IZ[[1]]
yo = IZ[[2]]
### image(IZ)
####### underwater section
UZ = IZ$z
UZ[IZ$z>=0] = NA
#### above sea level
AZ = IZ$z
AZ[IZ$z<=-.01] = NA
#### create perimeter:
perim= getGEOperim(PLOC$LON, PLOC$LAT, PROJ, 50)
### lats for tic marks:
PLAT = pretty(PLOC$LAT)
PLAT = c(min(PLOC$LAT),
PLAT[PLAT>min(PLOC$LAT) & PLAT<max(PLOC$LAT)],max(PLOC$LAT))
PLON = pretty(PLOC$LON)
### main program:
DOIMG = TRUE
DOCONT = TRUE
PNTS = NULL
BASICTOPOMAP(xo, yo , DOIMG, DOCONT, UZ, AZ, IZ, perim, PLAT, PLON,
PROJ=PROJ, pnts=NULL, GRIDcol=NULL)
### add in the map information
plotGEOmapXY(japmap, LIM=c(PLOC$LON[1], PLOC$LAT[1],PLOC$LON[2],
PLOC$LAT[2]) , PROJ=PROJ, add=TRUE )
}
}
\keyword{misc}
|
c1e817433dc4b5f53b73a3c4dfdec1e07cf8a61d | 161747aed56bfc7fbd17b87c60c25291ef12a579 | /CRAN_meta_analysis/server.R | f3c7fafec7aad03a5ffcadda6e79aaecd3ec2c47 | [] | no_license | juschu321/CRAN_meta | 62f3dc210eecd1684c2cb4d11531ff0bdcfca65f | 333375a95cd6606e16fc06b0c03b78516fc090ed | refs/heads/master | 2020-06-02T22:43:02.452187 | 2019-07-16T15:40:55 | 2019-07-16T15:40:55 | 191,332,470 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,298 | r | server.R | library(shiny)
library(shinydashboard)
library(ggplot2)
library(plotly)
server <- function(input, output, session) {
aggreggated_timeseries_data <- reactive({
selected_ctvs <- input$ctvs_select
selected_packages <- input$packages_select
date_slider <- input$year
selected_from <- input$year[1]
selected_to <- input$year[2]
filtered_data <-
filter_timeseries_data(
selected_from = selected_from,
selected_to = selected_to,
selected_packages = selected_packages,
selected_ctv = selected_ctvs
)
aggregated_data <- aggregate_timeseries_data(filtered_data = filtered_data)
aggregated_data
})
selected_ctvs <- reactive({
selected_ctvs <- input$ctvs_select
selected_ctvs
})
output$ctvs_select <- renderText({
selected_ctvs()
})
output$plot <- renderPlot({
data = aggreggated_timeseries_data()
ggplot(data) +
      geom_line(aes(month, total, color = ctv)) + # map the ctv column directly; data$ctv inside aes() bypasses ggplot's data masking
scale_x_date(
date_breaks = "1 year",
date_minor_breaks = "1 month",
date_labels = "%Y - %m"
)
})
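  # plotly is loaded above but not yet used; a minimal interactive variant
  # (hypothetical output id "plot_interactive" -- the UI would need a matching
  # plotlyOutput("plot_interactive")):
  output$plot_interactive <- renderPlotly({
    data <- aggreggated_timeseries_data()
    p <- ggplot(data) +
      geom_line(aes(month, total, color = ctv))
    ggplotly(p)
  })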
output$value <- renderPrint({
input$checkboxGroup
})
}
|
590ca89e0c65c4f9e7998882b5e03bf20b31e600 | 3998d54ca79a8382685426844d7c22d4fbb4429a | /setup_400m_aquifer_tube_model_domain/codes/ert_inland_bc.R | 570f58157006c78d164143cb3a339bcdbbbbc78e | [] | no_license | xuehangsong/dense_array | 5bbe30275a647536e880d7751e2f67b9c2c2c78d | 2a7584bc8e945fa8bd251a47048bb43fa96166b5 | refs/heads/master | 2021-07-16T02:33:44.584572 | 2020-09-19T17:58:02 | 2020-09-19T17:58:02 | 212,211,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,115 | r | ert_inland_bc.R | ## #This file is used for calculating transient boundary conditions
## #using universal kriging
###cov_model_sets = c('gaussian','wave','exponential','spherical')
###drift_sets = c(0,1)
rm(list=ls())
library(geoR)
library(rhdf5)
source("codes/ert_parameters.R")
H5close()
options(geoR.messages=FALSE)
input_folder = 'data/headdata4krige_Plume_2009_2017/'
output_folder = "ert_model/"
initial.h5 = "H_Initial_ERT.h5"
BC.h5 = "H_BC_ERT.h5"
## for grids
grid.x = 1.0
grid.y = 1.0
grid.nx = diff(range_x)/grid.x
grid.ny = diff(range_y)/grid.y
pred.grid.south = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),range_y[1]+grid.y/2) # for South boundary
pred.grid.north = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),range_y[2]-grid.y/2) # for North boundary
pred.grid.east = expand.grid(range_x[1]+grid.x/2,seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for East boundary
pred.grid.west = expand.grid(range_x[2]-grid.x/2,seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for West boundary
pred.grid.domain = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),
seq(range_y[1]+grid.y/2,range_y[2],grid.y))
colnames(pred.grid.south)=c('x','y')
colnames(pred.grid.north)=c('x','y')
colnames(pred.grid.east)=c('x','y')
colnames(pred.grid.west)=c('x','y')
colnames(pred.grid.domain)=c('x','y')
## time information
start.time = as.POSIXct("2015-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2017-07-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
dt = 3600 ##secs
times = seq(start.time,end.time,dt)
ntime = length(times)
time.id = seq(0,ntime-1,dt/3600) ##hourly boundary
origin.time = as.POSIXct("2008-12-31 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
## BC.south = array(NA,c(ntime,grid.nx))
## BC.north = array(NA,c(ntime,grid.nx))
## BC.east = array(NA,c(ntime,grid.ny))
## BC.west = array(NA,c(ntime,grid.ny))
BC.south = c()
BC.north = c()
BC.east = c()
BC.west = c()
avail.time.id = c()
for (itime in 1:ntime)
{
print(times[itime])
index = paste(as.character(difftime(times[itime],origin.time,tz="GMT",units="hours")),
"_",format(times[itime],"%d_%b_%Y_%H_%M_%S"),sep="")
data = read.table(paste(input_folder,'time',index,'.dat',sep=''),header=F,na.strings = "NaN")
if (!all(is.na(data[,3])))
{
avail.time.id = c(avail.time.id,time.id[itime])
        ### The x/y columns are swapped here
        ### because the input data file from Huiying is also reversed (why?)
data[,c(1,2,3)] = data[,c(2,1,3)]
colnames(data) = c('x','y','z')
data = as.geodata(data)
        ## These bins and the estimator.type were chosen by Xingyuan
if (nrow(data$coords)>27) {
bin1 = variog(data,uvec=c(0,50,100,seq(150,210,30),250,300),trend='cte',bin.cloud=T,estimator.type='modulus')
} else {
bin1 = variog(data,uvec=c(0,100,seq(150,210,30),250,300),trend='cte',bin.cloud=T,estimator.type='modulus')
}
initial.values <- expand.grid(max(bin1$v),seq(300))
wls = variofit(bin1,ini = initial.values,fix.nugget=T,nugget = 0.00001,fix.kappa=F,cov.model='exponential')
##check the varigram
if (itime %% 1000 == 1) {
jpeg(filename=paste('figures/Semivariance Time = ',format(times[itime],"%Y-%m-%d %H:%M:%S"),".jpg",sep=''),
width=5,height=5,units="in",quality=100,res=300)
plot(bin1,main = paste('Time = ',format(times[itime],"%Y-%m-%d %H:%M:%S"),sep=''),col='red', pch = 19, cex = 1, lty = "solid", lwd = 2)
text(bin1$u,bin1$v,labels=bin1$n, cex= 0.7,pos = 2)
lines(wls)
dev.off()
}
## Generate boundary and initial condition
kc.south = krige.conv(data, loc = pred.grid.south, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
kc.north = krige.conv(data, loc = pred.grid.north, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
kc.east = krige.conv(data, loc = pred.grid.east, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
kc.west = krige.conv(data, loc = pred.grid.west, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
BC.south = rbind(BC.south,kc.south$predict)
BC.north = rbind(BC.north,kc.north$predict)
BC.east = rbind(BC.east,kc.east$predict)
BC.west = rbind(BC.west,kc.west$predict)
if (itime==1)
{
kc.domain = krige.conv(data, loc = pred.grid.domain, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
h.initial = as.vector(kc.domain$predict)
dim(h.initial) = c(grid.nx,grid.ny)
}
}
}
time.id = avail.time.id
##Generate the initial condition hdf5 file for the domain.
if (file.exists(paste(output_folder,initial.h5,sep=''))) {
file.remove(paste(output_folder,initial.h5,sep=''))
}
h5createFile(paste(output_folder,initial.h5,sep=''))
h5createGroup(paste(output_folder,initial.h5,sep=''),'Initial_Head')
h5write(t(h.initial),paste(output_folder,initial.h5,sep=''),
'Initial_Head/Data',level=0)
fid = H5Fopen(paste(output_folder,initial.h5,sep=''))
h5g = H5Gopen(fid,'/Initial_Head')
h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Cell Centered')
h5writeAttribute.character(attr = "XY", h5obj = h5g, name = 'Dimension')
h5writeAttribute(attr = c(grid.x,grid.y), h5obj = h5g, name = 'Discretization')
h5writeAttribute(attr = 500.0, h5obj = h5g, name = 'Max Buffer Size')
h5writeAttribute(attr = c(range_x[1],range_y[1]), h5obj = h5g, name = 'Origin')
H5Gclose(h5g)
H5Fclose(fid)
##Generate the BC hdf5 file.
if (file.exists(paste(output_folder,BC.h5,sep=''))) {
file.remove(paste(output_folder,BC.h5,sep=''))
}
h5createFile(paste(output_folder,BC.h5,sep=''))
### write data
h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_South')
h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_South/Times',level=0)
h5write(BC.south,paste(output_folder,BC.h5,sep=''),'BC_South/Data',level=0)
h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_North')
h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_North/Times',level=0)
h5write(BC.north,paste(output_folder,BC.h5,sep=''),'BC_North/Data',level=0)
h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_East')
h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_East/Times',level=0)
h5write(BC.east,paste(output_folder,BC.h5,sep=''),'BC_East/Data',level=0)
h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_West')
h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_West/Times',level=0)
h5write(BC.west,paste(output_folder,BC.h5,sep=''),'BC_West/Data',level=0)
### write attribute
fid = H5Fopen(paste(output_folder,BC.h5,sep=''))
h5g.south = H5Gopen(fid,'/BC_South')
h5g.north = H5Gopen(fid,'/BC_North')
h5g.east = H5Gopen(fid,'/BC_East')
h5g.west = H5Gopen(fid,'/BC_West')
h5writeAttribute(attr = 1.0, h5obj = h5g.south, name = 'Cell Centered')
h5writeAttribute(attr = 'X', h5obj = h5g.south, name = 'Dimension')
h5writeAttribute(attr = grid.x, h5obj = h5g.south, name = 'Discretization')
h5writeAttribute(attr = 200.0, h5obj = h5g.south, name = 'Max Buffer Size')
h5writeAttribute(attr = range_x[1], h5obj = h5g.south, name = 'Origin')
h5writeAttribute(attr = 'h', h5obj = h5g.south, name = 'Time Units')
h5writeAttribute(attr = 1.0, h5obj = h5g.south, name = 'Transient')
h5writeAttribute(attr = 1.0, h5obj = h5g.north, name = 'Cell Centered')
h5writeAttribute(attr = 'X', h5obj = h5g.north, name = 'Dimension')
h5writeAttribute(attr = grid.x, h5obj = h5g.north, name = 'Discretization')
h5writeAttribute(attr = 200.0, h5obj = h5g.north, name = 'Max Buffer Size')
h5writeAttribute(attr = range_x[1], h5obj = h5g.north, name = 'Origin')
h5writeAttribute(attr = 'h', h5obj = h5g.north, name = 'Time Units')
h5writeAttribute(attr = 1.0, h5obj = h5g.north, name = 'Transient')
h5writeAttribute(attr = 1.0, h5obj = h5g.east, name = 'Cell Centered')
h5writeAttribute(attr = 'Y', h5obj = h5g.east, name = 'Dimension')
h5writeAttribute(attr = grid.y, h5obj = h5g.east, name = 'Discretization')
h5writeAttribute(attr = 200.0, h5obj = h5g.east, name = 'Max Buffer Size')
h5writeAttribute(attr = range_y[1], h5obj = h5g.east, name = 'Origin')
h5writeAttribute(attr = 'h', h5obj = h5g.east, name = 'Time Units')
h5writeAttribute(attr = 1.0, h5obj = h5g.east, name = 'Transient')
h5writeAttribute(attr = 1.0, h5obj = h5g.west, name = 'Cell Centered')
h5writeAttribute(attr = 'Y', h5obj = h5g.west, name = 'Dimension')
h5writeAttribute(attr = grid.y, h5obj = h5g.west, name = 'Discretization')
h5writeAttribute(attr = 200.0, h5obj = h5g.west, name = 'Max Buffer Size')
h5writeAttribute(attr = range_y[1], h5obj = h5g.west, name = 'Origin')
h5writeAttribute(attr = 'h', h5obj = h5g.west, name = 'Time Units')
h5writeAttribute(attr = 1.0, h5obj = h5g.west, name = 'Transient')
H5Gclose(h5g.south)
H5Gclose(h5g.north)
H5Gclose(h5g.east)
H5Gclose(h5g.west)
H5Fclose(fid)
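### The four attribute blocks above differ only in the group handle, the
### dimension label, the discretization and the origin; a helper along these
### lines (a sketch, using rhdf5 as loaded above) would remove the
### repetition in future revisions:
write.bc.attributes <- function(h5g, dim.label, disc, origin)
{
    h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Cell Centered')
    h5writeAttribute(attr = dim.label, h5obj = h5g, name = 'Dimension')
    h5writeAttribute(attr = disc, h5obj = h5g, name = 'Discretization')
    h5writeAttribute(attr = 200.0, h5obj = h5g, name = 'Max Buffer Size')
    h5writeAttribute(attr = origin, h5obj = h5g, name = 'Origin')
    h5writeAttribute(attr = 'h', h5obj = h5g, name = 'Time Units')
    h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Transient')
}
### e.g. write.bc.attributes(h5g.south, 'X', grid.x, range_x[1])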
save(list=ls(),file="results/ert.bc.r")
|
dab6ffbad1231225544f313bce1e417cf7bee723 | 921ef582b7c321e6cb94dd7f0a5b2404f0410c22 | /B1/Frequencies.Crosstabs.Descriptives_v2.R | 3e120c0946cebed52033957097c2d728be8d6455 | [] | no_license | thomasns2/rmodules | c7e12dc0682acfa3b6d093a5c40e30e503ea7e90 | b4ac3ca0806cdaadc3c12399b48d5f11009b563e | refs/heads/main | 2023-04-30T23:14:26.105167 | 2021-05-06T19:19:36 | 2021-05-06T19:19:36 | 360,587,142 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,099 | r | Frequencies.Crosstabs.Descriptives_v2.R | #########################################################
#########################################################
#iii. Frequencies, Crosstabs and Descriptives
#########################################################
#########################################################
getwd() # this function can be used to find your current working directory
#this is where R is looking to find files
#this directory contains the data we will work with
setwd("/Users/thomasns/Documents/0DevInternship/Modules/Modules")
#########################################################
#########################################################
#reading in data
#https://www.cdc.gov/nchs/nhis/2019nhis.htm
#the data can be downloaded from the link above
#the downloaded file should be saved in the directory specified above in setwd()
#########################################################
####################################################
df<-read.table("adult19.csv", #the dataset file in the current working directory
sep=",", #the character that separates values in the file (a comma here, because the file is csv)
header=TRUE) #does the file have column names as the first row?
#Descriptions of the 3 variables we will use in this demonstration are provided below
####################################################
#WEARGLSS_A
#Wear glasses/contact lenses
#1 Yes
#2 No
#7 Refused
#8 Not Ascertained
#9 Don't Know
#VISIONDF_A
#Do you have difficulty seeing, even when wearing glasses or contact lenses?
#1 No difficulty
#2 Some difficulty
#3 A lot of difficulty
#4 Cannot do at all
#7 Refused
#8 Not Ascertained
#9 Don't Know
#HEARAID_A
#Use hearing aid
#1 Yes
#2 No
#7 Refused
#8 Not Ascertained
#9 Don't Know
#HEARINGDF_A
#Do you have difficulty hearing even when using your hearing aid(s)?
#1 No difficulty
#2 Some difficulty
#3 A lot of difficulty
#4 Cannot do at all
#7 Refused
#8 Not Ascertained
#9 Don't Know
#First, we subset our 4 variables of interest
df2 <- df[,c("WEARGLSS_A", "VISIONDF_A", "HEARAID_A", "HEARINGDF_A")]
####################################################
#1. table(), describe(), and count()
####################################################
#Here we will examine three basic functions for exploring your data
#########################################################
#table()
#table() can be used to create a frequency distribution of a single column
#this may be familiar, as we have used table() in earlier modules as well
#be sure to use the "exclude=NULL" operator to include missing values in the frequency distribution
table(df2$WEARGLSS_A, exclude=NULL)
table(df2$VISIONDF_A, exclude=NULL)
table(df2$HEARAID_A, exclude=NULL)
table(df2$HEARINGDF_A, exclude=NULL)
#we can also save the output of table as a data.frame for later use
#column 1 of the table will list values, column 2 lists frequencies
WEARGLSS_A_Table<-data.frame(table(df2$WEARGLSS_A, exclude=NULL))
#referring to the coding scheme above, we see that these variables include missing values under the codes 7,8, and 9
#we set these to NA and then check the frequency distributions again
df2$WEARGLSS_A[df2$WEARGLSS_A>=7] <- NA
df2$VISIONDF_A[df2$VISIONDF_A>=7] <- NA
df2$HEARAID_A[df2$HEARAID_A>=7] <- NA
df2$HEARINGDF_A[df2$HEARINGDF_A>=7] <- NA
#Note that the missing codes are now grouped under NA
table(df2$WEARGLSS_A, exclude=NULL)
table(df2$VISIONDF_A, exclude=NULL)
table(df2$HEARAID_A, exclude=NULL)
table(df2$HEARINGDF_A, exclude=NULL)
#we can use table() with two variables to generate a crosstab or crosstabulation of two variables
#crosstabs are useful for examining the overlap between the categories of two variables
#significance tests of the differences between the number of people in each cell will be discussed in a later module
#values of WEARGLSS_A are positioned on the y axis of the table
#values of VISIONDF_A are positioned on the x axis of the table
#the number of subjects overlapping under each combination of categories is shown in the body of the table
table(df2$WEARGLSS_A, df2$VISIONDF_A, exclude=NULL)
#for example 17450 subjects wear glasses (WEARGLSS_A==1) and do not have difficulty seeing with glasses (VISIONDF_A==1)
#values of HEARAID_A are positioned on the y axis of the table
#values of HEARINGDF_A are positioned on the x axis of the table
#the number of subjects overlapping under each combination of categories is shown in the body of the table
table(df2$HEARAID_A, df2$HEARINGDF_A, exclude=NULL)
#for example 734 subjects use a hearing aid (HEARAID_A==1) and do not have difficulty hearing with a hearing aid (HEARINGDF_A==1)
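#to see proportions instead of raw counts, base R's prop.table() can wrap table()
#margin=1 gives row proportions, margin=2 gives column proportions, and omitting margin gives overall cell proportions
prop.table(table(df2$HEARAID_A, df2$HEARINGDF_A), margin = 1)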
#Sometimes it is useful to save the frequencies from table() for the purposes of creating a plot
#If a table() object is saved as a data.frame, it will display values of the variable in one column and the associated frequencies in another
WEARGLSS_A_Table<-data.frame(table(df2$WEARGLSS_A, exclude=NULL))
WEARGLSS_A_Table#the values of WEARGLSS_A are in column 1, the associated frequencies are in column 2
#Frequencies associated with combinations of values can also be saved in this format by creating a data.frame from crosstabs
WEARGLSS_A_by_VISIONDF_A_Table<-data.frame(table(df2$WEARGLSS_A, df2$VISIONDF_A, exclude=NULL))
WEARGLSS_A_by_VISIONDF_A_Table#Values of WEARGLSS_A are in column 1, values of VISIONDF_A are in column 2, the associated frequencies are in column 3
#########################################################
#count()
#count() from the plyr package produces similar output as data.frame(table())
#however, count() is written to be more efficient that table() in some cases
#it is recommended to use this format if constructing tables to display conditional overlap between many variables
#remove the hashtag and run the line below only once
#install.packages("plyr", dependencies = TRUE)#install plyr and the packages that it depends on
library(plyr)#load plyr
WEARGLSS_A_by_VISIONDF_A_Table2<-count(df2, #where the variables are stored
vars=c("WEARGLSS_A", "VISIONDF_A")#the variables to pull from df2
)#close the command
#count() also produces output that is labeled better than data.frame(table()) and excludes combinations where the frequency is 0
#see below
WEARGLSS_A_by_VISIONDF_A_Table
WEARGLSS_A_by_VISIONDF_A_Table2
#this procedure also works for tables of a single variable
WEARGLSS_A_Table2<-count(df2, vars=c("WEARGLSS_A"))
WEARGLSS_A_Table2
#we will return to these tables later when generating data visualizations
#for now, we will keep the output of count() and delete the output of data.frame(table())
rm(WEARGLSS_A_by_VISIONDF_A_Table)
rm(WEARGLSS_A_Table)
#########################################################
#install.packages("psych", dependencies = TRUE)#install psych and the packages that it depends on
library(psych)#load psych
#describe()
#describe() from the psych package can be used to calculate descriptive statistics for many variables at once
#describe() will generate descriptive statistics for any variable, but it is recommended to use it only with numeric, rather than categorical, variables
#by default, describe will output mean, sd, median, trimmed mean, median absolute deviation from the median, minimum, maximum, skew, kurtosis, and standard error
describe(df2)#generate descriptives for the variables in df2
#note that WEARGLSS_A and HEARAID_A are binary and should be excluded from calculation of descriptive statistics
#VISIONDF_A and HEARINGDF_A are ordinal, but for the purposes of this demonstration we will treat them as continuous variables
DescriptivesTable<-describe(df2[,c("VISIONDF_A", "HEARINGDF_A")])
DescriptivesTable#describe() automatically generates output in data.frame form
#columns from the descriptives table can be extracted using the brackets operator
DescriptivesTable<-DescriptivesTable[,c("n", "mean", "sd")]
DescriptivesTable
####################################################
#2. sjPlot and stargazer packages for creating and exporting tables and plots
####################################################
#sjPlot is a convenient package for creating nicely formatted tables and plots
#sjPlot relies on ggplot2 to perform its functions, and so we will install that here as well
#sjPlot functions are designed with very specific kinds of tables or plots in mind
#there is limited flexibility with this workflow, but the functions are more user-friendly than some alternatives
#remove the hashtag and run this only once
#install.packages(c("sjPlot", "ggplot2"), dependencies = T)
#
library(sjPlot)
library(ggplot2)
#########################################################
#tab_stackfrq() can be used to generate frequency tables for multiple variables that have the same response options
#frequency table for variables with the same response options
APA_freqTable<-tab_stackfrq(df2[,c("VISIONDF_A", "HEARINGDF_A")], #identify the variables to be tabelled
var.labels = c("Vision Difficulty", "Hearing Difficulty"), #provide labels for the tables
value.labels = c("No difficulty","Some difficulty","A lot of difficulty","Cannot do at all"), #provide labels for the values of the variables
show.n=TRUE)#show N, in addition to percentage
APA_freqTable#the table is shown in the "Viewer" window in the bottom right corner
#if we add the "file=" argument, we can export the table out of R as an html file
#the file can be opened with a word processor like Word
APA_freqTable<-tab_stackfrq(df2[,c("VISIONDF_A", "HEARINGDF_A")], #identify the variables to be tabelled
var.labels = c("Vision Difficulty", "Hearing Difficulty"), #provide labels for the tables
value.labels = c("No difficulty","Some difficulty","A lot of difficulty","Cannot do at all"), #provide labels for the values of the variables
show.n=TRUE,#show N, in addition to percentage
file="APA_freqTable.doc")#a file name for the exported table object, to be opened with a word processor like Word
#########################################################
#sjPlot is also a convenient package for creating data visualizations
#there are many functions to quickly generate plots
#plot_frq() can be used to produce bar charts
#sjPlot uses the package ggplot2 to create visualizations, which we will discuss further in the next section
#generate a histogram of HEARINGDF_A
BarPlot<-plot_frq(df2$HEARINGDF_A,
title="Hearing Difficulty", #the title of the plot
type="bar", #the type of plot. other options include "bar", "dot", "histogram", "line", "density", "boxplot", "violin"
axis.title = "Category",#the axis title
axis.labels = c("No difficulty","Some difficulty","A lot of difficulty","Cannot do at all"))#the labels of the values of HEARINGDF
BarPlot#entering the plot object into the console will display it in the Plots tab in the lower right corner
#the bar plot can be exported by calling a graphics function
#TIFF files are lossless, meaning that they do not compress the image but maintain the highest image quality. This results in a larger file size.
#Journals will often request TIFF files. PNG and JPG are other options that will result in smaller file size, but lower image quality.
tiff("BarPlot1.tiff", width = 2250, height = 2550, units = 'px', res = 300)# TIFF image device is initialized for export to the current working directory. The width, height, units, and res argument can be adjusted to produce plots of varying size and resolution. These specifications match the journal Drug and Alcohol Dependence, but other journals may require other specifications.
BarPlot #Once the image device is initialized, printing the plot with send the plot to the image device.
dev.off()#close out the device with the dev.off() function.
#tab_stackfrq() and plot_frq() are just two examples of sjPlot functions
#often, there will be a function designed to match the table or plot you are looking for
#more information about these functions and the sjPlot package as a whole can be obtained using the following commands
?sjPlot
?plot_frq
?tab_stackfrq
?plot_model()
#########################################################
#stargazer
#stargazer is a more flexible package for producing tables
#any data.frame object can be converted into an APA format table using stargazer
#remove the hashtag and run this only once
#install.packages("stargazer", dependencies=TRUE)
#
library(stargazer)
stargazer(DescriptivesTable, #the object to create a table from
type="html", #html format to export into a Word document
summary=FALSE, #export the data.frame as-is. There are a variety of summary functions that can also be performed instead
out="DescriptivesTable.doc")#the output file
#open DescriptivesTable.doc in your working directory to see the table
####################################################
#3. Introduction to ggplot2 for visualizing data
####################################################
#sjPlot generates plots by calling the package ggplot2
#using ggplot2 directly allows for more flexibility in plot generation
#ggplot uses the following general structure to form commands
#ggplot(DATA, aes(AESTHETICS)) + GEOMETRY_FUNCTION()
#DATA is the data to plot. ggplot2 generally works better with summaries of your data than the raw data itself
#AESTHETICS are used to map the x axis, y axis, and legend of your plot
#GEOMETRY_FUNCTION determines the kind of plot that ggplot will generate. There are different geometry functions for different plots
#########################################################
#first, we will generate a bar plot that is similar to the one we generated with sjPlot
#first, create a summary of your data to use as input
HEARINGDF_A_Frequencies<-count(df2$HEARINGDF_A)
HEARINGDF_A_Frequencies
#if you want to include the missing values, this value must be changed from NA or ggplot will remove the row
HEARINGDF_A_Frequencies[5,1] <- "Missing" #Change row 5 column 1 from NA to "Missing"
HEARINGDF_A_Frequencies
#########################################################
#A basic bar plot
ggplot(HEARINGDF_A_Frequencies, #the data to plot
aes(x=x, y=freq))+ #plot column "x" on the x axis and column "freq" on the y axis
geom_bar(stat="identity") #use a bar plot and plot the data without transformation or aggregation
#raw data can be supplied to ggplot2 if stat="identity" is changed to stat="bin" and the argument for the y axis in aes() is removed
#most ggplot2 functions work easier with summary data though, so for the sake of illustration all plots will be generated from summaries
#########################################################
#adding labels and colors to the bar plot
#this defines a new object "apatheme" that contains a series of options to generate a plot in APA format
#run these commands and add this to any plot
#from https://stackoverflow.com/questions/60591014/r-add-tweaks-to-interaction-plot-with-ggplot
apatheme<-theme_bw()+
theme(panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_blank(),
axis.line=element_line(),
text=element_text(family='Times'),
legend.title=element_blank())
#ggplot2 indexes colors with numeric codes
#below is a sequence of color codes that are colorblind friendly, recommended for use in publications
#http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
cbPalette <- c("#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7","#E69F00", "#999999")
#to plot labels instead of numeric codes on the x axis, we edit the data that we are plotting
#first, convert numeric values to text labels
HEARINGDF_A_Frequencies[1, "x"] <- "No difficulty" #row 1, column x
HEARINGDF_A_Frequencies[2, "x"] <- "Some difficulty" #row 2, column x
HEARINGDF_A_Frequencies[3, "x"] <- "A lot of difficulty" #row 3, #column x
HEARINGDF_A_Frequencies[4, "x"] <- "Cannot do at all" #row 4, column x
#next, convert the x column to a factor.
#By default, ggplot2 would reorder the bars in the plot by alphabetical order if it were a character vector.
#To keep an order that aligns with the labels numeric value, specify the levels of the factor in the desired order
HEARINGDF_A_Frequencies$x<-factor(HEARINGDF_A_Frequencies$x,
levels=c("No difficulty","Some difficulty","A lot of difficulty","Cannot do at all", "Missing"))
#other options can be defined in the call to ggplot()
BarPlot2<-ggplot(HEARINGDF_A_Frequencies, #the data to plot
aes(x=x, y=freq, fill=x))+ #plot column "x" on the x axis and column "freq" on the y axis, fill color of bars by levels of x
geom_bar(stat="identity")+#use a bar plot and plot the data without transformation or aggregation
ggtitle("Frequency Distribution")+ #main title
xlab("Difficulty Hearing")+ #x axis label
ylab("Frequency")+#y axis label
scale_fill_manual(values=cbPalette)+#use cbPalette to define colors associated with the levels of x
apatheme #apatheme options, from above
BarPlot2
tiff("BarPlot2.tiff", width = 2250, height = 2550, units = 'px', res = 300)# TIFF image device is initialized for export to the current working directory. The width, height, units, and res argument can be adjusted to produce plots of varying size and resolution. These specifications match the journal Drug and Alcohol Dependence, but other journals may require other specifications.
BarPlot2 #Once the image device is initialized, printing the plot with send the plot to the image device.
dev.off()#close out the device with the dev.off() function.
#a quick guide to many different options in ggplot can be found here
#https://rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
#citations for the packages used in this module can be retrieved below
citation("plyr")
citation("psych")
citation("sjPlot")
citation("ggplot2")
citation("stargazer")
|
5a04e0f2ad9dbbbad78646220dd240286cd7efb8 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Biere/Counter/counter_r_2/counter_r_2.R | 896d630ebae53be24e9f8581ec11f61ca07bee5f | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 59 | r | counter_r_2.R | 985b9ea304afe4abc21bc75835226c81 counter_r_2.qdimacs 50 121 |
8607016166762af87859ac357ced6ecc2d3e59a5 | 1de64141140b62134beffb82aa9602baeea0e35e | /presentations_reveal/muses_material_survey/rust_survey/assets/likert.R | a0fcb31677a47886a01783a07f3d66a3684238ca | [
"MIT"
] | permissive | bgaster/bgaster.github.io | ea9f5d0333714a798e902d0d4c4b4b935f69c72a | bb6376df51c431b9ee813f1df9970058231ce6e6 | refs/heads/master | 2022-12-10T02:45:54.980203 | 2021-03-03T09:48:54 | 2021-03-03T09:48:54 | 194,642,249 | 0 | 0 | null | 2022-12-07T21:53:57 | 2019-07-01T09:28:33 | JavaScript | UTF-8 | R | false | false | 3,807 | r | likert.R | # Exaxmple visualization script for Muses Material Survey Likert data
# The actual graphs we want to produce will vary, depending on what we want to show
# for a given presentation of the data.
#
# Benedict R. Gaster
library(ggplot2)
library(dplyr)
library(sqldf)
library(ggpubr)
# Load Likert CSV files, appending all to a single table
likert_files = list.files("/Users/br-gaster/dev/bgaster.github.io/muses_material_survey/rust_survey/assets/data/likert/", pattern="*.csv")
survey <- do.call(rbind,lapply(
likert_files, function(f) {
read.csv(
paste("/Users/br-gaster/dev/bgaster.github.io/muses_material_survey/rust_survey/assets/data/likert",f, sep="/"),
sep = ",",
row.names=NULL)
}))
# Select the fields we care about for resulting plot
survey <- survey[,c("Category", "Gesture", "Material","Feeling","Answer")]
colnames(survey) <- c("category", "gesture", "material", "feeling", "answer")
# define the colors on the Likert scale, using the Muses color palette
myColors <- c("#605fa4","#d989bc","#f5e5c1","#f3b73b","#dd4921","black")
# TAP gesture plot, material 1
tap_agg_table <- sqldf::sqldf(
paste("select gesture, material, category, feeling, SUM(answer) as total
from survey", "where material=1 and gesture='Tap'", "group by material, feeling, category", sep = " "))
tap_summarized_table <- tap_agg_table %>%
group_by(material) %>%
mutate(countT= sum(total)) %>%
group_by(category, add=TRUE) %>%
mutate(per=round(100*total/countT,2))
tap_summarized_table$category <- relevel(tap_summarized_table$category,"Strongly Disagree")
tap_summarized_table$category <- relevel(tap_summarized_table$category,"Disagree")
tap_summarized_table$category <- relevel(tap_summarized_table$category,"Neutral")
tap_summarized_table$category <- relevel(tap_summarized_table$category,"Agree")
tap_summarized_table$category <- relevel(tap_summarized_table$category,"Strongly Agree")
# TAP gesture plot, material 2 (the query below filters gesture='Tap', material=2)
tap_2_agg_table <- sqldf::sqldf(
paste("select gesture, material, category, feeling, SUM(answer) as total
from survey", "where material=2 and gesture='Tap'", "group by material, feeling, category", sep = " "))
tap_2_summarized_table <- tap_2_agg_table %>%
group_by(material) %>%
mutate(countT= sum(total)) %>%
group_by(category, add=TRUE) %>%
mutate(per=round(100*total/countT,2))
tap_2_summarized_table$category <- relevel(tap_2_summarized_table$category,"Strongly Disagree")
tap_2_summarized_table$category <- relevel(tap_2_summarized_table$category,"Disagree")
tap_2_summarized_table$category <- relevel(tap_2_summarized_table$category,"Neutral")
tap_2_summarized_table$category <- relevel(tap_2_summarized_table$category,"Agree")
tap_2_summarized_table$category <- relevel(tap_2_summarized_table$category,"Strongly Agree")
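# The two aggregation blocks above are identical except for the material
# filter; a helper along these lines (a sketch, assuming the survey columns
# used above) avoids the duplication when more materials or gestures are added:
prep_likert_table <- function(mat, gest) {
  agg <- sqldf::sqldf(sprintf(
    "select gesture, material, category, feeling, SUM(answer) as total
     from survey where material=%d and gesture='%s'
     group by material, feeling, category", mat, gest))
  out <- agg %>%
    group_by(material) %>%
    mutate(countT = sum(total)) %>%
    group_by(category, add = TRUE) %>%
    mutate(per = round(100 * total / countT, 2))
  out$category <- factor(out$category,
    levels = c("Strongly Agree", "Agree", "Neutral", "Disagree", "Strongly Disagree"))
  out
}
# e.g. tap_2_summarized_table <- prep_likert_table(2, "Tap")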
#actual plot creation
tap_1_plot <- ggplot(data = tap_summarized_table, aes(x =feeling , y = per, fill = category)) +geom_bar(stat="identity", width = 0.7) +scale_fill_manual (values=myColors) +coord_flip() + ylab("") + xlab("") +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold")) +ggtitle("Tap Gesture") +theme(plot.title = element_text(size = 20, face = "bold",hjust = 0.5))
tap_2_plot <- ggplot(data = tap_2_summarized_table, aes(x =feeling , y = per, fill = category)) +geom_bar(stat="identity", width = 0.7) +scale_fill_manual (values=myColors) +coord_flip() + ylab("Percentage") + xlab("") +theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold")) +ggtitle("") +theme(plot.title = element_text(size = 20, face = "bold",hjust = 0.5))
ggarrange(tap_1_plot, tap_2_plot,
labels = c("Material 1", "Material 2"),
ncol = 1, nrow = 2)
# save plot to PDF
ggsave(file = "survey_likert_output.pdf") |
2aeea9b269fe447927e97d63bbf0af1c34a0532b | bc504192da5aa37ccf2c40464942c3d5c56193d7 | /HW4s2020.R | 7b86833d257d64ddada4aec3c611c9d03ca147a3 | [] | no_license | danniecuiuc/frm | 6125daab1ddb6ceb7f50b6f1a9f99437238102ce | 12d632bad3283188378ee3f4623bc3b2382165c6 | refs/heads/master | 2022-10-19T13:55:07.987734 | 2020-06-08T14:58:32 | 2020-06-08T14:58:32 | 269,361,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,323 | r | HW4s2020.R | # Solution to Fin 567 Homework 4 spring 2020 Questions 1 and 2
# Does not include solution to Question 3 because that does not require R
#
library(fOptions) #needed to compute option values
library(MASS) #needed to simulate multivariate Normal rvs
library(mvtnorm) #needed to simulate multivariate t rvs
# Question 1 Monte Carlo VaR and ES using the Normal distribution
# We will first need to use the binomial model to compute the value of the portfolio
ABCcall0 <- CRRBinomialTreeOption(TypeFlag = "ca", S=101.17, X=100, Time = 21/252,
r=0.01, b=0.01, sigma=0.45, n=21, title = NULL, description = NULL)@price
ABCput0 <- CRRBinomialTreeOption(TypeFlag = "pa", S=101.17, X=100, Time = 21/252,
r=0.01, b=0.01, sigma=0.45, n=21, title = NULL, description = NULL)@price
DEFcall0 <- CRRBinomialTreeOption(TypeFlag = "ca", S=148.97, X=150, Time = 21/252,
r=0.01, b=0.01, sigma=0.37, n=21, title = NULL, description = NULL)@price
DEFput0 <- CRRBinomialTreeOption(TypeFlag = "pa", S=148.97, X=150, Time = 21/252,
r=0.01, b=0.01, sigma=0.37, n=21, title = NULL, description = NULL)@price
V0 <- -60*100*ABCcall0-60*100*ABCput0 - 40*100*DEFcall0 - 40*100*DEFput0
# (a) Compute the 5% MC VaR
set.seed(137)
mu = c(0.0005, 0.0004)
cov <- matrix(c(0.028^2,0.028*0.023*0.4,0.028*0.023*0.4,0.023^2), nrow=2, ncol=2)
n = 10000
returns = mvrnorm(n, mu = mu, Sigma = cov)
S <- matrix(rep(0,2*n), nrow= n, ncol = 2)
S[1:n,1]= 101.17*exp(returns[1:n,1])
S[1:n,2]= 148.97*exp(returns[1:n,2])
ABCcall=rep(0,n)
ABCput=rep(0,n)
DEFcall=rep(0,n)
DEFput=rep(0,n)
V <- rep(0,n)
# in the loop below, note that the remaining time to expiration is 20 days
for(i in 1:n){
ABCcall[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=S[i,1], X=100, Time = 20/252,
r=0.01, b=0.01, sigma=0.45, n=20, title = NULL, description = NULL)@price
ABCput[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=S[i,1], X=100, Time = 20/252,
r=0.01, b=0.01, sigma=0.45, n=20, title = NULL, description = NULL)@price
DEFcall[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=S[i,2], X=150, Time = 20/252,
r=0.01, b=0.01, sigma=0.37, n=20, title = NULL, description = NULL)@price
DEFput[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=S[i,2], X=150, Time = 20/252,
r=0.01, b=0.01, sigma=0.37, n=20, title = NULL, description = NULL)@price
}
V <- -60*100*ABCcall-60*100*ABCput - 40*100*DEFcall - 40*100*DEFput
PLQ1 = V - V0
MCVaRQ1 <- - quantile(PLQ1, 0.05)
# (b) Compute the expected shortfall
ESQ1 = -mean(PLQ1[PLQ1<=(-MCVaRQ1)])
#Question 2
# Monte Carlo VaR and expected shortfall using bivariate t distribution
# (a) Compute the 5% MC VaR
mu = c(0.0005, 0.0004)
cov <- matrix(c(0.028^2,0.028*0.023*0.4,0.028*0.023*0.4,0.023^2), nrow=2, ncol=2)
nu = 4
scale = ((nu-2)/nu)*cov #scale matrix input to rmvt() is smaller than the covariance matrix
n = 10000
returns = rmvt(n, sigma = scale, df = nu, delta = mu) # parameter sigma is the scale matrix
S <- matrix(rep(0,2*n), nrow= n, ncol = 2)
S[1:n,1]= 101.17*exp(returns[1:n,1])
S[1:n,2]= 148.97*exp(returns[1:n,2])
ABCcall=rep(0,n)
ABCput=rep(0,n)
DEFcall=rep(0,n)
DEFput=rep(0,n)
V <- rep(0,n)
for(i in 1:n){
ABCcall[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=S[i,1], X=100, Time = 20/252,
r=0.01, b=0.01, sigma=0.45, n=20, title = NULL, description = NULL)@price
ABCput[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=S[i,1], X=100, Time = 20/252,
r=0.01, b=0.01, sigma=0.45, n=20, title = NULL, description = NULL)@price
DEFcall[i] <- CRRBinomialTreeOption(TypeFlag = "ca", S=S[i,2], X=150, Time = 20/252,
r=0.01, b=0.01, sigma=0.37, n=20, title = NULL, description = NULL)@price
DEFput[i] <- CRRBinomialTreeOption(TypeFlag = "pa", S=S[i,2], X=150, Time = 20/252,
r=0.01, b=0.01, sigma=0.37, n=20, title = NULL, description = NULL)@price
}
V <- -60*100*ABCcall-60*100*ABCput - 40*100*DEFcall - 40*100*DEFput
PLQ2 = V - V0
MCVaRQ2 <- - quantile(PLQ2, 0.05)
# (b) Compute the expected shortfall
ESQ2 = -mean(PLQ2[PLQ2<=(-MCVaRQ2)])
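# a quick side-by-side of the two simulations' risk measures (all objects defined above)
rbind(Normal = c(VaR = unname(MCVaRQ1), ES = ESQ1),
      Student.t = c(VaR = unname(MCVaRQ2), ES = ESQ2))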
|
6d06454bdeeea8a52c50f80db830861da4df3134 | 249afaa1ffe3d3b27906548ee468c95718276591 | /R/BioGeoBias_bias_correction.R | 56a6da01cdb79c58baa633ba0c5fec500918189a | [
"MIT"
] | permissive | JanLauGe/BioGeoBias | 01c6409d8ff3b4d3b1d12398e3debf05856ff775 | e07b2ddd54b4c1a24766d4d82bfb8379d6503fcf | refs/heads/master | 2021-01-11T17:02:50.097840 | 2018-10-20T21:30:01 | 2018-10-20T21:30:01 | 69,507,338 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,114 | r | BioGeoBias_bias_correction.R |
#' @title Bias Correction
#' @description Choose bias correction approach and generate bias correction data
#' @param species_name (character) a valid taxon name, presumably a species name.
#' @param target_rank (character) a taxon rank for the target group (optional).
#' @param kingdom (character) the kingdom of the species. Can be supplied to
#' avoid possible confusion when matching names. Should be one of c('animalia',
#' 'plantae','archaea','bacteria','fungi','protozoa','viruses') (optional).
#' @param lonMin minimum longitude of a rectangular bounding box restricting the
#' search for species occurrences of the target group (optional).
#' @param lonMax maximum longitude of a rectangular bounding box restricting the
#' search for species occurrences of the target group (optional).
#' @param latMin minimum latitude of a rectangular bounding box restricting the
#' search for species occurrences of the target group (optional).
#' @param latMax maximum latitude of a rectangular bounding box restricting the
#' search for species occurrences of the target group (optional).
#' @author Jan Laurens Geffert, \email{laurensgeffert@@gmail.com}
#' @details This function takes a species name and a rank, and returns a
#' dataset suitable for bias correction by invoking
#' \code{create_target_group_background_data} or
#' \code{create_bias_grid}, depending on the number of species occurrence
#' records available for the species and the spatial extent of the selection.
#' @keywords GBIF, sampling bias, SDM
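#' @examples
#' # a hypothetical call for illustration (needs internet access to GBIF, so not run):
#' \dontrun{
#' bg <- bias_correction("Puma concolor", target_rank = "class",
#'                       kingdom = "animalia")
#' }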
#' @export
bias_correction <- function(
species_name,
target_rank = rgbif::taxrank(),
kingdom = c(NULL,
'animalia',
'plantae',
'archaea',
'bacteria',
'fungi',
'protozoa',
'viruses'),
lonMin = NULL,
lonMax = NULL,
latMin = NULL,
latMax = NULL,
breaks = NULL,
nbreaks = NULL,
limit = 200000){
# check valid variable values for the function
if(length(target_rank) > 1){
# If no target rank is given, use class as default
message('\nNo valid target rank supplied, using "class"...\n')
target_rank <- 'class'
}
if(class(target_rank) != 'character'){
# If invalid target rank is given, stop with error
stop('value supplied to argument "target_rank" is not a character. Try rgbif::taxrank() to get a summary of valid inputs for this argument')
}
target_rank = match.arg(target_rank)
if(class(species_name) != 'character'){
stop('value supplied to argument "species_name" is not a character. This should be the latin binomial of the species you want to model')
}
if(!is.null(kingdom)){
kingdom = match.arg(kingdom)
}
for(v in c(lonMin, lonMax, latMin, latMax)){
if(!is.numeric(v) & !is.integer(v) & !is.null(v)){
stop('values supplied to arguments lonMin, lonMax, latMin, latMax are not numeric. These should be numerical values giving the minimum and maximum longitute and latitude for the extent of the occurrence search.')
}
}
# If min & max coordinates are supplied, use spatial filter
latFilter <- ifelse(!is.null(latMin) & !is.null(latMax), TRUE, FALSE)
lonFilter <- ifelse(!is.null(lonMin) & !is.null(lonMax), TRUE, FALSE)
# GBIF name query --------------------------------------------------------------
# Get name key of the target group from gbif backbone taxonomy.
# kingdom is used if supplied, but ignored otherwise
message('\nChecking GBFI for target group taxon key...\n')
NameData <- name_backbone(
name = species_name,
kingdom = kingdom)
# Check if name_backbone returned a valid result
  if(NameData$matchType == 'NONE') stop('No valid taxon key for the target species. Are you sure you provided a valid Latin binomial name?')
  # Check if name_backbone name matching confidence is below 95%
else if(NameData$confidence < 95){
warning('Name matching confidence was less than 95%. Please make sure that the matched taxon is the one you want.')
message(paste0(
'Name matched with ', NameData$confidence, ' confidence \n'
))}
# Check if name_backbone returned a valid taxon key for target group
if(is.null(NameData[paste0(target_rank ,'Key')][[1]])){
stop('No valid taxon key for the target group. Perhaps you should try a different rank?')
}
# Get the taxon key of the target group
Key <- NameData[paste0(target_rank ,'Key')]
# Print information about the matched species
message(paste0(
'Name matched to:\n ',
NameData$scientificName,
', Taxon key: ',
NameData$usageKey))
# Print information about the matched target group
message(paste0(
'Target group selected:\n ',
target_rank, ' ', NameData[target_rank],
', Taxon key: ', Key), '\n')
# Get the number of occurrences in the target group
Count <- occ_count(
taxonKey = Key,
georeferenced = TRUE)
  useMap <- Count > 200000 # more than 200,000 records: use the aggregated map API instead of raw occurrences
if(useMap == FALSE){
# Use target group background ==============================================
message('Less than 200,000 records, using primary species occurrence data...\nIf you prefer to get a bias grid from the GBIF map API, use the function create_bias_grid.\n')
out <- occ_search(
taxonKey = Key,
return = 'data',
limit = limit,
decimalLatitude = ifelse(
latFilter,
paste(latMin,latMax,sep=','),
paste('-90','90',sep=',')),
decimalLongitude = ifelse(
lonFilter,
paste(lonMin,lonMax,sep=','),
paste('-180','180',sep=',')),
hasCoordinate = TRUE,
hasGeospatialIssue = FALSE)
}else if(useMap == TRUE){
# Use map api ==============================================================
message('\nMore than 200,000 records, using map API for aggregated data...\nIf you prefer to use occurrence records, use function create_target_group_background_data.\n')
out <- create_bias_grid(
taxonkey = as.numeric(Key),
lonMin = NULL,
lonMax = NULL,
latMin = NULL,
latMax = NULL,
nbreaks = nbreaks,
breaks = breaks)
}
return(out)
}
|
e18dd8452366516366e10d5f0d6af4115f731dc8 | a0f93433c57753c4dab79805c57b3cb0031b8304 | /man/plot_rand_KLD.Rd | d36bb388818c05bbbf6602e3aa4e2168c3bee970 | [
"MIT"
] | permissive | alexisvdb/singleCellHaystack | 9dcc59288d3ba2c34bdfc906d45317d6d885f25d | c705b95cd2bf01575df938574864c54ba78714d3 | refs/heads/master | 2023-08-16T02:58:48.850113 | 2023-08-05T14:05:46 | 2023-08-05T14:05:46 | 170,470,927 | 71 | 8 | NOASSERTION | 2022-10-21T07:09:12 | 2019-02-13T08:35:23 | R | UTF-8 | R | false | true | 742 | rd | plot_rand_KLD.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/debug.R
\name{plot_rand_KLD}
\alias{plot_rand_KLD}
\title{plot_rand_KLD}
\usage{
plot_rand_KLD(x, n = 12, log = TRUE, tail = FALSE)
}
\arguments{
\item{x}{haystack result.}
\item{n}{number of genes from randomization set to plot.}
\item{log}{whether to use log of KLD.}
\item{tail}{whether the genes are chosen from the tail of randomized genes.}
}
\description{
Plots the distribution of randomized KLD for each of the genes, together with
the mean and standard deviation, the empirical 0.95 quantile, and the 0.95
quantile of a normal distribution with the same mean and standard deviation
as the KLD distribution. The logCV is indicated in the subtitle of each plot.
}
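% Added example sketch; `res` stands in for a hypothetical haystack result:
\examples{
\dontrun{
plot_rand_KLD(res, n = 4)
}
}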
|
3a783bf040292bf4517f97c09ac54a69ac0bfc1e | f02a605c4f0aa4f79723639a60cdae6556d2aa80 | /lecciones/fundamentos.R | 09cca2f022ff97a1e9bdea690189c83a2eae6eb8 | [] | no_license | manununhez/datascience-course | 4c7c086d1190a3c4265b4ff731621ff49216ae98 | b45d0ad6a3ee4b1593a697686dfcc2a53ccdc579 | refs/heads/master | 2021-05-08T02:45:24.374062 | 2018-02-01T02:35:46 | 2018-02-01T02:35:46 | 108,132,058 | 0 | 0 | null | 2017-10-24T13:39:09 | 2017-10-24T13:39:09 | null | UTF-8 | R | false | false | 12,260 | r | fundamentos.R | v = c() #creamos vector
nu = c(0.5, 0.6) #vector
l1 = c(FALSE, FALSE, TRUE)
l2 = c(T, F)
ch = c('a')
it = 9:29
co = c(1+0i,2+4i)
v = vector('numeric', length = 10) # another way to create vectors: a numeric vector of 10 entries, all zero (1x10)
v[1] = 5
v # auto-printing
print(v) # explicit printing
y = c(1.7, 'a') #character
y # both 1.7 and a become characters -> "1.7" and "a". A vector must hold a single type, so R coerces to character
y = c(FALSE, 2) #numeric
y # coerced to numeric: 0 and 2, without quotes (FALSE = 0, TRUE = 1)
y = c('a', TRUE) # coerces the logical to character, since R cannot map 'a' onto a logical value
y
####
# coercion, i.e. explicit conversion
#####
x = 0:6
class(x) # class() tells us the class of an object
x = as.numeric(x)
x = as.character(x)
x = as.logical(x) # for numeric input, any nonzero value is TRUE and 0 is FALSE
# matrices
m = matrix(1:6 , nrow = 2, ncol = 3) # matrices in R are filled by COLUMNS!!!
m[1,]
m[,2]
x = 1:3
y = 10:12
z = cbind(x,y) # bind by column
z = rbind(x,y) # bind by row
# lists
x = list(1,"a", TRUE, 1 + 4i) # creates a list, where each element can be of a different type
x[1] # access the first element (returned as a one-element list)
x[[1]] # access the value inside the first element
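# The distinction matters (added illustration):
class(x[1]) # "list": single brackets return a one-element list
class(x[[1]]) # "numeric": double brackets return the value itself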
#factor
x = factor(c('yes','yes','no','yes','no'))
table(x) # how many occurrences of each category
x = factor(c('yes','yes','no','yes','no'), levels = c('yes','no')) # sets the level order (for a truly ordinal variable, add ordered = TRUE)
x = c(1,2,NA,10,3)
is.na(x)
x = factor(x)
# Data frames
# Not the same as a matrix: a data frame can hold columns of different types, while a matrix holds a single type. Column and record names: data frames only
# TABLES
x = data.frame(c1=c(1:5), c2=c(T, T, F, F, T), c3=c('a','b','c','d','e'))
nrow(x) # number of rows of the data frame
ncol(x) # number of columns
names(x) # column names of the data frame
#IF
x = 2
y = 0
if(x > 3){
y = 10
}else{
y = 50
}
if(x == 2){
print('Este valor es 2')
}
#for
x = data.frame(c1 = 4:6, c2 = 18:20)
for(i in seq_len(nrow(x))){ # print every element of the data frame (row by row)
print(x[i,'c1']) # print the value of the first column (selected by name)
print(x[i,2]) # print the value of the second column (selected by index)
}
for(i in seq_len(ncol(x))){ # print every element of the data frame (column by column)
print(x[1, i]) # print the value of the first row
print(x[2,i]) # print the value of the second row
}
########11-21-17
#apply
x = data.frame(c1 = 1:3, c2 = 10:12); x
apply(x, 2, median) # apply(<object>, <rows (1) or columns (2)>, <function>)
# functions with apply
mult = function(x, c) {
return (x*c)
}
apply(x, 2, mult, 5) # apply(<object>, <rows (1) or columns (2)>, <function>, <parameter>)
area_circulo = function(r) {
return (3.14*r^2)
}
area_circulo = function(r,p) {
return (p*r^2)
}
sapply(x[,'c1'], area_circulo, 3.14) # only the elements of the first column (3.14 supplies the second argument p of the redefined function)
#Files management
data = read.csv(file = '../proyecto/data/becal-cobertura.csv', header = T, stringsAsFactors = F)
write.csv(x, 'leccion4.csv', row.names=F)
#Graphics
library(datasets) #datasets in R
autos = mtcars
hist(autos$mpg, col='green', main='Distribución de las millas por galón',
     xlab='Millas x galón', ylab='Frecuencia') # in this example the distribution is not normal: most cars in this dataset get relatively few miles per gallon
boxplot(autos$hp, col='red', main='Distribución de caballos de fuerza',
        ylab='Caballos de fuerza') # if the bar inside the red box sits in the middle, the distribution is normal; in this example it is not
barplot(table(autos$am), col='green', xlab='Tipo de transmisión',
        main='Nro. de vehículos por tipo de transmisión') # table(autos$am) counts how many cars there are of each transmission type
plot(presidents[cycle(presidents) == 1], ylab = 'Porcentaje de aprobación (%)', xlab='Año',
     main = 'Aprobación (1er cuatrimestre) Presidentes de EEUU') # presidents is a quarterly ts, so keep only the first quarter of each year ($ is not valid on a ts)
plot(autos$mpg, autos$wt, col='blue', xlab='Millas por galón', ylab='Peso (libras)',
     main='Relación entre peso del vehículo y millas recorridas por galón') # heavier vehicles travel fewer miles per gallon, i.e. they use more fuel
# Partitioning / subsetting
x = data.frame('var1'=sample(1:3),'var2'=sample(6:8),'var3'=sample(11:13)) # sample() returns the values in the given range in a random order
# selecting from a data frame: (specific value, range, vector, logical expressions)
x[x$var1 < 5 & x$var2 > 10,] #and
x[x$var1 < 5 | x$var2 > 10,] #or
x[x$apellido == 'gonzalez',] #equal
# sorting
sort(x$var1) # ascending
sort(x$var2, decreasing = T) # descending
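# sort() works on plain vectors; to sort a whole data frame by one column,
# order() is the usual tool (added illustration):
x[order(x$var1), ] # data frame sorted ascending by var1
x[order(x$var2, decreasing = T), ] # sorted descending by var2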
#BECAL
becal = read.csv(file = '../proyecto/data/becal2017.csv', header = T, stringsAsFactors = F, fileEncoding = "utf-8")
becal_c = read.csv(file = '../proyecto/data/becal-cobertura.csv', header = T, stringsAsFactors = F, fileEncoding = "utf-8", strip.white = TRUE)
becal[1:5,'Sexo'] # select the sex of the first 5 records
tolower(becal[,'Sexo'])[1:5] # lower-case the values of the Sexo column, showing only the first 5 results
becal$Sexo = tolower(becal[,'Sexo']) # assign, option 1
becal[,'Sexo'] = tolower(becal[,'Sexo']) # assign, option 2
becal[1:2,'Fecha.firma.de.Contrato']
as.character(becal[1:5,'Fecha.firma.de.Contrato'])
strsplit(as.character(becal[1:5,'Fecha.firma.de.Contrato']), '/')
strsplit(becal[1:2,'Fecha.firma.de.Contrato'],"/")
str(becal[1:2,'Fecha.firma.de.Contrato'])
becal[1:2,'C.I.']
gsub(',', '', becal[,'C.I.'])[1:2] # remove (i.e. replace with nothing) the commas in the ID ("cédula") text
becal$C.I. <- gsub(',', '', becal[,'C.I.'])
becal[1:2,'Fecha.firma.de.Contrato']
strsplit(becal[1:2,'Fecha.firma.de.Contrato'], '/') # split the date text using the slash as separator
becal[1:2,'C.I.']
gsub(',', '', becal[,'C.I.'])[1:2] # remove (i.e. replace with nothing) the commas in the ID ("cédula") text
becal_c[c(1,210,843),'Total.General']
grep('€',becal_c[c(1,210,211, 843),'Total.General']) # look for the presence of the euro character
grepl('€',becal_c[c(1,210,843),'Total.General']) # look for the presence of the euro character
library(stringr)
becal[1:2,'Condición']
str_trim(becal[1:2,'Condición']) # remove the whitespace at the start and end of the text
str_trim(becal_c[c(1,210,843),'Total.General']) # remove the whitespace at the start and end of the text
# (DOES NOT WORK HERE because str_trim only removes whitespace at the start and end; a regular expression would be needed to strip the spaces in the middle)
becal_c[1,'Universidad.de.Destino']
nchar(becal_c[1,'Universidad.de.Destino']) # count the number of characters in the text
substr(becal_c[1,'Universidad.de.Destino'],16,20) # extract part of the text
becal_c[1,c(5,6)]
paste0(becal_c[1,5],' (',becal_c[1,6],')')
#merge
becal$C.I.<- str_trim(gsub(',', '', becal[,'C.I.'])) # remove commas
becal$C.I.<- str_trim(gsub('\\.', '', becal[,'C.I.'])) # remove periods
becal_c$C.I.<- str_trim(gsub(',', '', becal_c[,'C.I.'])) # remove commas
becal_c$C.I.<- str_trim(gsub('\\.', '', becal_c[,'C.I.'])) # remove periods
ambos_becal = merge(becal, becal_c, by.x="C.I.", by.y="C.I.", all.y=TRUE) # all.y because not everyone in becal2017 received the scholarship; everyone in becal_cobertura did
# CHECK -- the merge only has 907 rows x 44 columns
#Dplyr
library(dplyr)
select(becal, C.I., Sexo, Edad) #Select columns
head(select(becal, C.I., Sexo, Edad)) # head shows the first n rows of the data frame
dataset_filtrado = filter(becal, Sexo=='Femenino')
dataset_ordenado = arrange(becal, Edad)
dataset_ordenado_desc = arrange(becal, desc(Edad))
head(select(dataset_ordenado, C.I., Sexo, Edad))
#rename
becal_renombrado = rename(becal, ci = C.I., sexo = Sexo, edad = Edad)
head(select(dataset_ordenado_desc, C.I., Sexo, Edad))
#mutate
becal_gs = mutate(becal_c, total_gs=5500*convertir_totalgeneral(Total.General))
head(select(becal_gs, Total.General, total_gs), 5)
# 5/12/17 exploratory analysis
hist(as.numeric(becal17$edad), main=paste("Distribución de becarios por edad (n=",nrow(becal17),")"),
ylab="Frecuencia", xlab="Edad", col = "red")
hist(as.numeric(becal17$edad), main=paste("Distribución de becarios por edad (n=",nrow(becal17),")"),
ylab="Frecuencia", xlab="Edad", col = "red", xlim = c(20, 40))
summary(as.numeric(becal17$edad)) # min, max, quartiles, median, mean
quantile(as.numeric(becal17$edad)) # for numeric variables
quantile(as.numeric(becal17$edad), probs = c(0.40, 0.65, 0.90)) # specific quantile values
quantile(as.numeric(becal17$edad), probs = c(0.40, 0.65, 0.90), na.rm = TRUE) # na.rm drops the NAs
boxplot(as.numeric(becal17$edad), col='red', main='Distribución de edad de becarios',
ylab='Edad')
var(becal17$edad) # variance, for numeric variables
sd(becal17$edad) # standard deviation, for numeric variables
table(becal17$sexo) # for categorical variables
barplot(table(becal17$universidaddedestino),
main=paste("Becarios por rango de ranking de universidad (n=",nrow(becal17),")"),
ylab="Total", xlab="Rango de ranking", col="blue", las = 2)
# cleaning up the plot ##
becal_limpio = subset(becal17, categoriauni != "sin dato") # drop the records marked "sin dato" (no data)
dis_categoriauni = table(droplevels(as.factor(becal_limpio$categoriauni)))
categoria_ordenadas = sort(dis_categoriauni, decreasing = T) # sort the categories from largest to smallest
barplot(categoria_ordenadas, main=paste("Becarios por rango de ranking de universidad (n=",
nrow(becal_limpio),")"), ylab="Total", xlab="Rango de ranking", col="blue")
library(stringi)
library(dplyr)
library(stringr)
source('utils.R')
becal17$paisdedestino = limpiar_nombres(becal17$paisdedestino)
dis_pais_destino = table(droplevels(as.factor(becal17$paisdedestino)))
categoria_ordenadas = sort(dis_pais_destino, decreasing = T) # sort the categories from largest to smallest
barplot(categoria_ordenadas, main=paste("Becarios por rango de ranking de universidad (n=",
nrow(becal17),")"), ylab="Total", xlab="Rango de ranking", col="blue", las = 2)
### Scatter plots (point charts) - relationships between two numeric variables!!!!
plot(becal_completo$mesesdeduraciondeestudios, becal_completo$totalgralusd,
ylab="Costo Total en USD", xlab="Duración Estudio en Meses",
main="Meses de Duración por Costo de Estudio")
# conditional means - TO FIX OVERPLOTTING
groupo_meses = group_by(becal_completo, mesesdeduraciondeestudios)
total_x_gm = summarize(groupo_meses,
total_mean = mean(totalgralusd))
plot(total_x_gm$mesesdeduraciondeestudios, total_x_gm$total_mean,
ylab="Costo Total Promedio en USD", xlab="Duración Estudio en Meses",
main="Meses de Duración por Costo de Estudio")
# Relationships between two variables: categorical and numeric:::: BOXplots
# correlations
becal_sin_na = filter(becal_completo, totalgralusd != 'NA') # drop the missing values
cor(as.numeric(becal_sin_na$mesesdeduraciondeestudios), becal_sin_na$totalgralusd) # these two variables are strongly related:
# the longer the programme of study, the more expensive it is.
# But this cannot be read as causation; other factors may be at play.
# Correlation does not imply causation.
hh <- t(VADeaths)[, 5:1]
mybarcol <- "gray20"
mp <- barplot(hh, beside = TRUE,
col = c("lightblue", "mistyrose",
"lightcyan", "lavender"),
legend = colnames(VADeaths), ylim = c(0,100),
main = "Death Rates in Virginia", font.main = 4,
sub = "Faked upper 2*sigma error bars", col.sub = mybarcol,
cex.names = 1.5)
#segments(mp, hh, mp, hh + 2*sqrt(1000*hh/100), col = mybarcol, lwd = 1.5)
#stopifnot(dim(mp) == dim(hh)) # corresponding matrices
mtext(side = 1, at = colMeans(mp), line = -5,
text = paste("Mean", formatC(hh)), col = "red") |
8390fcb5303920ace34e31f54011266499c3c150 | 5f4e127bf2a52486df01088384d7c5926cfa277e | /man/resumo_cba_por_iniciativa.Rd | f21243d0e26e7353bf28a32b34b475b49559949a | [] | no_license | pedroliman/oshcba | 0b4ac93c2135e85b71dde834b6eb313980b98db9 | 01ef42e96a7089fc6f4f35912e825a484b017208 | refs/heads/master | 2020-02-26T15:04:57.043923 | 2018-08-01T00:48:22 | 2018-08-01T00:48:22 | 94,784,015 | 0 | 1 | null | 2017-07-17T20:52:06 | 2017-06-19T14:12:00 | R | UTF-8 | R | false | true | 433 | rd | resumo_cba_por_iniciativa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analise.R
\name{resumo_cba_por_iniciativa}
\alias{resumo_cba_por_iniciativa}
\title{resumo_cba_por_iniciativa}
\usage{
resumo_cba_por_iniciativa(resultados_cbr)
}
\arguments{
\item{resultados_cbr}{data.frame with results per initiative (in the Cenario.y column)}
}
\value{
tibble with statistics per initiative
}
\description{
resumo_cba_por_iniciativa
}
|
017517016731fd9b79c673a76f87bc5ac481b0ff | 7f459c973e6ea48343f7e7a935429ffd646ef4da | /plot1.R | cecf1625ab53665e4e507dcedeed7d01db8becbf | [] | no_license | cvscastejon/Plotting-Graphs | eedf9c6daa58578283092a411611fd4900e96c52 | ef5c5f02d6c7eed41675d737a649cc9c31b12c46 | refs/heads/master | 2021-05-26T16:25:54.661921 | 2020-04-08T16:21:00 | 2020-04-08T16:21:00 | 254,136,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 989 | r | plot1.R | #date manipulaton done using lubridate package
library(lubridate)
#read the entire dataset into a table
household <- read.table("household_power_consumption.txt", sep = ";",
header = TRUE, colClasses = c("character", "character", "numeric",
"numeric", "numeric", "numeric", "numeric", "numeric"),
na.strings = "?")
#Date column converted to Date class; Time parsed as a period with hms()
household$Date <- dmy(household$Date)
household$Time <- hms(household$Time)
#subsetting to February First and Second of 2007
household <- household[(household$Date == ymd("2007-02-01") | household$Date == ymd("2007-02-02")),]
#open png device with required size
png("Plot1.png",width = 480, height = 480, units = "px")
#Generate histogram on specified device
hist(household$Global_active_power,
xlab = "Global Active Power (kilowatts)",
col = "red",
main = "Global Active Power")
#Close device
dev.off() |
2d09ee54adc1a8c74ae0fef39c0a6f77f625b8b0 | 9deb3a3350deecbff27dde00f3831116d2861d04 | /Scripts/analyser_resultat.R | 784746058188e8097437a171c275cfb036f52ae7 | [] | no_license | LucasNoga/Workspace-R | 8e55674712af47e7af78dc4208cd6d7be56bfcae | dce2f79dd4ecf075d4dafea3844b7a1371f1c804 | refs/heads/master | 2020-12-29T23:39:36.673353 | 2020-02-06T21:18:42 | 2020-02-06T21:18:42 | 238,780,108 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 377 | r | analyser_resultat.R | # Mise à jour de votre espace de travail comme d'habitude
setwd("D:/Dev/R")
# Load the variable that was previously saved
load("Data/resultat.RData")
# And apply a new transformation to it
nouveau_resultat <- (resultat + 3)^4
nouveau_resultat <- sqrt(nouveau_resultat)
print(paste("Le nouveau résultat est: ", nouveau_resultat, sep=""))
|
1106fb9d5e698a01a4265f85b62021d5e7442520 | 6520309a6cd2aed30f642fd61a88227e442f835d | /klay_longer_shot_prior.R | 160303e07b1e7d7663895419cd82a034a5a07299 | [] | no_license | bhc3/hothand | efaebb8f35ec2727c50467e61304eff0d5df217e | a0df13f1ccffbb1bf2e67f2dca29065bd07a8316 | refs/heads/master | 2016-08-11T21:14:05.651531 | 2015-10-29T20:12:36 | 2015-10-29T20:12:36 | 45,071,005 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 795 | r | klay_longer_shot_prior.R | ## Create vector of Klay's prior shot, for use in analyzing whether the result
## of one shot influences the next shot. This function will be applied to longer
## shots (i.e. eliminating 'short' ones). It considers two factors in
## determining the prior shot. (1) Is it the start of a new game? If so, the
## prior shot is NA. (2) Is it the first longer shot after a short shot? If so,
## the prior shot is NA.
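## Example with made-up values (added sketch):
## klay_prior_shot(game_id = c(1,1,1,2,2), shot_no = c(1,2,4,1,2),
##                 shot_history = c(1,0,1,1,0))
## returns NA 1 NA NA 1 (new games and skipped shot numbers give NA)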
klay_prior_shot <- function(game_id, shot_no, shot_history) {
n = length(shot_history)
klay_prev_shot <- rep(NA, n)
for(i in 2:n) {
if(game_id[i] != game_id[i-1]) {
klay_prev_shot[i] <- NA
} else if(shot_no[i] - shot_no[i-1] != 1) {
klay_prev_shot[i] <- NA
} else {
klay_prev_shot[i] <- shot_history[i-1]
}
}
return(klay_prev_shot)
} |
66a11eaa1d0bcba28ca4595814f777cbc38f6314 | c6c0881ca260a793a70f5814ab6993c61dc2401c | /unweighted_prs/run_PRS.R | 12f06b988bf98ce2b1f6c3c6e86a9ca5cbb7828d | [] | no_license | luyin-z/PRS_Height_Admixed_Populations | 5fe1c1bef372b3c64bfd143397709c7529a2705a | bf04ba884fd16e5e8c0685ccfbc86ed72d02c7f2 | refs/heads/master | 2023-03-16T17:05:56.658896 | 2020-09-18T16:58:04 | 2020-09-18T16:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,584 | r | run_PRS.R | #!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
#**************************************
#* CALCULATE POLYGENIC SCORES **
#**************************************
source('~/height_prediction/scripts/PolygenicScore_v2.R')
library("optparse")
library(data.table)
library(dplyr)
library(biomaRt)
library(parallel)
#args<-c("LD_prun","LD_250000_0.01_0.5")
cat(args)
options(scipen=999)
#options(digits=10)
home='~/height_prediction/'
hei<-readRDS(file=paste0(home, args[1], '/', args[2],'/output/hei_', args[3], '_v2.Rds'))
lapply(1:22, function(X) hei[[X]][,b:=ifelse(b>0, 1, ifelse(b<0, -1,0))]) #this line is crucial for the unweighted PRS. Everything else is the same as with the weighted one.
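# Note (added): the ifelse() above is equivalent to taking the sign of b, i.e.
# lapply(1:22, function(X) hei[[X]][, b := sign(b)]) would do the same.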
cat('checkpoint 3\n')
PGS<-vector('list',22)
names(PGS)<-c(1:22)
for (CR in 1:22){
print(paste0("Chromosome is ", CR))
try(PolScore2(CHR=CR, panel=args[1], panel2=args[2], tag=args[3]))-> PGS[[CR]]
cat(paste0(CR, ' done\n'))
}
samps<-names(PGS[[1]]) #sum PGS across chromosomes.
PGS2<-vector('list', length(samps))
names(PGS2)<-samps
for (S in samps){
PGS2[[S]] <- sum(vapply(PGS, function(chr) chr[[S]], numeric(1)))
cat(paste0(S, ' done\n'))
}
saveRDS(PGS2, file=paste0(home, 'unweighted_prs/output/PGS_', args[2], '_', args[3], '.Rds'))
#TheEnd
|
c2eab11ba0acd1b8efda74a4435bba06fddb3013 | 1ec7a5d283eab88a9cceffecf61ad06569cc92c2 | /publication_data/CompareListeria.R | 64cbdd24f530412f100afa00117b7fc531e03f2c | [] | no_license | alexsweeten/snacc | 1facba386900323d15de9f1532b863dd8d8b5c7a | 8207466655535bcb0aa72b4983508b98f429356f | refs/heads/master | 2022-03-05T01:54:10.318998 | 2019-11-12T03:02:24 | 2019-11-12T03:02:24 | 149,658,052 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,351 | r | CompareListeria.R | require(ape)
require(phangorn)
require(treespace)
require(ade4)     # provides mantel.rtest(), used below
require(corrplot) # provides corrplot(), used below
publication_trees<- readRDS("publication_trees.rds")
# reference tree obtained from https://github.com/johnlees/which_tree/blob/master/tree_compare.R
listeria_realtr <- midpoint(read.tree(paste(sep="/","benchmark_trees/RealTree_Listeria.nwk")))
listeria_realtr$edge.length <- listeria_realtr$edge.length*0.01 # correct for scaling introduced by ALF
listeria_samples <- sort(listeria_realtr$tip.label)
# Draw trees from distance matrices
temp = read.csv("listeria_distances/mash_listeria_distances.csv", sep=",")
listeria_mash.matrix <- as.matrix(temp, header=TRUE)
dimnames(listeria_mash.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/listeria_snacc_lzma.csv", sep=",")
listeria_snacc_lzma.matrix <- as.matrix(temp)
dimnames(listeria_snacc_lzma.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/listeria_snacc_lzma_reverse.csv", sep=",")
listeria_snacc_lzma_reverse.matrix <- as.matrix(temp)
dimnames(listeria_snacc_lzma_reverse.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/listeria_snacc_lz4.csv", sep=",")
listeria_snacc_lz4.matrix <- as.matrix(temp)
dimnames(listeria_snacc_lz4.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/listeria_snacc_bzip2.csv", sep=",")
listeria_snacc_bzip2.matrix <- as.matrix(temp)
dimnames(listeria_snacc_bzip2.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/listeria_snacc_gzip.csv", sep=",")
listeria_snacc_gzip.matrix <- as.matrix(temp)
dimnames(listeria_snacc_gzip.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/listeria_snacc_zlib.csv", sep=",")
listeria_snacc_zlib.matrix <- as.matrix(temp)
dimnames(listeria_snacc_zlib.matrix) = list(listeria_samples, listeria_samples)
listeria_andi_bionj <- publication_trees[["BIONJ + andi dist"]]
temp = read.csv("listeria_distances/poppunk_listeria_13.csv", sep=",")
listeria_poppunk13.matrix <- as.matrix(temp)
dimnames(listeria_poppunk13.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/poppunk_listeria_17.csv", sep=",")
listeria_poppunk17.matrix <- as.matrix(temp)
dimnames(listeria_poppunk17.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/poppunk_listeria_21.csv", sep=",")
listeria_poppunk21.matrix <- as.matrix(temp)
dimnames(listeria_poppunk21.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/poppunk_listeria_25.csv", sep=",")
listeria_poppunk25.matrix <- as.matrix(temp)
dimnames(listeria_poppunk25.matrix) = list(listeria_samples, listeria_samples)
temp = read.csv("listeria_distances/poppunk_listeria_29.csv", sep=",")
listeria_poppunk29.matrix <- as.matrix(temp)
dimnames(listeria_poppunk29.matrix) = list(listeria_samples, listeria_samples)
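# The repeated read-and-label blocks above could be factored into a small
# helper (added sketch; assumes every distance CSV shares the same layout):
read_dist_matrix <- function(path, labels) {
  m <- as.matrix(read.csv(path, sep = ","))
  dimnames(m) <- list(labels, labels)
  m
}
# e.g. listeria_mash.matrix <- read_dist_matrix("listeria_distances/mash_listeria_distances.csv", listeria_samples)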
listeria_mash_bionj <- midpoint(bionj(listeria_mash.matrix))
listeria_mash_upgma <- midpoint(upgma(listeria_mash.matrix))
listeria_snacc_lzma_bionj <- midpoint(bionj(listeria_snacc_lzma.matrix))
listeria_snacc_lzma_upgma <- midpoint(upgma(listeria_snacc_lzma.matrix))
listeria_snacc_lzma_reverse_bionj <- midpoint(bionj(listeria_snacc_lzma_reverse.matrix))
listeria_snacc_lzma_reverse_upgma <- midpoint(upgma(listeria_snacc_lzma_reverse.matrix))
listeria_snacc_lz4_bionj <- midpoint(bionj(listeria_snacc_lz4.matrix))
listeria_snacc_lz4_upgma <- midpoint(upgma(listeria_snacc_lz4.matrix))
listeria_snacc_bzip2_bionj <- midpoint(bionj(listeria_snacc_bzip2.matrix))
listeria_snacc_bzip2_upgma <- midpoint(upgma(listeria_snacc_bzip2.matrix))
listeria_snacc_gzip_bionj <- midpoint(bionj(listeria_snacc_gzip.matrix))
listeria_snacc_gzip_upgma <- midpoint(upgma(listeria_snacc_gzip.matrix))
listeria_snacc_zlib_bionj <- midpoint(bionj(listeria_snacc_zlib.matrix))
listeria_snacc_zlib_upgma <- midpoint(upgma(listeria_snacc_zlib.matrix))
listeria_poppunk13_bionj <- midpoint(bionj(listeria_poppunk13.matrix))
listeria_poppunk13_upgma <- midpoint(upgma(listeria_poppunk13.matrix))
listeria_poppunk17_bionj <- midpoint(bionj(listeria_poppunk17.matrix))
listeria_poppunk17_upgma <- midpoint(upgma(listeria_poppunk17.matrix))
listeria_poppunk21_bionj <- midpoint(bionj(listeria_poppunk21.matrix))
listeria_poppunk21_upgma <- midpoint(upgma(listeria_poppunk21.matrix))
listeria_poppunk25_bionj <- midpoint(bionj(listeria_poppunk25.matrix))
listeria_poppunk25_upgma <- midpoint(upgma(listeria_poppunk25.matrix))
listeria_poppunk29_bionj <- midpoint(bionj(listeria_poppunk29.matrix))
listeria_poppunk29_upgma <- midpoint(upgma(listeria_poppunk29.matrix))
#Create identity vectors
test_listeria <- diag(listeria_snacc_bzip2.matrix)
listeria_bzip2_identity <- mean(test_listeria)
test_listeria <- diag(listeria_snacc_gzip.matrix)
listeria_gzip_identity <- mean(test_listeria)
test_listeria <- diag(listeria_snacc_lz4.matrix)
listeria_lz4_identity <- mean(test_listeria)
test_listeria <- diag(listeria_snacc_lzma.matrix)
listeria_lzma_identity <- mean(test_listeria)
test_listeria <- diag(listeria_snacc_zlib.matrix)
listeria_zlib_identity <- mean(test_listeria)
#Create correlation matrix
matrix_list_listeria <- list(mash = dist(listeria_mash.matrix),
bzip2 = dist(listeria_snacc_bzip2.matrix),
gzip = dist(listeria_snacc_gzip.matrix),
lz4 = dist(listeria_snacc_lz4.matrix),
lzma = dist(listeria_snacc_lzma.matrix),
zlib = dist(listeria_snacc_zlib.matrix))
temp <- c()
for(x in matrix_list_listeria){
for(y in matrix_list_listeria){
z <- mantel.rtest(x,y,nrepet=100)
temp <- c(temp, z$obs) # keep only the observed Mantel statistic, not the whole randtest object
}
}
correlation_lis.matrix <- matrix(
temp,
nrow=6,
ncol=6
)
row.names(correlation_lis.matrix) <- names(matrix_list_listeria)
colnames(correlation_lis.matrix) <- names(matrix_list_listeria)
#plot correlation matrix
test <- corrplot(correlation_lis.matrix, type = "lower", order = "hclust",
tl.col = "black", tl.srt = 35, insig="p-value", sig.level = -1)
plot(test)
#test <- cophylo(listeria_realtr, listeria_snacc_lzma_bionj, fsize=0.7)
#plot(test, fsize=0.5)
|
253d240ca34ecdf70de35208ccb7536841137138 | 584167605daffbc5d5046f43a34030f12185d815 | /man/get_info.Rd | b679b5a6bad998f3d137bbb569ae3aebe95fdb94 | [] | no_license | ibarraespinosa/openmpf | 1be34733f095b4313588c94ed293af1548af9c27 | c55939295f7642ee0063d141111a197b2997c803 | refs/heads/master | 2021-02-19T10:40:59.087134 | 2020-03-06T21:31:09 | 2020-03-06T21:31:09 | 245,305,220 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 229 | rd | get_info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_env_info.R
\name{get_info}
\alias{get_info}
\title{get info}
\usage{
get_info()
}
\description{
\code{\link{get_info}} displays environment info
}
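% Added minimal example; get_info() takes no arguments (see \usage above):
\examples{
get_info()
}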
|
2f944d5bfe7b18bfa13a64cc2b92be8b35127487 | c59ded81315e5651b49437d0be06ea66e4d10f5f | /R/retrieve_aop_data.R | 35ee4d886987d547288f1deab52549407740cedb | [
"MIT"
] | permissive | vscholl/neonVegWrangleR | 7870d4e5b6ff56b8ae51763d2fe5c3f492032cb3 | e2bf156d07fbc3cbe8d2685adbc484d78540b949 | refs/heads/master | 2021-09-08T23:27:55.657138 | 2021-09-03T15:28:35 | 2021-09-03T15:28:35 | 215,654,953 | 1 | 2 | null | 2021-09-03T15:28:36 | 2019-10-16T22:22:27 | R | UTF-8 | R | false | false | 2,781 | r | retrieve_aop_data.R | #' download AOP data where vst data exists for specified year and site
#'
#'
#' @param data A data.frame of plot locations with columns plotID, siteID,
#'   utmZone, plotEasting, plotNorthing and year
#' @param year year of the AOP flight campaign (default 2019)
#' @param products character vector of NEON AOP data product codes to download
#' @return invisibly returns NULL; the requested AOP tiles are written to disk
#' @export
#' @examples
#' \dontrun{
#' # vst_data: a data.frame of plot locations as described above
#' retrieve_aop_data(vst_data, year = 2019, products = "DP3.30006.001")
#' }
#' @importFrom magrittr "%>%"
#' @import neonUtilities tidyverse readr
#'
retrieve_aop_data <- function(data, year = 2019,
products = c(#"DP3.30010.001" # lidar-derived DTM, DSM
"DP3.30006.001" # hyperspectral reflectance
#,"DP3.30025.001" # lidar-derived slope, aspect
#,"DP3.30015.001" # canopy height model
#,"DP1.30003.001" # lidar point cloud
)){
library(tidyverse)
library(parallel)
library(reticulate)
library(neonUtilities)
options(scipen = 999)
# extract information needed to get AOP tiles
coords_for_tiles <- data %>%
dplyr::select(plotID, siteID, utmZone, plotEasting, plotNorthing, year)
colnames(coords_for_tiles)[4:5] <- c("easting", "northing")
# collect years per plot per date
#year = substr(year, 1, 4)
#coords_for_tiles <- cbind.data.frame(coords_for_tiles, year)
# snap coordinates to the 1 km AOP tile grid
coords_for_tiles$easting <- as.integer(coords_for_tiles$easting / 1000) * 1000
coords_for_tiles$northing <- as.integer(coords_for_tiles$northing / 1000) * 1000
# get list of tiles with vegetation structure
tiles <- coords_for_tiles[-1] %>% unique
tiles <- tiles[complete.cases(tiles),]
# relabel the eastern STEI plots as CHEQ (that is how they appear on the portal)
which_cheq = tiles$easting > 500000 & tiles$siteID == "STEI"
tiles[which_cheq, "siteID"] <- "CHEQ"
tiles <- tiles %>% unique
# loop through tiles and data products: default is topographic and RS data
for(ii in 1:nrow(tiles)){
for(prd in products){
tryCatch({
#elevation
neonUtilities::byTileAOP(prd,
site = tiles[ii, "siteID"],
year = tiles[ii, "year"],
tiles[ii, "easting"],
tiles[ii,"northing"],
buffer = 0,
check.size = F,
savepath = paste("/orange/ewhite/s.marconi/AOP_Chapter3/",
prd,
"/",
sep = ""))
}, error = function(e) {
print(paste("site",tiles[ii,"siteID"],
"could not be fully downloaded! Error in retrieving:",
prd, "for year", tiles[ii,"year"], ". error returned:", e))
})
}
}
}
|
7c9ff1dc1f763ce6daa8bb72cb525c77c2cf8866 | 912b0a1ed246b67ecb114401cdebe9ae3f359e31 | /Homework1_2/Homework1_2.R | 034f5f77148db92a3e48ad3a6f1a251915516576 | [] | no_license | josemprb/IntelligentDataAnalysis | 5da84b7a10c38dda26d3296119103cac2aa50000 | f168475e3ea85c511f9aaa4b9bef81a2f598063d | refs/heads/main | 2023-07-19T13:29:04.936479 | 2021-08-30T12:42:30 | 2021-08-30T12:42:30 | 401,339,141 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,986 | r | Homework1_2.R | setwd("C:/DATOS/MasterEIT/EntryYear/1Semester/IntelligentDataAnalysis/Labs/HomeDir")
# Importing data set
cars=read.table("cars-PCA.txt")
colnames(cars)=c("mpg","cylinders","engine_displacement","horsepower",
"weight","acceleration","model_year","origin","car_name")
#####
# 1.2.1
### a) Choose a quantitative variable and explore its distribution in terms of
# descriptive measures of center, dispersion, skewness and kurtosis. Is a normal
# model a plausible one for its distribution? If the answer is no, can you think
# of a transformation of the variable that improves normality? Are there any
# outliers?
### b) Choose two quantitative variables and describe their joint bivariate
# distribution. Does it seem to be Normal? Are there any outliers?
### c) Choose a subset of 4 or 5 quantitative variables and explore linear
# relationships through:
# --> R matrix of pairwise correlations
# --> Matrix of partial correlations
# --> Coefficient of determination (function r2multv() we define in R)
# --> The determinant of R (correlation matrix) as an overall measure of
# linear relationships.
# --> An eigenanalysis of matrix R, looking for really small eigenvalues.
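# A minimal sketch for part a) (added; cars is the data frame loaded above,
# with skewness and kurtosis computed by hand to avoid extra packages):
mpg <- cars$mpg
c(mean = mean(mpg), median = median(mpg), sd = sd(mpg), IQR = IQR(mpg))
z <- (mpg - mean(mpg)) / sd(mpg)
c(skewness = mean(z^3), excess_kurtosis = mean(z^4) - 3)
hist(mpg); qqnorm(mpg); qqline(mpg) # visual normality check
boxplot(mpg) # outlier check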
#####
# 1.2.2 Permutation test
install.packages("devtools")
install.packages("ggpubr")
library("devtools")
library("ggpubr")
library("dplyr")
# We first load the data contained in RestaurantTips.rda
load("RestaurantTips.rda")
### A)
## A1) Choose variables Bill and PctTip to analyse their linear dependency through Pearson's
# correlation coefficient. Just looking at the scatterplot, it is hard to tell whether this
# coefficient is significantly different from zero (check this!).
cor.test(RestaurantTips$Bill, RestaurantTips$PctTip, method= "pearson")
ggscatter(RestaurantTips, x = "Bill", y = "PctTip",
add = "reg.line", conf.int = TRUE,
cor.coef = TRUE, cor.method = "pearson",
xlab = "Total Bill", ylab = "Percentage of Tip")
# The sample correlation coefficient is 0.14, which suggests a weak positive linear
# relationship between the amount paid as bill and the tip % that people give.
# However, the p-value of the test is larger than the significance level (0.05),
# so we cannot reject the null hypothesis that the true correlation is 0.
## A2) Conduct a permutation test to test the null hypothesis that the correlation coefficient
# is 0 vs the alternative that it is different from 0. Run R = 10000 simulations.
# We follow the guide given to do the permutation test. First of all, we compute the observed
# correlation between the variables Bill and PctTip.
Ptest_Tips <- select(RestaurantTips, Bill, PctTip)
r_obs = cor(RestaurantTips$Bill, RestaurantTips$PctTip) # r_obs = 0.135
# Then we set number of permutations up to R = 10000 and use set.seed command.
R = 10000
set.seed(1)
# Second and third steps: permute the Bills among the PctTips (sampling without
# replacement, which is what a permutation test requires) and do it 10000 times.
# Then calculate the correlation r between both variables for each permutation;
# it is stored in the r vector.
r <- numeric(R)
aux <- numeric(R)
for (i in 1:R) {
Tip_Rep <- data.frame("PctTip" = RestaurantTips$PctTip, "Bill" = sample(RestaurantTips$Bill))
r_aux = cor(Tip_Rep)
r[i] = r_aux[1,2]
aux[i] <- ifelse(abs(r[i]) > abs(r_obs), 1, 0) # two-sided: the alternative is "different from 0"
}
p_value_A = sum(aux)/R
# ggplot2 (attached together with ggpubr) is used to draw a bar plot that contrasts
# bill sizes and the percentage of tips given.
RestaurantTips$PctTip_Aux <- cut(RestaurantTips$PctTip, c(0,10,20,30,40))
RestaurantTips$Bill_Aux <- cut(RestaurantTips$Bill, c(0,15,30,45,60,75))
table(RestaurantTips$PctTip_Aux)
table(RestaurantTips$Bill_Aux)
ggplot(RestaurantTips, aes(x = PctTip_Aux, y=..prop.., fill = Bill_Aux, group=Bill_Aux)) +
geom_bar(position=position_dodge()) +
xlab("% of Tips") +
ylab("Total Count") +
labs(fill = "Total Bill")
### B) Repeat the analysis deleting the values for three customers that left a tip greater than
# 30% of the bill. These generous customers seem to be outliers.
## B1) Repeat the correlation test, with its scatterplot.
# Include the filter
TipsFiltered <- filter(RestaurantTips, PctTip < 30)
cor.test(TipsFiltered$Bill, TipsFiltered$PctTip, method= "pearson")
ggscatter(TipsFiltered, x = "Bill", y = "PctTip",
add = "reg.line", conf.int = TRUE,
cor.coef = TRUE, cor.method = "pearson",
xlab = "Total Bill", ylab = "Percentage of Tip")
# When plotting, we realise that those three values were outliers, as the distribution
# keeps the same shape without them. The correlation between the variables increases,
# since outliers usually pull this value down. Also, the p-value now drops below 0.05,
# so we reject the null hypothesis (H0) of zero correlation.
## B2) Conduct again the permutation test, without the outliers.
# We follow the guide given to do the permutation test. First of all, we compute the observed
# correlation between the variables Bill and PctTip.
Ptest_Tips <- select(TipsFiltered, Bill, PctTip)
r_obs = cor(TipsFiltered$Bill, TipsFiltered$PctTip) # r_obs = 0.2198
R = 10000
set.seed(1)
# Second and third steps: permute the Bills among the PctTips (sampling without
# replacement) and do it 10000 times. Then calculate the correlation r between
# both variables for each permutation; it is stored in the r vector.
r <- numeric(R)
aux <- numeric(R)
for (i in 1:R) {
Tip_Rep_B <- data.frame("PctTip" = TipsFiltered$PctTip, "Bill" = sample(TipsFiltered$Bill))
r_aux = cor(Tip_Rep_B)
r[i] = r_aux[1,2]
aux[i] <- ifelse(abs(r[i]) > abs(r_obs), 1, 0) # two-sided test
}
# Compute the p-value
p_value_B = sum(aux)/R
TipsFiltered$PctTip_Aux <- cut(TipsFiltered$PctTip, c(0,10,20,30))
TipsFiltered$Bill_Aux <- cut(TipsFiltered$Bill, c(0,15,30,45,60,75))
table(TipsFiltered$PctTip_Aux)
table(TipsFiltered$Bill_Aux)
ggplot(TipsFiltered, aes(x = PctTip_Aux, y=..prop.., fill = Bill_Aux, group=Bill_Aux)) +
geom_bar(position=position_dodge()) +
xlab("% of Tips") +
ylab("Total Count") +
labs(fill = "Total Bill")
|
ef4a12cdd37519d9f96cb2a68893ef097e64468a | e9a5a9e952a9ccac535efe64b96cc730b844677b | /man/setRowHeight-methods.Rd | bb4f3eedaaf007da7b8885e13ba55d20699097e9 | [] | no_license | miraisolutions/xlconnect | 323c22258439616a4d4e0d66ddc62204094196c9 | ae73bfd5a368484abc36638e302b167bce79049e | refs/heads/master | 2023-09-04T05:27:42.744196 | 2023-08-30T07:10:44 | 2023-08-30T07:10:44 | 8,108,907 | 114 | 35 | null | 2023-08-30T07:10:46 | 2013-02-09T11:17:42 | R | UTF-8 | R | false | false | 1,566 | rd | setRowHeight-methods.Rd | \name{setRowHeight-methods}
\docType{methods}
\alias{setRowHeight}
\alias{setRowHeight-methods}
\alias{setRowHeight,workbook,character-method}
\alias{setRowHeight,workbook,numeric-method}
\title{Setting the height of a row in a worksheet}
\description{
Sets the height of a row in a worksheet.
}
\usage{
\S4method{setRowHeight}{workbook,character}(object,sheet,row,height)
\S4method{setRowHeight}{workbook,numeric}(object,sheet,row,height)
}
\arguments{
\item{object}{The \code{\linkS4class{workbook}} to use}
\item{sheet}{The name or index of the sheet to edit}
\item{row}{The index of the row to resize}
\item{height}{The height in points. If \code{height < 0} (default: -1),
the row will be sized to the sheet's default row height.}
}
\details{
Note that the arguments \code{sheet}, \code{row} and \code{height} are
vectorized. As such the row height of multiple rows (potentially on
different worksheets) can be set with one method call.
}
\author{
Martin Studer\cr
Mirai Solutions GmbH \url{https://mirai-solutions.ch}
}
\seealso{
\code{\linkS4class{workbook}}, \code{\link[=setColumnWidth-methods]{setColumnWidth}}
}
\examples{
\dontrun{
# mtcars xlsx file from demoFiles subfolder of package XLConnect
mtcarsFile <- system.file("demoFiles/mtcars.xlsx", package = "XLConnect")
# Load workbook
wb <- loadWorkbook(mtcarsFile)
# Sets the row height of the 1st row on sheet 'mtcars'
# to 20 points
setRowHeight(wb, sheet = "mtcars", row = 1, height = 20)
}
}
\keyword{methods}
\keyword{utilities}
|
fa373cb51dbc58bef621228731ef59ba6d05014d | 7e6fb336d601caafe06cc417c988e5674ac50fb0 | /IsoriX/inst/NEWS.Rd | bcbea37180e2f21e3cae05081c1f4a09a1ddcf60 | [] | no_license | courtiol/IsoriX | a5da2b3d4aa87408a3a4e3c2f4ea832d6ed5a777 | f85641cd7741b6bd004fa0443af67d50d8dbd199 | refs/heads/master | 2023-02-10T09:55:32.368822 | 2023-01-13T01:44:35 | 2023-01-13T01:44:35 | 68,238,793 | 12 | 6 | null | 2023-01-13T01:44:36 | 2016-09-14T20:00:46 | R | UTF-8 | R | false | false | 15,332 | rd | NEWS.Rd | \name{NEWS}
\title{IsoriX News}
\encoding{UTF-8}
\section{version 1.0}{
\subsection{Upcoming features planned for future releases}{
\itemize{
\item (version 1.0 does not exist yet)
\item feature requests can be defined and watched here: \url{https://github.com/courtiol/IsoriX/issues}
}
}
}
\section{version 0.9}{
\subsection{Solved BUGS}{
\itemize{
\item the previous release introduced an error in how the variance of the assignment test is computed in the absence of calibration (with important consequences for assignments). This is now fixed (#151).
}
}
\subsection{Minor change}{
\itemize{
\item the package 'colorspace' is now suggested to avoid a note in R CMD check.
}
}
}
\section{version 0.8.3}{
\subsection{New features}{
\itemize{
\item the function `calibfit()` gains an argument method that allows for selecting one of four calibration methods ("wild", "lab", "desk", "desk_inverse"). This allows users to use 1) calibration samples associated with unknown environmental isotopic values, 2) calibration samples associated with known environmental isotopic values, or 3 & 4) the intercept and slope of a calibration relationship computed by others (e.g. values found in a paper). Note: the desk* methods allow for the consideration of a fractionation factor too (i.e. slope = 0). See \code{calibfit} for details. (#20 & #142)
\item the function `getelev()` has been completely rewritten so as to rely on the package **elevatr** to download elevation data. You should check `?getelev` to learn how to use the new version of the function, but we retained the core principle of the previous function so that old workflows will only require minor adjustments. The new version still saves a *.tif file on the disk, albeit using a different file name to avoid (data) confusion. (#140 & #107)
\item the function `isofind()` gains an argument `neglect_covPredCalib` that allows for the computation of a covariance term that was so far neglected in IsoriX. See `?isofind` for details. (#143)
\item the function `prepraster()` gains an argument `values_to_zero` to turn a range of elevation values to zeros (nullify negative elevation values by default). This is particularly useful because the new version of `getelev()` downloads an elevation raster that includes bathymetry.
\item new internal function `.invert_reg()` to invert regression (used for method "desk_inverse" in `calibfit()`)
}
}
\subsection{Minor change}{
\itemize{
\item when calling `plot()` on an object created with `calibfit()`, the plotting function now returns the fitted values and CI for users to be able to make alternative plots (#44)
\item new argument `xlim` for the plotting function for calibration fits
\item new argument `line` for customising how to plot the regression line in calibration fits
\item the summary method for calibration fits now displays the residual variance
\item `calibfit()` performs more checks on extrapolation (#119)
\item when using `plot()` on an object of class ISOFIT, the x-axis of the plot showing the Matern correlation should now have a more adequate range, even when the autocorrelation is strong over short distances (#134)
\item documentation for `?plot()` now contains a description of what symbols mean in plots (#138)
\item when calling `plot()` on an object created with `isofind()`, the plotting function now detects samples of size 1 and no longer displays "Group" in the title of the assignment plot even if `who` = "group" (#120)
\item all functions accepting a `data.frame` as input should also now be compatible when provided with a `tibble` (#118)
\item typos have been corrected (#130)
\item default y-axis title changed to "Isotopic value in the environment" when plotting calibration fits to be flexible enough irrespective of the methods used in `calibfit()`
}
}
\subsection{Geeky change}{
\itemize{
\item the arguments `long_min`, `long_max`, `lat_min` & `lat_max` of the function `prepsources()` now have explicit default values and should no longer be missing.
\item the version of spaMM required by IsoriX has changed to 3.13 so as to benefit from a new extractor we rely on for the computation of the 4th variance term during assignment (#143)
\item the functions depending on the package RandomFields are no longer available, since that package has been (for now) retired from CRAN :-(
\item IsoriX should now work with tibbles as inputs (#118)
}
}
\subsection{Solved BUGS}{
\itemize{
\item the printing method for the object of class ISOSCAPE was somehow not exported and thus not used (unreported issue)
\item plotting ISOFIND objects on a sphere did not work in some cases (#126)
}
}
}
\section{version 0.8.2}{
\subsection{New features}{
\itemize{
\item argument ylim for the plotting function for calibration fits
\item it is now possible to calibrate data containing missing isotopic values
\item it is now possible to assign data containing missing isotopic values
}
}
\subsection{Geeky change}{
\itemize{
\item the SpatialPolygons CountryBorders and OceanMask have been rebuilt for possibly improving the compatibility with new sp & rgdal
\item the website for WorldClim has now changed address, so links have been updated
\item rgdal is now listed as a suggested package
}
}
\subsection{Minor change}{
\itemize{
\item several weblinks had changed and have been updated
\item all old defunct functions have been removed from the package
}
}
}
\section{version 0.8.1}{
\subsection{Solved BUGS}{
\itemize{
\item fix issue #113: the plotting function was not working for isoscapes not stored in memory due to a wrong use of the quantile function. Many thanks to Dr. Gary Roemer and Amy Withers for reporting it!
}
}
}
\section{version 0.8}{
\subsection{New features}{
\itemize{
\item the datasets used in Courtiol et al. 2019 are now provided
\item many useful functions from raster, rasterVis, lattice... are now re-exported so they can be used without attaching those packages
\item new option in plots that allows mapping the isoscape onto a sphere
\item a new dataset PrecipBrickDE containing monthly precipitation amounts for Germany
\item an argument y_title for the plotting function for isoscapes to allow one to simply change the title
\item arguments xlab and ylab for the plotting function for calibration fits
\item new method points for plotting more than one calibration fit
\item the plotting function for assignments can now show the location of the assignment samples
}
}
\subsection{Major changes}{
\itemize{
\item the citations for the package have been updated!
\item many objects have been renamed to prepare the release of the version 1.0
\item the vignettes have now been moved to a bookdown. To access the documentation you should now visit: \url{https://bookdown.org/content/782/}
}
}
\subsection{Minor changes}{
\itemize{
\item all arguments 'bla.bla' have been renamed 'bla_bla'
\item the plotting function for calibfit gains an argument "..." for more control
\item a plotting method for rasterLayer has been included for convenience
\item the function relevate is now called prepraster
\item the function prepdata is now called prepsources
\item in several functions the argument elevation.raster has been renamed as raster
\item in several functions the argument xxx.data has been renamed as data
}
}
\subsection{Geeky changes}{
\itemize{
\item the file storing the internal functions is now called zzz.R
\item the dontrun and donttest have been replaced by comments due to new R CMD check flags
\item the function downloadfile is now exported
\item large temporary objects are now deleted within isofind to limit memory usage
\item the package is now being tested using testthat, but tests will be implemented in the future
\item a lot of the internal code has been rewritten to comply more closely with the IsoriX coding style
\item the list of suggested packages has been revised and rgdal removed as it caused (again) problems with Travis CI
\item following a change in spaMM predict.HLfit, the predictions are now made in chunks of 1000 points instead of 150. This should lead to a tiny gain in performance
\item the function isoscape was performing predictions twice every 150 (or now 1000) locations; this was not influencing the isoscapes produced, but it has now been corrected
\item the function prepraster now produces a raster stored in memory if it is possible. This should prevent bugs that appear when using loaded rasters that were previously saved (the temporary link to the hard drive location is no longer correct in this case).
\item the function .objective_fn_calib has been moved within the function calibfit as it is not used elsewhere
\item the function calibfit has been prepared for a possible activation of a random effect for species ID in the future. But whether it would make sense or not remains to be determined.
\item the function .Fisher_method now directly computes the exponential of the log p-value if only one value is provided. This leads to much faster assignment in the case of a single observation.
}
}
\subsection{Solved BUGS}{
\itemize{
\item the plotting function for calibration fit was displaying CI based on variance instead of SD
\item the functions getprecip and prepcipitate were not handling manually defined paths properly
\item the plotting functions were crashing in case of no variation in the landscape
\item the plotting functions were crashing when called on multiple-raster objects not stored 'inMemory'
\item the plotting function for fitted models was not displaying one plot in RStudio when called on objects of class MULTIISOFIT
}
}
}
\section{version 0.7.1}{
\subsection{New features}{
\itemize{
\item this is a minor update necessary to maintain compatibility with spaMM 2.4
}
}
\subsection{Geeky changes}{
\itemize{
\item the syntax for the extraction of correlation terms of spaMM objects has changed
}
}
}
\section{version 0.7}{
\subsection{New features}{
\itemize{
\item the calibration step is now optional, allowing users to use an isoscape directly fitted on tissues instead of precipitation water
\item the function queryGNIP has been renamed and is now called prepdata; this function can also handle other datasets than GNIP
\item the function relevate has been modified to make cropping possible around the Pacific meridian -180/180 (but several issues remain to handle extra plot layers automatically)
}
}
\subsection{Geeky changes}{
\itemize{
\item an additional option has been added to prevent prompting during examples
\item new internal function .converts_months_to_numbers
}
}
}
\section{version 0.6}{
\subsection{New features}{
\itemize{
\item the maximum duration of running time for examples can now be controlled using IsoriX.options(example_maxtime = XX)
\item due to new GNIP policies, we no longer provide the GNIP dataset for the entire World, but only a subset containing data for Germany (users should thus compile their precipitation data themselves from the 'wiser' platform provided by GNIP; see vignette Workflow)
\item it is now possible to control the colours and labels for the levels of isotopes or p-values in plots
\item for plotting, it is no longer needed to load the ocean mask and country borders (it now happens automatically)
\item the function relevate now allows for a cropping larger than the extent of the weather stations by means of the argument margin_pct
\item it is now possible to create the so-called annual averaged precipitation isoscapes!
\item queryGNIP can now split the dataset per month or year at each location during the aggregation
\item new function prepcipitate to prepare the precipitation brick
\item new function getprecip to download monthly precipitation rasters from WorldClim
\item new function isomultifit fitting isoscapes per strata (month, year, or any "split")
\item new function isomultiscape building isoscapes averaged across strata
\item new function create_aliens for simulating organism data
}
}
\subsection{Minor changes}{
\itemize{
\item the inputs for filtering data by month or year using queryGNIP have changed
\item the default fixed effect structure for the mean model in isofit has changed
}
}
\subsection{Geeky changes}{
\itemize{
\item the namespace is now generated with Roxygen2
\item the datasets are now 'lazy-loaded'
\item new vignette for coding conventions
\item changed some object names following our coding convention (more to come)
}
}
}
\section{version 0.5}{
\subsection{Solved BUGS}{
\itemize{
\item the package could not be detached and reloaded
\item the citation was not correct
\item the path in getelev was breaking in some cases
\item the title of the assignment plot was missing when a single individual was plotted
}
}
\subsection{New feature(s)}{
\itemize{
\item new vignette explaining how to export spatial objects to GIS
\item the file GNIPdata has been updated and now contains 2014 data
\item names of all functions and objects have been refactored to remove upper cases
\item links to our GitHub directory have been added
\item new function downloadfile to download non-standard elevation rasters or any other file
\item function getelev can perform MD5 sum checks if the package 'tools' is installed
\item function getelev can display additional information during download if verbose > 1
\item the column animalID in the assignment dataset can now handle names with spaces
\item added Codecov to track test coverage for the package
}
}
\subsection{Minor changes}{
\itemize{
\item the modification of the option set_ll_warn from the 'sp' package has been moved to onLoad (instead of onAttach) and the original state is now restored while unloading 'IsoriX'
\item the Earth distance method has been moved to the package 'spaMM'
\item function getelev lost its 'address' argument as downloadfile should now be used to download non-standard elevation rasters
\item some typos fixed in documentation files
\item RandomFields moved to suggest
\item .Rd files for documentation are now generated with Roxygen2
\item queryGNIP is now provided with a single month argument specifying the months to select
}
}
}
\section{version 0.4-1}{
\subsection{New feature(s)}{
\itemize{
\item this was the first version of IsoriX submitted to CRAN
}
}
}
|
2b2f91401c24ce03bdd83039a98678b3754516f3 | d8f7bfbe482d98ead30ac58b9c7ae4c254e93579 | /Modern Methods of Data Analysis/script9.R | a88ce0a9b28d50473616819d63832a510473e37d | [] | no_license | Leoberium/Rmisc | 1aa29ab9883f319bdf6b684f78f738079869efee | 7f48c97dd81ae3fcdcbeb364e69c588754b07946 | refs/heads/master | 2020-09-25T05:49:40.574778 | 2019-12-04T19:50:27 | 2019-12-04T19:50:27 | 225,931,702 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,411 | r | script9.R | library(caret)
library(doMC)
library(glmnet)
library(MASS)
library(pROC)
registerDoMC(8)
load('training.RData')
load('testing.RData')
str(training)
str(testing)
load('pre2008Data.RData')
load('year2008Data.RData')
str(pre2008Data)
str(year2008Data)
fullSet <- names(training)[names(training) != "Class"]
predCorr <- cor(training[, fullSet])
highCorr <- findCorrelation(predCorr, .99)
fullSet <- fullSet[-highCorr]
isNZV <- nearZeroVar(training[, fullSet],
saveMetrics = TRUE, freqCut = floor(nrow(training)/5))
fullSet <- rownames(subset(isNZV, !nzv))
str(fullSet)
reducedSet <- rownames(subset(isNZV, !nzv & freqRatio < floor(nrow(training)/50)))
reducedSet <- reducedSet[(reducedSet!= "allPub") &
(reducedSet != "numPeople") &
(reducedSet != "Mar") &
(reducedSet != "Sun")
]
str(reducedSet)
# Logistic Regression
ctrl <- trainControl(summaryFunction = twoClassSummary, classProbs = TRUE,
savePredictions = TRUE)
set.seed(476)
lrReduced <- train(training[, reducedSet], y = training$Class,
method = "glm", metric = "ROC", trControl = ctrl)
lrReduced
head(lrReduced$pred)
lrTestClasses <- predict(lrReduced, newdata = testing[, reducedSet])
lrTestProbs <- predict(lrReduced, newdata = testing[, reducedSet],
type = "prob")
confusionMatrix(data = lrTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc <- roc(response = testing$Class,
predictor = lrTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc, legacy.axes = TRUE)
auc(reducedRoc)
modelstats <- c(0.908, 0.838, 0.783, 0.860)
names(modelstats) <- c("AUC", "Accuracy", "Sensitivity", "Specificity")
# confusionMatrix(data = lrReduced$pred$pred,
# reference = lrReduced$pred$obs)
# reducedRoc <- roc(response = lrReduced$pred$obs,
# predictor = lrReduced$pred$successful,
# levels = rev(levels(lrReduced$pred$obs)))
# plot(reducedRoc, legacy.axes = TRUE)
# auc(reducedRoc)
# Linear Discriminant Analysis
set.seed(476)
ldaFit <- train(x = training[, reducedSet], y = training$Class,
method = "lda", preProcess = c("center", "scale"),
metric = "ROC",
trControl = ctrl)
ldaFit
head(ldaFit$pred)
ldaTestClasses <- predict(ldaFit, newdata = testing[, reducedSet])
ldaTestProbs <- predict(ldaFit, newdata = testing[, reducedSet],
type = "prob")
confusionMatrix(data = ldaTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc2 <- roc(response = testing$Class,
predictor = ldaTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc2, legacy.axes = TRUE)
auc(reducedRoc2)
modelstats <- rbind(modelstats, c(0.921, 0.849, 0.825, 0.863))
rownames(modelstats) <- c("LR", "LDA")
# Partial Least Squares Discriminant Analysis
set.seed(476)
plsFit <- train(x = training[, reducedSet], y = training$Class,
method = "pls", tuneGrid = expand.grid(.ncomp = 1:10),
preProcess = c("center", "scale"),
metric = "ROC",
trControl = ctrl)
plsFit
head(plsFit$pred)
plsTestClasses <- predict(plsFit, newdata = testing[, reducedSet])
plsTestProbs <- predict(plsFit, newdata = testing[, reducedSet],
type = "prob")
confusionMatrix(data = plsTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc3 <- roc(response = testing$Class,
predictor = plsTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc3, legacy.axes = TRUE)
auc(reducedRoc3)
modelstats <- rbind(modelstats, c(0.921, 0.849, 0.841, 0.854))
rownames(modelstats) <- c("LR", "LDA", "PLS")
plot(plsFit)
plsImpGrant <- varImp(plsFit, scale = TRUE)
plot(plsImpGrant)
# Penalized Models
glmnGrid <- expand.grid(alpha = c(0, .1, .2, .4, .6, .8, 1),
lambda = seq(.01, .2, length = 40))
glmnTuned <- train(training[, fullSet], y = training$Class,
method = "glmnet", tuneGrid = glmnGrid,
preProcess = c("center", "scale"),
metric = "ROC",
trControl = ctrl)
plot(glmnTuned, plotType = "level")
glmnTuned
head(glmnTuned$pred)
glmnTestClasses <- predict(glmnTuned, newdata = testing[, fullSet])
glmnTestProbs <- predict(glmnTuned, newdata = testing[, fullSet],
type = "prob")
confusionMatrix(data = glmnTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc4 <- roc(response = testing$Class,
predictor = glmnTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc4, legacy.axes = TRUE)
auc(reducedRoc4)
modelstats <- rbind(modelstats, c(0.931, 0.857, 0.873, 0.848))
rownames(modelstats) <- c("LR", "LDA", "PLS", "GLMN")
modelstats
# Continuing with non-linear models
# Mixture discriminant analysis
load('mdaFit.RData')
plot(mdaFit)
mdaFit
mdaTestClasses <- predict(mdaFit, newdata = testing[, fullSet])
mdaTestProbs <- predict(mdaFit, newdata = testing[, fullSet],
type = "prob")
confusionMatrix(data = mdaTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc5 <- roc(response = testing$Class,
predictor = mdaTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc5, legacy.axes = TRUE)
auc(reducedRoc5)
modelstats <- rbind(modelstats, c(0.921, 0.849, 0.825, 0.863))
rownames(modelstats) <- c("LR", "LDA", "PLS", "GLMN", "MDA")
modelstats
# Neural network
load('nnetFit.RData')
plot(nnetFit)
nnetFit
nnetTestClasses <- predict(nnetFit, newdata = testing[, fullSet])
nnetTestProbs <- predict(nnetFit, newdata = testing[, fullSet],
type = "prob")
confusionMatrix(data = nnetTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc6 <- roc(response = testing$Class,
predictor = nnetTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc6, legacy.axes = TRUE)
auc(reducedRoc6)
modelstats <- rbind(modelstats, c(0.919, 0.842, 0.820, 0.854))
rownames(modelstats) <- c("LR", "LDA", "PLS", "GLMN", "MDA", "NNET")
modelstats
# SVM
load('svmRModel.RData')
svmRModel # predictions from this saved fit fail, so the SVM is not evaluated further
# KNN
load('knnFit.RData')
knnFit
knnTestClasses <- predict(knnFit, newdata = testing[, fullSet])
knnTestProbs <- predict(knnFit, newdata = testing[, fullSet],
type = "prob")
confusionMatrix(data = knnTestClasses, reference = testing$Class,
positive = "successful")
reducedRoc7 <- roc(response = testing$Class,
predictor = knnTestProbs$successful,
levels = rev(levels(testing$Class)))
plot(reducedRoc7, legacy.axes = TRUE)
auc(reducedRoc7)
modelstats <- rbind(modelstats, c(0.830, 0.697, 0.307, 0.921))
rownames(modelstats) <- c("LR", "LDA", "PLS", "GLMN", "MDA", "NNET", "KNN")
modelstats
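# Optional sketch: overlay the test-set ROC curves computed above to compare
# the classifiers in one plot (assumes reducedRoc2..reducedRoc7 are still in
# memory; colours are arbitrary).
plot(reducedRoc2, legacy.axes = TRUE, col = "black")
plot(reducedRoc3, add = TRUE, col = "blue")
plot(reducedRoc4, add = TRUE, col = "red")
plot(reducedRoc5, add = TRUE, col = "darkgreen")
plot(reducedRoc6, add = TRUE, col = "orange")
plot(reducedRoc7, add = TRUE, col = "purple")
legend("bottomright", legend = c("LDA", "PLS", "GLMN", "MDA", "NNET", "KNN"),
       col = c("black", "blue", "red", "darkgreen", "orange", "purple"), lwd = 1)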
# NaiveBayes
load('nbPredictors.RData')
load('nbTesting.RData')
load('nbTraining.RData')
load('nBayesFit.RData')
nBayesFit
|
7cc84c72311362d961a5fc9c980fb685aafad1ab | a5f3268b700913ea94ae6ba21caa5144d234703a | /R/methods-sensNumber.R | 4b6cea2950c964fc3da357d9d5057a2bbf142cfb | [] | no_license | saisaitian/PharmacoGx | d2e7d20e5fc8ae40c169b57c71fbe380d0841424 | eeee68a021549bdfab23e85cdf57c15ae54a52cc | refs/heads/master | 2023-01-24T14:31:16.963359 | 2020-11-23T18:16:15 | 2020-11-23T18:16:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,533 | r | methods-sensNumber.R | #' sensNumber Getter
#'
#' Get the sensitivity numbers for a `PharmacoSet` object
#'
#' @describeIn PharmacoSet Return the summary of available sensitivity
#' experiments
#'
#' @examples
#' data(CCLEsmall)
#' sensNumber(CCLEsmall)
#'
#' @param object A \code{PharmacoSet}
#' @return A \code{data.frame} with the number of sensitivity experiments per
#' drug and cell line
#'
#'
#' @importFrom CoreGx sensNumber
#' @importFrom methods callNextMethod
#'
#' @export
setMethod(sensNumber, 'PharmacoSet', function(object){
callNextMethod(object=object)
})
#' Return the number of times each row-column combination occurs in a LongTable
#'
#' Reconstruct the @sensitivity$n list data from a LongTable object. This allows
#' backwards compatibility with the current accessors for the @sensitivity
#' list object.
#'
#' @section WARNING:
#' Because a LongTable has incomplete information about the rows
#' and columns present in a CoreSet, this function is unable to zero
#' pad missing rows and columns. This will need to be implemented in the
#' sensNumber method for a class inheriting from the CoreSet. For example,
#' in a `PharmacoGx::PharmacoSet`, `LongTable` rows are cells and columns
#' are drugs.
#'
#' @param longTable A [`LongTable`] longTable object to rebuild the results
#' of sensNumber for.
#'
#' @return [`matrix`] A row by column matrix containing a count for the number
#' of times a row-column combination occurs in a LongTable object.
#'
#' @keywords internal
#' @noRd
.rebuildN <- function(longTable) {
# Extract the information needed to reconstruct the sensitivityRaw array
meta <- assay(longTable, 'experiment_metadata')[, .(rn, rowKey, colKey)]
setkeyv(meta, c('rowKey', 'colKey'))
rowData <- rowData(longTable, key=TRUE)[, .(cellid, drug_cell_rep, rowKey)]
setkeyv(rowData, 'rowKey')
colData <- colData(longTable, key=TRUE)[, .(drugid, drug_cell_rep, colKey)]
setkeyv(colData, 'colKey')
# join the tables into the original data
num <- merge.data.table(meta, rowData, all=TRUE)
setkeyv(num, 'colKey')
num <- merge.data.table(num, colData, all=TRUE)[, .(cellid, drugid, drug_cell_rep.x)]
num <- dcast(num, cellid ~ drugid, value.var='drug_cell_rep.x',
fun.aggregate=max, fill=0)
#num <- unique(num)
rownames <- num$cellid
num[, cellid := NULL]
num <- as.matrix(num)
rownames(num) <- rownames
return(num)
}
## TODO:: Make this a unit test
## all.equal(num[rownames(SN), colnames(SN)], SN) |
0ae420e65994c01230d0d12f3b0b97999dcdd31f | 1ca0218682294cf8d638022589fdac63456fe540 | /Documents/BigDataCourse/Projects/Project 3-1/UCI HAR Dataset/Submitted files/run_analysis.R | 7c662ef8ea4cbcc7dd73bc0bc2f6ef85507d5d6b | [] | no_license | Sausan/Tidy-Data | c32109cb50408c4422ecb6eb3f38461373e14b15 | 2774bfb21b3003ebb6c9a77de199e1f864fd853e | refs/heads/master | 2021-01-15T17:07:07.946302 | 2015-03-22T16:17:43 | 2015-03-22T16:17:43 | 31,187,230 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,739 | r | run_analysis.R | run_analysis <- function(){
library(dplyr)
# 1- read the test data frames from the three files in the test folder
test_subject <- read.table("test/subject_test.txt")
X_test <- read.table("test/X_test.txt")
Y_test <- read.table("test/Y_test.txt")
# 2- read the features from the feature file in the main folder
names_test <- read.table("features.txt")
# 3- assign a header to the data in each data frame
names(test_subject)<-c("subject_id")
names(X_test) <- names_test[,2]
names(Y_test) <- c("activity_label")
# 4- generate the complete test_data
test_data <- cbind(test_subject, Y_test, X_test)
# 5- read the training data frames from the three files in the train folder
train_subject <- read.table("train/subject_train.txt")
X_train <- read.table("train/X_train.txt")
Y_train <- read.table("train/Y_train.txt")
# 6- read the features from the feature file in the main folder
names_train <- read.table("features.txt")
# 7- assign a header to the data in each data frame
names(train_subject)<-c("subject_id")
names(X_train) <- names_test[,2]
names(Y_train) <- c("activity_label")
# 8- generate the complete test_data
train_data <- cbind(train_subject, Y_train, X_train)
# 9- Merge the two data sets
All_data <- rbind(test_data,train_data)
# 10 - Extracts only the measurements on the mean and standard deviation for each measurement
## First remove the duplicates
Col_names <- names(All_data)
new_names<-make.names(Col_names, unique = TRUE, allow_ = FALSE)
names(All_data)<-new_names
# 11-Select the columns that have mean and standard Deviation only
mean_data <- select(All_data, matches("subject.id"), matches("activity.label"),grep(".mean..",names(All_data)),grep(".std..",names(All_data)))
#print (dim(mean_data))
#print (head(mean_data))
# 12 - Uses descriptive activity names to name the activities in the data set
## Read the activitiy names from the activity file
activity_name <- read.table("activity_labels.txt")
# 13- creat names to the descriptive activity variables
names(activity_name)<-c("activity.label", "activity.Name")
## The dataset with descriptive activity names
descriptive_data <- merge(mean_data,activity_name, all=TRUE)
#14- creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_data <- descriptive_data %>% group_by(subject.id, activity.label,activity.Name) %>% summarise_each(funs(mean))
print(names(tidy_data))
# Write the tidy data into a text file
write.table(tidy_data, file = "tidy_data.txt",sep=",", qmethod = "double")
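  # Sketch: the exported file can be read back in to verify the result, e.g.
  # check <- read.table("tidy_data.txt", sep = ",", header = TRUE)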
print("Done!!")
# return(head(All_data[1:3]))
} |
5fcfb359e04761668abfdcf1f6df4fbe792aa5b9 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/r/generated/R/ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfo.r | 7569edec2f0cff3e49653d1cde57af19c20da66a | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | R | false | false | 4,558 | r | ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfo.r | # Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfo <- R6::R6Class(
'ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfo',
public = list(
`pid` = NULL,
`title` = NULL,
`description` = NULL,
`properties` = NULL,
initialize = function(`pid`, `title`, `description`, `properties`){
if (!missing(`pid`)) {
stopifnot(is.character(`pid`), length(`pid`) == 1)
self$`pid` <- `pid`
}
if (!missing(`title`)) {
stopifnot(is.character(`title`), length(`title`) == 1)
self$`title` <- `title`
}
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`properties`)) {
stopifnot(R6::is.R6(`properties`))
self$`properties` <- `properties`
}
},
toJSON = function() {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject <- list()
if (!is.null(self$`pid`)) {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject[['pid']] <- self$`pid`
}
if (!is.null(self$`title`)) {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject[['title']] <- self$`title`
}
if (!is.null(self$`description`)) {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject[['description']] <- self$`description`
}
if (!is.null(self$`properties`)) {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject[['properties']] <- self$`properties`$toJSON()
}
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject
},
fromJSON = function(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoJson) {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject <- jsonlite::fromJSON(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoJson)
if (!is.null(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`pid`)) {
self$`pid` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`pid`
}
if (!is.null(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`title`)) {
self$`title` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`title`
}
if (!is.null(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`description`)) {
self$`description` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`description`
}
if (!is.null(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`properties`)) {
propertiesObject <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckProperties$new()
propertiesObject$fromJSON(jsonlite::toJSON(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$properties, auto_unbox = TRUE))
self$`properties` <- propertiesObject
}
},
toJSONString = function() {
sprintf(
'{
"pid": %s,
"title": %s,
"description": %s,
"properties": %s
}',
self$`pid`,
self$`title`,
self$`description`,
self$`properties`$toJSON()
)
},
fromJSONString = function(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoJson) {
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject <- jsonlite::fromJSON(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoJson)
self$`pid` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`pid`
self$`title` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`title`
self$`description` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$`description`
ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckPropertiesObject <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckProperties$new()
self$`properties` <- ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckPropertiesObject$fromJSON(jsonlite::toJSON(ComAdobeGraniteQueriesImplHcQueryLimitsHealthCheckInfoObject$properties, auto_unbox = TRUE))
}
)
)
|
dc1ce7d5364794b6ad14c062e30e2260c50ba645 | 94c16636d7d4c98c918fde5096cf3a4118c02415 | /R/lincomb.R | 76eecaaf6eb8d520d04ff4bb7208a5e016ca2d21 | [] | no_license | RandiLGarcia/dyadr | 66c87d6be3b3eb4e7bf37568dc43f6e037d34961 | 5d317dceb2e278887b9684e172bd79a0c12974af | refs/heads/master | 2021-07-14T20:50:59.289227 | 2021-03-17T13:27:55 | 2021-03-17T13:27:55 | 61,908,363 | 17 | 14 | null | 2020-07-29T15:24:03 | 2016-06-24T19:43:21 | R | UTF-8 | R | false | false | 1,330 | r | lincomb.R | #' @name lincomb
#' @title Tests of contrasts
#'
#' Test the sum (S), the average (A), or the difference (D) of two effects from the same model.
#'
#'
#' @param outp the fitted model object; it can be a gls or lme fit. summary() is applied internally.
#' @param v1 is the number of the first effect.
#' @param v2 is the number of the second effect.
#' @param fun is the comparison. Default fun = "D". Other options include "S" the sum, and "A" the average.
#' @importFrom stats pnorm
#' @details Returns a vector of length 3: the estimate of the chosen combination of the two effects, its standard error, and a two-sided p-value from the normal approximation.
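#' @examples
#' \dontrun{
#' ## Minimal sketch; the data set and effect positions are hypothetical:
#' fit <- nlme::gls(outcome ~ pred1 + pred2, data = dyad_data)
#' lincomb(fit, v1 = 2, v2 = 3, fun = "D")  # test the difference of effects 2 and 3
#' }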
#' @export
#'
#
lincomb <- function(outp,v1,v2,fun="D"){
outp = summary(outp)
vval = matrix()
if(class(outp)[2]=="gls"){
c1=as.numeric(outp$coefficients[v1])
c2=as.numeric(outp$coefficients[v2])
cccov =outp$varBeta[v2,v1]
}
if(class(outp)[2]=="lme"){
c1=as.numeric(outp$coefficients$fixed[v1])
c2=as.numeric(outp$coefficients$fixed[v2])
cccov =outp$varFix[v2,v1]
}
c1se=outp$tTable[v1,2]
c2se=outp$tTable[v2,2]
if(fun=="A")vval[1]=(c1+c2)/2
if(fun=="S")vval[1]=c1+c2
if(fun=="D")vval[1]=c1-c2
if(fun=="A")vval[2]=(c1se^2+c2se^2+2*cccov)/4
if(fun=="S")vval[2]=c1se^2+c2se^2+2*cccov
if(fun=="D")vval[2]=c1se^2+c2se^2-2*cccov
vval[2]=sqrt(vval[2])
vval[3] = 2-2*pnorm(abs(vval[1]/vval[2]))
return(vval)
} |
7942b0244f0fb6f2bbb672ea126594a9adbeb448 | 7cccd60294728a159c0063cdda0798905ab03ecf | /SVM.R | 0e86ba50b6281e743018fd02150b46cb1f52c244 | [] | no_license | kalitiptur/mycode-R | fb70756b70f2ab02ac0a11bed0d6eec2729614d9 | fb1a0d9a631806bbb822e8fd2ce33691b59ddbdd | refs/heads/master | 2021-09-04T16:58:39.569352 | 2020-11-30T06:56:46 | 2020-11-30T06:56:46 | 191,102,554 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 301 | r | SVM.R | library(e1071)
set.seed(1)
x = matrix(rnorm(20*2), ncol = 2)
x
y = c(rep(-1,10), rep(1, 10))
y
x[y==1,]=x[y==1,] + 1
plot(x, col = (3-y))
dat = data.frame(x=x, y = as.factor(y))
head(dat)
svmfit = svm(y~.,data = dat, kernel = "linear" , cost = 10,scale = FALSE )
plot(svmfit, dat)
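# Inspect the fit (sketch): model summary and which rows of dat were kept as
# support vectors.
summary(svmfit)
svmfit$index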
|
fc51a2d4ce6ac46b5077c0d34abe07984272c565 | feaf72289a4f75ddf283fd4319a69ba603931432 | /Session8_TimeSeries_II/02_VSN.r | ec51df7773dc977ba5e5924ab2a67756c768a31b | [] | no_license | macomino/series-temporales | 41523a74a1cfdcab9b96159f124a33dfa564d94d | aca4899ef3c006f44bec8def0fa897077d11afa0 | refs/heads/master | 2020-04-27T20:46:23.689077 | 2019-03-15T20:52:56 | 2019-03-15T20:52:56 | 174,670,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,576 | r | 02_VSN.r | autoplot(elec)
BoxCox.lambda(elec)
autoplot(BoxCox(elec, lambda = 0.26))
############################################################
# DATA TRANSFORMATION
###########################################################
# We adjust time-series data because, in general, cleaner and clearer data
# lead to better and simpler forecasts.
# There are four kinds of adjustments:
# calendar adjustments
dframe <- cbind(Monthly = milk,
DailyAverage = milk/monthdays(milk))
autoplot(dframe, facet=TRUE) +
xlab("Years") + ylab("Pounds") +
ggtitle("Milk production per cow")
# population adjustments (per 100,000, per 10^6, ...)
# inflation adjustments
# If z_t denotes the price index and y_t denotes the
# original house price in year t, then x_t = y_t/z_t * z_2000
# gives the adjusted house price at year 2000 dollar values.
# mathematical transformations
# In the end, the aim is to remove all known sources of variation
# in order to have less variability left to explain.
# CLEARER DATA = MORE ACCURATE FORECASTS
###### NOTE: forecasts built on decompositions are better
fit <- stl(elecequip, t.window=13, s.window="periodic")
fit %>% seasadj() %>% naive() %>%
autoplot() + ylab("New orders index") +
ggtitle("ETS forecasts of seasonally adjusted data")
fit %>% forecast(method='naive') %>%
autoplot() + ylab("New orders index") + xlab("Year")
elecequip %>% stlf(method='naive') %>%
autoplot() + ylab("New orders index") + xlab("Year")
seriesen <- ts(sin(c(0:50)))
autoplot(seriesen)
autoplot(snaive(seriesen,100))
autoplot(naive(seriesen,10))
autoplot(rwf(seriesen,10))
################## VSN ###################################
# use three variance stabilizing methods on souvenir data
autoplot(ts(log(souvenir),start=1,frequency=12))
autoplot(ts(sqrt(souvenir),start=1,frequency=12))
autoplot(ts(souvenir^(1/3),start=1,frequency=12))
autoplot(ts(elec,start=1,frequency=12))
autoplot(ts(log(elec),start=1,frequency=12))
autoplot(ts(sqrt(elec),start=1,frequency=12))
autoplot(ts(elec^(1/3),start=1,frequency=12))
autoplot(BoxCox(elec,lambda = 1/3))
# BoxCox.lambda() can automatically find the
# lambda that best stabilizes the data
lambda=BoxCox.lambda(elec)
autoplot(BoxCox(elec,lambda))
# the snaive() forecasting function applies the Box-Cox transform (and its back-transform) via the lambda argument
fit<-snaive(elec,lambda=1/3)
autoplot(fit,include=120)
# Exercise 2: find lambda for the gas data set
lambda=BoxCox.lambda(gas)
fit<-snaive(gas,lambda=lambda)
autoplot(fit,include=120)
################## Bias adjustment #########################
# If a series is adjusted using a Box-Cox transformation,
# the "back-transformation" of the mean is the median on the original scale.
# This can be a problem if, for example, we want to add up territories.
# In those cases we may prefer a bias adjustment.
#Forecasts of egg prices using a random walk with drift applied
#to the logged data (lambda=0)
#Notice how the skewed forecast distribution pulls up the
#point forecast when we use the bias adjustment.
#Bias adjustment is not done by default in the forecast package.
#If you want your forecasts to be means rather than medians,
#use the argument biasadj=TRUE when you select your Box-Cox
#transformation parameter.
fc <- rwf(eggs, drift=TRUE, lambda=0, h=50, level=80)
fc2 <- rwf(eggs, drift=TRUE, lambda=0, h=50, level=80,
biasadj=TRUE)
autoplot(eggs) +
autolayer(fc, series="Simple back transformation") +
autolayer(fc2, series="Bias adjusted", PI=FALSE) +
guides(colour=guide_legend(title="Forecast"))
|
aa8ee58fe6d62dbc6e3ffb9598c96562748a6f76 | 8f536537be5bf214525ea11bb84c568c9fb82fe7 | /R/gibbs_bin.R | 8bed3b4ae34a5ce9f16b87c532cbaf56d442d751 | [
"MIT"
] | permissive | yuliasidi/bin2mi | 5fa742f72d21034c7def62bb30d078e63c18d2ff | 51ec9b77d0afb0498ca59fbb91fd71e80479dede | refs/heads/master | 2021-06-22T15:01:14.716873 | 2021-02-20T18:53:31 | 2021-02-20T18:53:31 | 197,215,389 | 0 | 0 | NOASSERTION | 2021-02-20T18:53:32 | 2019-07-16T15:00:09 | R | UTF-8 | R | false | false | 1,007 | r | gibbs_bin.R | #' @title Gibbs sampling for beta-binomial distribution
#' @description performs Gibbs sampling for beta-binomial distribution
#' @param dt tibble
#' @param B numeric, Default: 1000, number of iterations
#' @param y.m string, Default: 'y.m', column with incomplete binary data
#' @return complete dataset after B iterations of Gibbs sampler
#' @seealso
#' \code{\link[stats]{Beta}},\code{\link[stats]{Binomial}}
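#' @examples
#' \dontrun{
#' ## Minimal sketch with simulated data; the column names y.m and r are the
#' ## ones this function expects (r == 1 flags a missing value), and the
#' ## sketch assumes both outcome levels occur among the observed values:
#' set.seed(1)
#' dt <- data.frame(y.m = rbinom(100, 1, 0.3), r = rbinom(100, 1, 0.2))
#' dt$y.m[dt$r == 1] <- NA
#' out <- gibbs_bin(dt, B = 500)
#' }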
#' @rdname gibbs_bin
#' @export
#' @importFrom stats rbeta rbinom
gibbs_bin <- function(dt, B = 1000, y.m = 'y.m'){
pstar <- vector('numeric',length = B)
y.im <- gsub('\\.m','.im',y.m)
for(i in 1:B){
if(i==1){
v <- dt[dt$r==0,y.m]
pstar.old <- Inf
}else{
v <- dt[[y.im]]
}
vec <- table(v) + 1
names(vec) <- c('fail','success')
pstar[i] <- stats::rbeta(n = 1, shape1 = vec[['success']], shape2 = vec[['fail']])
dt[[y.im]] <- dt[[y.m]]
dt[dt$r==1,y.im] <- stats::rbinom(sum(dt$r==1), 1, pstar[i])
}
dt$pstar <- pstar[B]
dt
}
|
9c720c64b62595a83aa5c697bed1b2400303985b | c555c086b271eaca27472410f3aa5c97709958d9 | /tests/testthat/dummy.R | 7cbc62fdd8ac1009f548cddc3fdd00b897133d4b | [
"MIT"
] | permissive | filipezabala/embedr | c3e9e299bc1ed9a8e7f811a96c7e53b60fe53b73 | 64eee3d975392c20f2242c9663f607e3790a322e | refs/heads/master | 2023-03-20T14:37:29.654223 | 2020-07-09T07:54:50 | 2020-07-09T07:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 72 | r | dummy.R | # this file just exists for the sake of testing the is.local() function
|
51d15895ac285d3cc0e9a8992bb028f1f9e446ff | 0e595bb86c1a6751c169a32383281ff233d27f40 | /man/qtlSignTest.Rd | 1c679c9d1cfa6db157b1214ea7507f673a96a313 | [] | no_license | pinbo/qtlTools | bd4b5e684c7353eedacc5cf48aec3344a7caa5d2 | 96f6b61e255314f6a5a32e38105c160dc52c037a | refs/heads/master | 2022-01-04T23:47:17.044866 | 2018-10-02T20:52:47 | 2018-10-02T20:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 942 | rd | qtlSignTest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtlSignTest.R
\name{qtlSignTest}
\alias{qtlSignTest}
\title{Conduct the sign test for expression bias}
\usage{
qtlSignTest(effect = NULL, trans.effect = NULL, verbose = T, ...)
}
\arguments{
\item{effect}{Signed cis-eQTL or allele-specific expression log2-fold change data}
\item{trans.effect}{If effect is the signed cis-eQTL effect, one can test the independence
of cis and trans eQTL evolution. Here provide the effect of the trans QTL.}
\item{verbose}{should results be printed to console?}
\item{...}{Additional arguments to pass on to fisher.test / binom.test, such as
null hypotheses.}
\item{id}{label for output}
}
\value{
The results of a binomial test (or Fisher's exact test if trans.effect is specified).
}
\description{
\code{qtlSignTest} uses the Fraser et al. sign test to assess neutral evolution
for a set of expression phenotypes
}
\details{
See ...
}
|
b81be94afecb4e84bc6af2db46dfe99b9f6e678f | 4f094c97c55155204a7b56a6a316646d4868570c | /TSP.R | e585ff7fb484491bf7e37653d52d3e29f84e28d0 | [] | no_license | Super-rookie-Py/DataRst04-TSF | 9f050800bae38c022c1264bd41dca6a93f4bd504 | 12a5bb4e1760563142c10421056dbe0fe2d9b289 | refs/heads/master | 2022-08-12T20:00:40.753067 | 2020-05-20T14:26:56 | 2020-05-20T14:26:56 | 265,590,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,781 | r | TSP.R | ### 2020/05/20 keonwoo Park
## Data Structure 2
## Finding the shortest path
####### Distance Table
data_distance<-matrix(,nrow=5,ncol=5)
data_distance[,]=Inf
data_distance[1,2]=1
data_distance[1,3]=3
data_distance[1,5]=2
data_distance[2,1]=1
data_distance[2,3]=1
data_distance[3,2]=1
data_distance[3,1]=3
data_distance[3,4]=2
data_distance[4,3]=2
data_distance[4,5]=2
data_distance[5,1]=2
data_distance[5,4]=2
### Shortest path
data_distance_1<-NULL
temp_distance<-data.frame(Origin=0,Destination=0,Distance=0)
## index table
index_distance<-data.frame(Origin=1:5,Index=rep(0,time=5))
i1=0
pre_i=0
for(i in 1:5){
for(j in 1:5){
if(is.finite(data_distance[i,j])){
temp_distance$Origin=i
temp_distance$Destination=j
temp_distance$Distance<-data_distance[i,j]
data_distance_1<-rbind(data_distance_1,temp_distance)
## index table
i1=i1+1
if(pre_i!=i){
index_distance$Index[i]=i1
pre_i=i
}
}
}
}
Random_Sequence<-function(x,k,rep=F){
n=length(x)
if(rep==F){
for(i in 1:k){
j=floor(runif(1,min=i,max=(n+1)))
temp_value=x[j]
x[j]=x[i]
x[i]=temp_value
}
}else{
x=x[floor(runif(k,min=1,max=(n+1)))]
}
return(x[1:k])
}
Random_Sequence(1:10,3)   # quick check: draw 3 of 1..10 without replacement
##################################################################################
Random_Sequence_2<-function(d){
size_d=length(d)
for(i in 1:(size_d-1)){
random_index = ceiling(i+runif(1)*(size_d+1-i)-1)
temp_value=d[random_index]
d[random_index]=d[i]
d[i]=temp_value
}
return(d)
}
Selection_Sort_df<- function(d,col_num=1,decreasing=FALSE){
size_d <- length(d[,1])
if(decreasing == FALSE){
for(i1 in 1:(size_d-1)){
min_value = d[i1, col_num]
min_index=i1
for(i2 in (i1+1):size_d){
if(d[i2,col_num]<min_value){
min_value=d[i2,col_num]
min_index=i2
}
}
### Swap
tem_value = d[i1,]
d[i1,] = d[min_index,]
d[min_index,] = tem_value
}
}else{
for(i1 in 1:(size_d-1)){
max_value = d[i1,col_num]
max_index=i1
for(i2 in (i1+1):size_d){
if(d[i2,col_num]>max_value){
max_value=d[i2,col_num]
max_index=i2
}
}
### Swap
tem_value = d[i1,]
d[i1,] = d[max_index,]
d[max_index,] = tem_value
}
}
return(d)
}
### TSP
n= 100
result<-data.frame(x1=rep(0,n),x2=rep(0,n),x3=rep(0,n),
x4=rep(0,n), x5=rep(0,n), distance=rep(0,n))
for(i1 in 1:n){
Sequence=Random_Sequence_2(1:5)
distance=0
for(i2 in 1:4){
distance=distance + data_distance[Sequence[i2],Sequence[i2+1]]
}
distance=distance + data_distance[Sequence[5],Sequence[1]]
result[i1,1:5] =Sequence
result$distance[i1]=distance
}
Selection_Sort_df(result,6)
### TSP
set.seed(1234)
x=runif(100,0,100)
y=runif(100,0,100)
data_distance<-matrix(nrow=100,ncol=100)
for(i1 in 1:99){
for(i2 in (i1+1):100){
data_distance[i1,i2]=((x[i1]-x[i2])^2+(y[i1]-y[i2])^2)^(1/2)
data_distance[i2,i1]=data_distance[i1,i2]
}
}
data_distance
summary(data_distance)
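# Equivalent vectorized construction (sketch): dist() yields the same
# Euclidean distances, with zeros rather than NA on the diagonal.
# data_distance2 <- as.matrix(dist(cbind(x, y)))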
#######################################
n= 100
# Note: each candidate tour below permutes cities 1-5 only, even though the
# distance matrix above covers 100 cities.
result<-data.frame(x1=rep(0,n),x2=rep(0,n),x3=rep(0,n),
x4=rep(0,n), x5=rep(0,n), distance=rep(0,n))
for(i1 in 1:n){
Sequence=Random_Sequence_2(1:5)
distance=0
for(i2 in 1:4){
distance=distance + data_distance[Sequence[i2],Sequence[i2+1]]
}
distance=distance + data_distance[Sequence[5],Sequence[1]]
result[i1,1:5] =Sequence
result$distance[i1]=distance
}
Selection_Sort_df(result,6) |
04dc3809fe443c6adacc2c4e2e26f02a3329fb7e | 9a2e827e3c2a1e739a3222fbb2a30705d18a7a17 | /Graphs Function.R | 90d580f19fbb5760ca35c674b80c8a50cdde4ac9 | [] | no_license | darshan2696/My-Codes | 53192ce2873f01ca88f0b07e19109b9a7aaea7d6 | d7548cd98c84bd7ac6e1d54a0551112188948559 | refs/heads/master | 2022-12-18T11:22:37.584467 | 2020-09-24T07:13:29 | 2020-09-24T07:13:29 | 281,865,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,500 | r | Graphs Function.R | # A FUNCTION THAT EXPORTS GRAPHS
#----------------------------------
View(cars)
Graphs <- function(data,var= 1:ncol(data), direct= "",tresh=10) #feed your working directory path into direct
{
setwd(direct)
  for (i in var)  #var holds the chosen column numbers of the dataset; by default all columns are taken
{
test = table(data[,i])
if(is.numeric(data[,i]) && length(test) > tresh/100*nrow(data)) #tresh is used to ensure that the categorical and numerical variables are classified correctly
{
png(paste(names(data)[i], ".png", sep="")) #NOTE this step
par(mfrow=c(2,1)) #used to display 2 graphs in a single picture
boxplot(data[,i], main = paste("Boxplot of", names(data)[i]), #boxplot
ylab = names(data)[i], col = "maroon", border = "grey5",
horizontal = T)
hist(data[,i], main = paste("Histogram of", names(data)[i]), #histogram
           xlab = names(data)[i], ylab = "Frequency", col = "lightgreen", border=F)
dev.off() #export
}
else
{
png(paste(names(data)[i], ".png", sep=""))
par(mfrow=c(2,1))
barplot(table(data[,i]) , main = paste("Barplot of", names(data)[i]), #barplot
ylab = names(data)[i], col = "maroon", border = "grey5" )
pie(table(data[,i]) , main = paste("Piechart of", names(data)[i]), #pie chart
ylab = names(data)[i], col = "lightgreen")
dev.off()
}
}
}
#Explanation:
#-------------
par(mfrow=c(2,1))
boxplot(cars[,2], main = paste("Boxplot of", names(cars)[2]), #boxplot
        ylab = names(cars)[2], col = "maroon", border = "grey5",
        horizontal = T)
hist(cars[,2], main = paste("Histogram of", names(cars)[2]), #histogram
     xlab = names(cars)[2], ylab = "Frequency", col = "lightgreen", border=F)
Graphs(cars,direct="C:\\Users\\ethic\\Desktop\\test")
#example:
#Graphs(cars,c(1,3,5,10,11))
#here, only the graphs relating to columns 1,3,5,10,11 will get exported to your set
#working directory
|
2faa169116d06052c162e0bbf7aecf7a65da8a2d | 340f0cdacd7bd1994627cb34203915bd17d56186 | /R/normalizer.R | 7b1f23682080eb77faedc53c7b74b82a88c31be6 | [] | no_license | PCRuniversum/chipPCR | af593b813f8c59d5027d8a118955666f0fff283e | b7c751a8716c814c63825d50007699dbfb7a22f4 | refs/heads/master | 2023-03-11T02:18:02.994570 | 2021-02-27T20:04:47 | 2021-02-27T20:04:47 | 19,281,268 | 2 | 3 | null | 2020-07-27T13:48:14 | 2014-04-29T15:22:30 | R | UTF-8 | R | false | false | 903 | r | normalizer.R | normalizer <- function(y, method.norm = "none", qnL = 0.03) {
if (qnL <= 0.001 || qnL >= 0.999)
stop("qnL must be within 0.001 and 0.999.")
# Select a method for the normalization
method.norm <- check.method(c("none", "luqn", "minm", "max", "zscore"), method.norm)
# TODO Test meaningfulness of qnL
switch(method.norm,
none = do.call(function(y) y, c(list(y = y))),
minm = do.call(function(y) (y - min(y)) / (max(y) - min(y)),
c(list(y = y))),
max = do.call(function(y) (y / max(y)),
c(list(y = y))),
luqn = do.call(function(y, qnL) (y - quantile(y, qnL)) /
(quantile(y, 1 - qnL) - quantile(y, qnL)),
c(list(y = y, qnL = qnL))),
zscore = do.call(function(y) (y - mean(y)) / sd(y),
c(list(y = y)))
)
} |
ae817912397b72fee42464a50f254c79852f04a7 | 1bc0fe0762e4ee96144c521211f7ddf4fec54be1 | /code/2_2AllHabitatsNMDS.R | 21396c29fb9c19f3d07f5374356695fda035985e | [] | no_license | melaniekamm/IthacaBees_BySeasonScale | 6e66573e2895af02c45778a9a299003b8e425dc9 | 99080523594b658fa0f2e7e20aff0434f9c317c4 | refs/heads/main | 2023-06-23T21:47:15.470526 | 2023-06-15T21:08:12 | 2023-06-15T21:08:12 | 363,954,203 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,076 | r | 2_2AllHabitatsNMDS.R | library(dplyr)
rm(list=ls())
if (!dir.exists('./figures')) {
dir.create('./figures')
dir.create('./figures/supplementary')
dir.create('./figures/supplementary/NMDS_plants')
}
plant_rich <- read.csv('./data/Iverson_plant/allplants/richness_by_site.csv')%>%
dplyr::mutate(Site= gsub(Site, pattern='Black_Diamond',
replacement='Merwin'))
cover_long <- read.csv('./data/Iverson_plant/plant_cover_by_species.csv')
cover_long <- dplyr::filter(cover_long, !subplot %in% c('Plot A', 'Plot B',
'Plot C', 'D', 'E', 'No_canopy_trees'))
cover_long <- dplyr::mutate(cover_long, Site = gsub(Site, pattern='Black_Diamond',
replacement='Merwin', fixed=T)) %>%
dplyr::mutate(site_plot = paste0(Site, "_", subplot),
Genus_species = paste0(Genus, ".", species)) %>%
dplyr::group_by(Genus_species, Genus, species, family, common, Site) %>%
dplyr::summarise(mean_pct_cover = sum(pct_cover)) %>%
dplyr::ungroup() %>%
dplyr::mutate(family = as.character(family))
#add one missing family
cover_long$family[cover_long$Genus == 'Cuscuta'] <- 'Convolvulaceae'
#NMDS ordination on plant community data
colors <- read.csv('./data/misc/habitat_colors.csv') %>%
dplyr::mutate(color2 = rev(color))
#try a different package for ordination and plotting
#devtools::install_github("MadsAlbertsen/ampvis2")
library(ampvis2); library(ggplot2)
#get species data in correct format
speciessite <- dplyr::mutate(cover_long, rounded_cover = mean_pct_cover*10,
Genus_species = as.factor(Genus_species),
Site = as.factor(Site)) %>%
dplyr::select(-common, -mean_pct_cover) %>%
tidyr::spread(key=Site, value=rounded_cover) %>%
replace(is.na(.), 0) %>%
dplyr::rename(Family=family, Species= species, OTU=Genus_species) %>%
dplyr::mutate(Kingdom = 'Plantae', Phylum = "A", Class= "B", Order="C") %>%
dplyr::select(Kingdom, Phylum, Class, Order, Family, Genus, Species, dplyr::everything())
a <- speciessite[,1:7]
b <- speciessite[,8:length(speciessite)]
speciessite <- cbind(b,a)
md <- dplyr::rename(plant_rich, SampleID=Site)
ampdata <- amp_load(otutable = speciessite, metadata = md)
t <- amp_ordinate(data=ampdata, type='NMDS', distmeasure='bray', transform='none', filter_species = 0,
species_plot = F, species_label_taxonomy = 'OTU',
sample_colorframe = 'habitat',
sample_colorframe_label="habitat",
sample_color_by='habitat', #sample_point_size=4,
species_nlabels = 0, print_caption = T,
detailed_output = T)
t$plot + scale_color_manual(values= as.character(colors$color)) +
scale_fill_manual(values= as.character(colors$color)) +
guides(color=guide_legend(ncol=1)) +
theme_classic()
# ggsave(device='svg', filename='PlantComm_NMDS_AllHabitats.svg', path='./figures/supplementary/NMDS_plants',
# width=7, height=6.25)
tlab <- amp_ordinate(data=ampdata, type='NMDS', distmeasure='bray', transform='none',
filter_species = 0,
species_plot = T, species_label_taxonomy = 'OTU',
sample_colorframe = 'habitat',
#sample_colorframe_label="habitat",
sample_color_by='habitat', #sample_point_size=4,
species_nlabels = 25,
print_caption = F,
detailed_output = T)
tlab$plot + scale_color_manual(values= as.character(colors$color)) +
scale_fill_manual(values= as.character(colors$color)) +
guides(color=guide_legend(ncol=1)) +
theme_classic()
# ggsave(device='svg', filename='PlantComm_NMDS_AllHabitats_SpeciesNames.svg', path='./figures/supplementary/NMDS_plants',
# width=7, height=6.25)
#
# axes <- dplyr::select(t$plot$data, SampleID, NMDS1, NMDS2)
# write.csv(axes, './data/Iverson_plant/allplants/NMDS_Ordination_Axis_Loadings.csv')
|
5973c2630b60f0df54e48554d2e358810b034ef8 | 96895e3d650501bb3ebf4fa4bcbd4ccc8aed3999 | /Part1Exercises/ICA/STAT3019ICA1/step 2 K-means.R | 170d6b905c872465dd5d137f57738db1f4ad2cba | [] | no_license | linyina/Clustering-in-R | 5570fad12c872296250d84b66d91df2785aa1888 | c33a466fe421a2bb8808ac11dcbf94d365cb3a92 | refs/heads/master | 2020-03-22T11:46:06.990177 | 2018-08-14T08:50:34 | 2018-08-14T08:50:34 | 139,994,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,479 | r | step 2 K-means.R | ############################################
##### Step 2: Kmeans clustering #####
############################################
## random try
sdortmundk5<- kmeans(sdortmund, centers = 5, nstart = 100)
pairs(sdortmund, col=sdortmundk5$cluster, pch=clusym[sdortmundk5$cluster])
sdortmundk5$centers
## the means (scaled) of each clusters in different factors
## Cluster
cg1<- clusGap(dortmund,kmeans,20,B=100,d.power=2,spaceH0="scaledPCA",nstart=100) # 1???
plot(cg1)
print(cg1, method = "firstSEmax")
plot(1:20,cg1$Tab[,1],xlab="k",ylab="log S_k",type="l")
points(1:20,cg1$Tab[,2],xlab="k",ylab="log S_k",type="l",lty=2)
legend("topright",c("log S_k in data","E(log S_k) uniform"),lty=1:2)
dev.copy(pdf,"clusgap logsk plots.pdf")
dev.off()
help(legend)
#### scale
sdortmund<- scale(dortmund)
pairs(dortmund)
cg2<-clusGap(sdortmund, kmeans, K.max = 20, B=100, d.power = 2, iter.max=20,spaceH0 = "original", nstart=100)
plot(cg2)
dev.copy(pdf, "clusgap_scaled plots.pdf")
dev.off()
print(cg2, method = "Tibs2001SEmax")
print(cg2, method = "firstSEmax")
plot(1:20,cg2$Tab[,1],xlab="k",ylab="log S_k",type="l", ylim=c(6,10))
points(1:20,cg2$Tab[,2],xlab="k",ylab="log S_k",type="l",lty=2)
legend("topright",c("log S_k in data","E(log S_k) uniform"),lty=1:2)
dev.copy(pdf, "clusgap_logSKplots_original_scaled")
dev.off()
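# Sketch: extract the k suggested by the gap statistic programmatically and
# refit k-means with it (cluster::maxSE mirrors the print() criterion above).
# k.gap <- maxSE(cg2$Tab[,"gap"], cg2$Tab[,"SE.sim"], method = "firstSEmax")
# sdortmund.kgap <- kmeans(sdortmund, centers = k.gap, nstart = 100)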
cg3<- clusGap(sdortmund, kmeans, K.max = 20, B=100, d.power = 2, iter.max=20, spaceH0 = "scaledPCA", nstart=100)
print(cg3, method="Tibs2001SEmax")
print(cg3, method = "firstSEmax") # 3 unchanged
plot(cg3) # values of gap
dev.copy(pdf, "clusgap_scaled plots_scaledPCA.pdf")
dev.off()
plot(1:20,cg3$Tab[,1],xlab="k",ylab="log S_k",type="l", ylim=c(6,10))
points(1:20,cg3$Tab[,2],xlab="k",ylab="log S_k",type="l",lty=2)
legend("topright",c("log S_k in data","E(log S_k) uniform"),lty=1:2)
dev.copy(pdf, "clusgap_logSKplots_scaledPCA_scaled")
dev.off()
# adjustedRandIndex(kmbundestag5$cluster,wbundestag5)  # leftover from a lecture example; these objects are not defined in this script
####### PAM
dortmundp5 <- pam(dortmund, 5)
help(pam)
####### Distances
eusdortmund <- dist(sdortmund,method="euclidean")
mansdortmund <- dist(sdortmund,method="manhattan")
plot(eusdortmund, mansdortmund)
olivecov <- cov(olive)   # requires the olive data (e.g. from package pdfCluster)
molive <- as.matrix(olive)
mahalm.olive <- matrix(0,ncol=572,nrow=572)   # initialise before filling
for (i in 1:572)
mahalm.olive[i,] <- mahalanobis(molive,molive[i,],olivecov)
mahalm <- matrix(0,ncol=170,nrow=170)
sdortmundcov<- cov(sdortmund)
sdortmund1 <- as.matrix(sdortmund)
for (i in 1:170)
mahalm[i,] <- mahalanobis(sdortmund1, sdortmund1[i,], sdortmundcov)
help("mahalanobis")
|
b9b90e6410d6e78e307b99f1f979becdfe31e42a | 1b983bf2ce7086842e0aaa737bc553cf57dcab6a | /1_dataprocessing/patch1_2_1.R | ca3a8ae194db85c19bc680c3da72fc8b697fd495 | [] | no_license | floatSDSDS/Yelp_Dataset_regionPredictor | 22cc6a66480ee2fbea329c630777b7ccfa93fd82 | 7a1d8901a0fe603b0c0e6ff6af752fcf81958edf | refs/heads/master | 2021-01-12T12:44:30.369839 | 2016-10-03T08:14:21 | 2016-10-03T08:14:21 | 69,851,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,829 | r | patch1_2_1.R | i<-1;
set_test<-list();
for (i in 1:25){
temp<-1;
len<-length(test[[i]][2]$date);
##################################################test
########################################attribute
##################################wifi,noise
  wifi<-test[[i]][[3]]$Wi.Fi;
  temp<-rep(NA_real_,length(wifi));   # fresh recoding buffer
  temp[wifi=="paid"]<-0;
  temp[wifi=="free"]<-2;
  temp[is.na(temp)]<-1;
  wifi<-temp;
  noise<-test[[i]][[3]]$Noise.Level;
  temp<-rep(NA_real_,length(noise));  # reset so wifi codes do not leak into noise
  temp[noise=="loud"]<-1;
  temp[noise=="quiet"]<-3;
  temp[noise=="very_loud"]<-0;
  temp[is.na(temp)]<-2;
  noise<-temp;
##################################parking
##################################price.range,stars,business_id
price<-as.numeric(test[[i]][[3]]$Price.Range);
price[is.na(price)]<-0;
stars<-test[[i]][[3]]$stars;
business_id<-test[[i]][[3]]$business_id;
####################################
attributes<-data.frame(wifi,noise,price,stars,business_id);
########################################basic
##################################lon,lat,rev_c,stars,bs_id
longitude<-as.vector(test[[i]][[4]]$longitude);
longitude<-as.numeric(longitude);
latitude<-as.vector(test[[i]][[4]]$latitude);
  latitude<-as.numeric(latitude);
stars_busi<-test[[i]][[4]]$stars;
review_count<-test[[i]][[4]]$review_count;
business_id<-test[[i]][[4]]$business_id;
##################################state,city
state<-test[[i]][[4]]$state;
city<-test[[i]][[4]]$city;
basic<-data.frame(longitude,latitude,stars_busi,
review_count,state,city,business_id);
  set_test$user_id[[i]]<-test[[i]][[1]];
set_test$basic[[i]]<-basic;
set_test$attributes[[i]]<-attributes;
}
rm(attributes,basic,business_id,city,i,j,k,latitude,longitude,len,noise,
price,review_count,stars_busi,stars,state,temp,wifi);
rm(test,training); |
e1401b70bcd63d75782f69e435e4aa984d6260d1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tsDyn/tests/lstar.R | 8dc0832df91fcd76ed9f389043828949952d889e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,552 | r | lstar.R | library(tsDyn)
mod.lstar <- lstar(log10(lynx), m=2, mTh=c(0,1), control=list(maxit=3000))
mod.lstar
deviance(mod.lstar)
c(AIC(mod.lstar),BIC(mod.lstar))
mod.lstar2 <- lstar(log10(lynx), m=1, control=list(maxit=3000))
mod.lstar2
deviance(mod.lstar2)
c(AIC(mod.lstar2),BIC(mod.lstar2))
## include: none
mod.lstar_noConst <- lstar(log10(lynx), m=2, control=list(maxit=1000), include="none")
mod.lstar_noConst
deviance(mod.lstar_noConst)
c(AIC(mod.lstar_noConst),BIC(mod.lstar_noConst))
## include: trend
mod.lstar_trend <- lstar(log10(lynx), m=2, control=list(maxit=1000), include="trend")
mod.lstar_trend
deviance(mod.lstar_trend)
c(AIC(mod.lstar_trend),BIC(mod.lstar_trend))
## include: both
mod.lstar_both <- lstar(log10(lynx), m=2, control=list(maxit=1000), include="both")
mod.lstar_both
deviance(mod.lstar_both)
c(AIC(mod.lstar_both),BIC(mod.lstar_both))
## grid attributes
mod.lstar3 <- lstar(log10(lynx), m=2, control=list(maxit=3000), starting.control=list(gammaInt=c(1,1000), nTh=100))
mod.lstar3
deviance(mod.lstar3)
c(AIC(mod.lstar3),BIC(mod.lstar3))
mod.lstar_ALL <- list(mod.lstar=mod.lstar, mod.lstar2=mod.lstar2,
mod.lstar_noConst=mod.lstar_noConst,mod.lstar_trend=mod.lstar_trend,
mod.lstar_both=mod.lstar_both,mod.lstar3=mod.lstar3)
sapply(mod.lstar_ALL, function(x) c(AIC=AIC(x), BIC=BIC(x), deviance=deviance(x)))
sapply(mod.lstar_ALL, function(x) tail(coef(x),4))
sapply(mod.lstar_ALL, function(x) tail(coef(x,hyperCo=FALSE),4))
sapply(mod.lstar_ALL, function(x) head(x$model,2))
|
7c259256a5857d97ced9c0d51e12468e3bd0bb53 | cc3af520071dae6080c9abd70d08013eb769ec0a | /man/bootmatch.Rd | 01719001540570c1816c7725fea7df86db103e24 | [] | no_license | Libardo1/PSAgraphics2 | 432c8a6c68c42d703570fd8b710b1b4bb9c139a6 | be41bd2f72b98fa976bab8e0446db8a86d27d10d | refs/heads/master | 2021-01-18T07:33:35.032074 | 2013-10-25T18:50:56 | 2013-10-25T18:50:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 865 | rd | bootmatch.Rd | \name{bootmatch}
\alias{bootmatch}
\title{Bootstrap treatment units for propensity score analysis}
\usage{
bootmatch(Tr, Y, X, M = 100, ratio = 3, nstrata = 5,
sample.size = (ratio * min(table(Tr))), ...)
}
\arguments{
\item{Tr}{numeric (0 or 1) or logical vector of treatment
indicators.}
\item{Y}{vector of outcome varaible.}
\item{X}{matrix or data frame of covariates used to
estimate the propensity scores.}
\item{M}{number of bootstrap samples to generate.}
\item{ratio}{the ratio of control units to sample
relative to the treatment units.}
\item{sample.size}{the size of each bootstrap sample of
control units.}
\item{nstrata}{number of strata to use.}
\item{...}{other parameters passed to \code{\link{Match}}
and \code{\link{psa.strata}}}
}
\description{
Bootstrap treatment units for propensity score analysis
}
|
269e4d1bf3bd3efec9e93ccbba09182a6ef216fd | 673d3cb2c9608d08f1ef65e7e72133783c441a96 | /Tong_Anal/3 chfls.R | 65498382236b31826b9105b453f4a3988320715e | [] | no_license | SilverwestKim/Univ | 6db157d36317ab72d7b10ed4d015851d36c62273 | 6f9cc90c3500e60548ef70334ecbdd794fb95337 | refs/heads/master | 2021-05-02T02:29:18.693866 | 2018-11-07T14:39:54 | 2018-11-07T14:39:54 | 120,883,806 | 0 | 0 | null | null | null | null | UHC | R | false | false | 1,247 | r | 3 chfls.R | # 데이터 불러 오기
chfls<-read.csv("CHFLS.csv",header=T)
str(chfls)
table(chfls$R_happy)
xtabs(~R_happy,data=chfls)
barplot(table(chfls$R_happy))
# somewhat happy가 가장 많고 very happy가
# 그 다음으로 많다. 전체적으로 not too happy와
# very unhappy의 비율이 높지 않다.
# 건강상태 vs. 행복정도
table(chfls$R_health)
barplot(table(chfls$R_health))
# not good과 poor의 비율이 작으므로 전반적으로
# 건강상태가 나쁘지 않은 것으로 보인다.
table<-table(chfls$R_health,chfls$R_happy)
table
prop.table(table)
margin.table(table,margin=1)
margin.table(table,margin=2)
install.packages("gmodels")
library(gmodels)
CrossTable(table,prop.r=T,prop.c=F,prop.t=F,expected=F,prop.chisq=F)
CrossTable(table,prop.r=T,prop.c=F,prop.t=F,expected=T,prop.chisq=T)
mosaicplot(table)
# 전체적으로 행복정도와 건강이 연관이 있어 보인다.
# very happy인 경우 건강이 좋을수록 비율이 늘어나는
# 경향이 있는 반면, very unhappy의 경우
# 건강이 poor에서 빈도가 증가하는 것을 알 수 있다.
CrossTable(chfls$R_health,chfls$R_happy)
with(chfls,CrossTable(R_health,R_happy))
mosaicplot(with(chfls,table(R_region,R_happy)))
|
f63b1747fb2da238b3073956a23a8cb51b7891c4 | ba9921f0c04b74a1136c3fea0430cca3961e28d9 | /plot4.R | 8e363f501af67b3cb5d6ced6bdf9a01355a56c45 | [] | no_license | cmconklin/ExData_Plotting1 | 88d20115aad3cc7f7e42473fb53fc3e8f17e4957 | d975eb615df6d96bb062f6d97c9b7403d123ab7a | refs/heads/master | 2021-01-24T21:19:47.493095 | 2015-11-08T22:26:39 | 2015-11-08T22:26:39 | 45,774,653 | 0 | 0 | null | 2015-11-08T09:41:25 | 2015-11-08T09:41:24 | null | UTF-8 | R | false | false | 4,496 | r | plot4.R | ## This R script will create Plot3 of he Assignment 1,
## Global Active Power usage between 2007-02-01 and 2007-02-02
## The print statements provide a clue as to what steps are
## to be executed.
##
## The datafile must already exist in a relative directory "../data"
##
## To execute: > source("plot3.R")
##
## This is a single line graph with three variables
plot1of4 = TRUE
plot2of4 = TRUE
plot3of4 = TRUE
plot4of4 = TRUE
#----------------------------------------------------------------------
# Setup 2 x 2 Plots
#----------------------------------------------------------------------
print("Setup 2 x 2 Plots")
par(mfrow = c(2,2))
#----------------------------------------------------------------------
# Setting up data
#----------------------------------------------------------------------
print("*** Assignment-1, Plot4 ***")
print("Setting data for plots")
print(" Set the beginning and ending dates to filter data.")
dateStart <- as.Date("2007-02-01", format = "%Y-%m-%d")
dateEnd <- as.Date("2007-02-02", format = "%Y-%m-%d")
print(" Reading Data File ...")
dfHpcOrig <- read.csv2("../data/household_power_consumption.txt", sep = ";", as.is = TRUE)
print(" Reading Data File - Done")
print(" Make copy of original data to work with")
dfHpc <- dfHpcOrig
print(" Convert 2nd column - Time, to datetime stamp class.")
dfHpc[[2]] <- strptime(paste(dfHpc$Date,dfHpc$Time), format = "%d/%m/%Y %H:%M:%S")
print(" Convert 1st column - Date, to a date class.")
dfHpc[[1]] <- as.Date(dfHpc$Date, format = "%d/%m/%Y")
print(" Filter date for begin and ending dates")
dfHpc <- dfHpc[which(dfHpc$Date >= dateStart & dfHpc$Date <= dateEnd), ]
#str(dfHpc)
print(" Convert Global_active_power to a number")
suppressWarnings(dfHpc$Global_active_power <- as.numeric(dfHpc$Global_active_power))
print(" Convert Global_reactive_power to a number")
suppressWarnings(dfHpc$Global_reactive_power <- as.numeric(dfHpc$Global_reactive_power))
print(" Convert Sub_Metering_x to a number")
suppressWarnings(dfHpc$Sub_metering_1 <- as.numeric(dfHpc$Sub_metering_1))
suppressWarnings(dfHpc$Sub_metering_2 <- as.numeric(dfHpc$Sub_metering_2))
suppressWarnings(dfHpc$Sub_metering_3 <- as.numeric(dfHpc$Sub_metering_3))
#str(dfHpc)
# str(dfHpc)
# head(dfHpc, 3)
#----------------------------------------------------------------------
# Plot 1 of 4: Global Active Power
#----------------------------------------------------------------------
if (plot1of4 == TRUE) {
print("Plot1: Global Active Power")
print(" Global Active Power in black")
plot(dfHpc$Time, dfHpc$Global_active_power, type="l",
ylab = "Global Active Power", xlab = "", cex.lab=0.7)
}
#----------------------------------------------------------------------
# Plot 2 of 4: Voltage
#----------------------------------------------------------------------
if (plot2of4 == TRUE) {
print("Plot2: Voltage")
print(" Plot Voltage in black")
plot(dfHpc$Time, dfHpc$Voltage, type="l",
ylab = "Voltage", xlab = "datetime", cex.lab=0.7)
}
#----------------------------------------------------------------------
# Plot 3 of 4: Sub Meter Usage
#----------------------------------------------------------------------
if (plot3of4 == TRUE) {
print("Plot3: Sub Meter Usage ...")
print(" Plot Sub_Meter_1 in black")
plot(dfHpc$Time, dfHpc$Sub_metering_1, type="l",
ylab = "Energy sub metering", xlab = "", cex.lab=0.7)
print(" Plot Sub_Meter_2 in red")
points(dfHpc$Time, dfHpc$Sub_metering_2, type="l", col = "red")
print(" Plot Sub_Meter_3 in blue")
points(dfHpc$Time, dfHpc$Sub_metering_3, type="l", col = "blue")
print(" Add Legend")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col=c("black","red","blue"), bty="n", lwd=c(1,1,1), cex=0.7,
y.intersp=0.7)
}
#----------------------------------------------------------------------
# Plot 4 of 4: Global Reactive Power
#----------------------------------------------------------------------
if (plot4of4 == TRUE) {
print("Plot4: Global Reactive Power")
print(" Plot Global_reactive_Power in black")
plot(dfHpc$Time, dfHpc$Global_reactive_power, type="l",
ylab = "Global_reactive_power", xlab = "datetime", cex.lab=0.7)
}
#----------------------------------------------------------------------
# Create png file
#----------------------------------------------------------------------
print("Creating Plot4.png file")
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off() |
1b66c967a195c71e47d65929cef141408cd1cbdf | 3077edf6801a2a9b16b5627ef73f1187d6e8124f | /all_functions.R | 891909711924494bb9f4bd9e86394de5ce91f7b1 | [] | no_license | raypallavi/BNP-Computations | d301ca05ec6d9585c7cf364d2e8f643247c79017 | 5dfbd841514f8fe703d72f6a3e7fd17ad4369ded | refs/heads/master | 2020-04-20T10:05:05.251677 | 2019-04-05T23:36:06 | 2019-04-05T23:36:06 | 168,781,139 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 12,766 | r | all_functions.R | ### All required functions for using ESS
### Functions related to Wood and Chan algorithm of drawing samples
### MH for sampling from \nu and \ell
### Covariance matrix and design matrix (using basis function) are also defined
### And all related and dependant functions are here
### Required libraries:
library(fields)
library(FastGP)
### Define design matrix ###
### The basis functions ###
# For monotone function estimation:
psi_j=function(x,my_knot,delta_N)
{
N=length(my_knot)
k=rep(0,N)
i=max(which(my_knot<=x))
if(i==1)
{
k[1]=x-0.5*(x^2)/delta_N
k[2]=x-my_knot[2]*x/delta_N+0.5*x^2/delta_N
}
if(i==2)
{
k[1]=delta_N/2
k[2]=delta_N/2+(x-my_knot[2])*(1+my_knot[2]/delta_N)-0.5*(x^2-my_knot[2]^2)/delta_N
k[3]=(x-my_knot[2])*(1-my_knot[3]/delta_N)+0.5*(x^2-my_knot[2]^2)/delta_N
}
if(i==N)
{
k[1]=delta_N/2
k[2:(N-1)]=delta_N
k[N]=delta_N/2
}
if(i!=1 && i!=2 && i!=N)
{
k[1]=delta_N/2
k[2:(i-1)]=delta_N
k[i]=delta_N/2+(x-my_knot[i])*(1+my_knot[i]/delta_N)-0.5*(x^2-my_knot[i]^2)/delta_N
k[i+1]=(x-my_knot[i])*(1-my_knot[i+1]/delta_N)+0.5*(x^2-my_knot[i]^2)/delta_N
}
return(k)
}
# For convex function estimation:
phi_j=function(x,my_knot,delta_N)
{
N=length(my_knot)
k=rep(0,N)
if(x>=my_knot[1] && x<my_knot[2])
{
k[1]=(x^2/2)-0.5*(x^3/3)/delta_N
k[2]=0.5*(x^3/3)/delta_N
}
if(x>=my_knot[2] && x<my_knot[3])
{
k[1]=(my_knot[2]^2/2)-0.5*(my_knot[2]^3/3)/delta_N+0.5*delta_N*(x-my_knot[2])
k[2]=my_knot[2]^3/(3*delta_N)+0.5*delta_N*(x-my_knot[2])+(x^2-my_knot[2]^2)-1.5*my_knot[2]*(x-my_knot[2])-0.5*(x^3/3)/delta_N
k[3]=(1-my_knot[3]/delta_N)*(x^2/2-x*my_knot[2]+my_knot[2]^2/2)+0.5*(x^3/3-x*my_knot[2]^2+2*my_knot[2]^3/3)/delta_N
}
if(x>=my_knot[3] && x<my_knot[N])
{
k[1]=(my_knot[2]^2/2)-0.5*(my_knot[2]^3/3)/delta_N+0.5*delta_N*(x-my_knot[2])
k[2]=my_knot[2]^3/(3*delta_N)+0.5*delta_N^2+my_knot[3]^2-2.5*my_knot[2]^2-0.5*(my_knot[3]^3/3)/delta_N+delta_N*(x-my_knot[3])
k[3]=(1-my_knot[3]/delta_N)*(my_knot[3]^2/2-my_knot[3]*my_knot[2]+my_knot[2]^2/2)+0.5*(my_knot[3]^3/3-my_knot[3]*my_knot[2]^2+2*my_knot[2]^3/3)/delta_N+delta_N*(x-my_knot[3])
if(N>4){
for(j in 4:(N-1)){
if(x<my_knot[j-1])
k[j]=0
if(x>=my_knot[j-1] && x<my_knot[j])
k[j]=(1-my_knot[j]/delta_N)*(0.5*(x^2-my_knot[j-1]^2)-my_knot[j-1]*(x-my_knot[j-1]))+0.5*((x^3/3-my_knot[j-1]^3/3)-my_knot[j-1]^2*(x-my_knot[j-1]))/delta_N
if(x>=my_knot[j] && x<my_knot[j+1])
k[j]=(1-my_knot[j]/delta_N)*(0.5*(my_knot[j]^2-my_knot[j-1]^2)-my_knot[j-1]*(my_knot[j]-my_knot[j-1]))+0.5*((my_knot[j]^3/3-my_knot[j-1]^3/3)-my_knot[j-1]^2*(my_knot[j]-my_knot[j-1]))/delta_N+0.5*delta_N*(x-my_knot[j])+
(1+my_knot[j]/delta_N)*(0.5*(x^2-my_knot[j]^2)-my_knot[j]*(x-my_knot[j]))-0.5*((x^3/3-my_knot[j]^3/3)-my_knot[j]^2*(x-my_knot[j]))/delta_N
if(x>=my_knot[j+1])
k[j]=(1-my_knot[j]/delta_N)*(0.5*(my_knot[j]^2-my_knot[j-1]^2)-my_knot[j-1]*(my_knot[j]-my_knot[j-1]))+0.5*((my_knot[j]^3/3-my_knot[j-1]^3/3)-my_knot[j-1]^2*(my_knot[j]-my_knot[j-1]))/delta_N+0.5*delta_N*(my_knot[j+1]-my_knot[j])+
(1+my_knot[j]/delta_N)*(0.5*(my_knot[j+1]^2-my_knot[j]^2)-my_knot[j]*(my_knot[j+1]-my_knot[j]))-0.5*((my_knot[j+1]^3/3-my_knot[j]^3/3)-my_knot[j]^2*(my_knot[j+1]-my_knot[j]))/delta_N+delta_N*(x-my_knot[j+1])
}
}
if(x>=my_knot[N-1] && x<my_knot[N])
k[N]=(1-my_knot[N]/delta_N)*(0.5*(x^2-my_knot[N-1]^2)-my_knot[N-1]*(x-my_knot[N-1]))+0.5*((x^3/3-my_knot[N-1]^3/3)-my_knot[N-1]^2*(x-my_knot[N-1]))/delta_N
}
if(x>=my_knot[N])
{
k[1]=(my_knot[2]^2/2)-0.5*(my_knot[2]^3/3)/delta_N+0.5*delta_N*(x-my_knot[2])
k[2]=my_knot[2]^3/(3*delta_N)+0.5*delta_N^2+my_knot[3]^2-2.5*my_knot[2]^2-0.5*(my_knot[3]^3/3)/delta_N+delta_N*(x-my_knot[3])
k[3]=(1-my_knot[3]/delta_N)*(my_knot[3]^2/2-my_knot[3]*my_knot[2]+my_knot[2]^2/2)+0.5*(my_knot[3]^3/3-my_knot[3]*my_knot[2]^2+2*my_knot[2]^3/3)/delta_N+delta_N*(x-my_knot[3])
for(j in 4:(N-1)){
k[j]=(1-my_knot[j]/delta_N)*(0.5*(my_knot[j]^2-my_knot[j-1]^2)-my_knot[j-1]*(my_knot[j]-my_knot[j-1]))+0.5*((my_knot[j]^3/3-my_knot[j-1]^3/3)-my_knot[j-1]^2*(my_knot[j]-my_knot[j-1]))/delta_N+0.5*delta_N*(my_knot[j+1]-my_knot[j])+
(1+my_knot[j]/delta_N)*(0.5*(my_knot[j+1]^2-my_knot[j]^2)-my_knot[j]*(my_knot[j+1]-my_knot[j]))-0.5*((my_knot[j+1]^3/3-my_knot[j]^3/3)-my_knot[j]^2*(my_knot[j+1]-my_knot[j]))/delta_N+delta_N*(x-my_knot[j+1])
}
k[N]=(1-my_knot[N]/delta_N)*(0.5*(my_knot[N]^2-my_knot[N-1]^2)-my_knot[N-1]*(my_knot[N]-my_knot[N-1]))+0.5*((my_knot[N]^3/3-my_knot[N-1]^3/3)-my_knot[N-1]^2*(my_knot[N]-my_knot[N-1]))/delta_N+0.5*delta_N*(x-my_knot[N])
}
return(k)
}
### Function to form design matrix:
des.mat1=function(x,my_knot,delta_N){
# Function to form basis matrix for monotone constraint
n=length(x)
N=length(my_knot)-1
# design matrix \Psi(n X N+1)
X=matrix(0,n,N+1)
for(l in 1:n){
X[l,1:(N+1)]=psi_j(x[l],my_knot,delta_N)
}
return(X)
}
des.mat2=function(x,my_knot,delta_N){
# Function to form basis matrix for convex constraint
n=length(x)
N=length(my_knot)-1
# design matrix \Phi(n X N+1)
X=matrix(0,n,N+1)
for(l in 1:n){
X[l,1:(N+1)]=phi_j(x[l],my_knot,delta_N)
}
return(X)
}
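# Example (sketch): design matrix on an equally spaced knot grid over [0,1].
# my_knot <- seq(0, 1, length.out = 21); delta_N <- my_knot[2] - my_knot[1]
# X <- des.mat1(runif(30), my_knot, delta_N)   # 30 x 21 basis matrix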
### Function to compute \xi_1 * x :
xix=function(a,b){
return(a*b)
}
#Given a \nu (smoothness parameter of matern kernel) finding a value of
# l (length-scale parameter) such that the correlation at the
# maximum separation is some small value, say 0.05
#Matern kernel with smoothness nu and length-scale l:
MK = function(x, y ,l, nu){
ifelse(abs(x-y)>0, (sqrt(2*nu)*abs(x-y)/l)^nu/(2^(nu-1)*gamma(nu))*besselK(x=abs(x-y)*sqrt(2*nu)/l, nu=nu), 1.0)
}
# function for uniroot:
fl=function(l,para){
#para[1]=x, para[2]=y and para[3]=nu of MK : Matern kernel function;
#para[4]=pre-specified value of the correlation
a=MK(para[1],para[2],l,para[3])
return(a-para[4])
}
# function for estimating l:
l_est=function(nu,range,val){
# nu : smoothness; range : c(min, max) of the range of variable
  # val : pre-specified value of the correlation at the maximum separation
para=c(range[1],range[2],nu,val)
rl=uniroot(f=fl,interval = c(0.000001,100000),para)
return(rl$root)
}
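# Example (sketch): length-scale giving correlation 0.05 at maximum
# separation 1 when nu = 0.75:
# l_est(0.75, c(0, 1), 0.05)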
# Covariance matrix
covmat=function(knot,nu,l){
return(MK(rdist(knot),0,l,nu))
}
# Order of the circulant matrix:
# minimum value of g and m so that G can be embedded into C
min_g=function(knot){
N=length(knot)
g=ceiling(log(2*N,2)) #m=2^g and m>=2(n-1) : Wood & Chan notation;
#since we are going upto n and not stopping at (n-1), the condition is modified!
return("g" = g)
}
# forming the circulant matrix:
circulant=function(x){
n = length(x)
mat = matrix(0, n, n)
for (j in 1:n) {
mat[j, ] <- c(x[-(1:(n+1-j))], x[1:(n+1-j)])
}
return(mat)
}
# Function for forming the vector of circulant matrix:
circ_vec=function(knot,g,nu,l,tausq){
delta_N=1/(length(knot)-1)
m=2**g
cj=integer()
for(j in 1:m){
if(j<=(m/2))
cj[j]=(j-1)*delta_N
else
cj[j]=(m-(j-1))*delta_N
}
x=(tausq*MK(cj,0,l,nu))
return(x)
}
# Function for finding a g such that C is nnd:
eig.eval=function(knot,g,nu,l,tausq){
vec=circ_vec(knot,g,nu,l,tausq)
C=circulant(vec)
ev=min(eigen(C)$values)
return(list("vec" = vec, "min.eig.val" = ev))
}
# Function for finding a g such that C is nnd:
# without forming the circulant matrix and without computing eigen values:
C.eval=function(knot,g,nu,l,tausq){
vec=circ_vec(knot,g,nu,l,tausq)
val=fft(vec) # eigenvalues will be real as the circulant matrix formed by the
# vector is by construction is symmetric!
ev=min(Re(val))
return(list("vec" = vec, "min.eig.val" = ev))
}
nnd_C=function(knot,g,nu,l,tausq){
C.vec=C.eval(knot,g,nu,l,tausq)$vec
eval=C.eval(knot,g,nu,l,tausq)$min.eig.val
if(eval>0)
return(list("cj" = C.vec,"g" = g))
else{
g=g+1
nnd_C(knot,g,nu,l,tausq)
}
}
# computing the eigen values of C using FFT:
eigval=function(knot,nu,l,tausq){
g=min_g(knot)
c.j=nnd_C(knot,g,nu,l,tausq)$cj
lambda=Re(fft(c.j))
if(min(lambda)>0)
return(lambda)
else
stop("nnd condition is NOT satisfied!!!")
}
#################################################################
########## Samples drawn using Wood and Chan Algorithm ##########
#################################################################
samp.WC=function(knot,nu,l,tausq){
N=length(knot)
lambda=eigval(knot,nu,l,tausq)
m=length(lambda)
samp.vec=rep(0,N)
a=rep(0,m)
a[1]=sqrt(lambda[1])*rnorm(1)/sqrt(m)
a[(m/2)+1]=sqrt(lambda[(m/2)+1])*rnorm(1)/sqrt(m)
i=sqrt(as.complex(-1))
for(j in 2:(m/2)){
uj=rnorm(1); vj=rnorm(1)
a[j]=(sqrt(lambda[j])*(uj + i*vj))/(sqrt(2*m))
a[m+2-j]=(sqrt(lambda[j])*(uj - i*vj))/(sqrt(2*m))
}
samp=fft(a)
samp.vec=Re(samp[1:N])
return(samp.vec)
}
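# Example (sketch): one prior draw of the GP at the knots via the circulant
# embedding above (nu and tausq values are illustrative).
# my_knot <- seq(0, 1, by = 1/50)
# f_draw <- samp.WC(my_knot, nu = 0.75, l = l_est(0.75, c(0, 1), 0.05), tausq = 1)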
#############################################
########## Functions for using ESS ##########
#############################################
ESS = function(beta,nu_ess,y,X,sigsq,eta){
thetamin = 0;
thetamax = 2*pi;
u = runif(1)
logy = loglik(y,X,sigsq,eta,beta) + log(u);
theta = runif(1,thetamin,thetamax);
thetamin = theta - 2*pi;
thetamax = theta;
betaprime = beta*cos(theta) + nu_ess*sin(theta);
while(loglik(y,X,sigsq,eta,betaprime) <= logy){
if(theta < 0)
thetamin = theta
else
thetamax = theta
theta = runif(1,thetamin,thetamax)
betaprime = beta*cos(theta) + nu_ess*sin(theta)
}
return(betaprime)
}
ESS.dec = function(beta,nu_ess,y,X,sigsq,eta){
thetamin = 0;
thetamax = 2*pi;
u = runif(1)
logy = loglik2(y,X,sigsq,eta,beta) + log(u);
theta = runif(1,thetamin,thetamax);
thetamin = theta - 2*pi;
thetamax = theta;
betaprime = beta*cos(theta) + nu_ess*sin(theta);
while(loglik2(y,X,sigsq,eta,betaprime) <= logy){
if(theta < 0)
thetamin = theta
else
thetamax = theta
theta = runif(1,thetamin,thetamax)
betaprime = beta*cos(theta) + nu_ess*sin(theta)
}
return(betaprime)
}
## Defining the loglik function to be used in ESS:
## loglik calculates the log of the likelihood:
loglik=function(y,X,sigsq,eta,beta){
mu=y-(X%*%beta)
val=eta*sum(beta)-sum(log(1+exp(eta*beta)))-sum(mu^2)/(2*sigsq)
return(val)
}
loglik2=function(y,X,sigsq,eta,beta){
mu=y-(X%*%beta)
val=-sum(log(1+exp(eta*beta)))-sum(mu^2)/(2*sigsq)
return(val)
}
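# Example (illustrative, made-up data): one elliptical slice sampling update
# of beta. ESS assumes beta has a Gaussian prior; nu_ess must be a fresh draw
# from that same prior (a standard normal prior is assumed here).
# set.seed(1)
# X        = matrix(rnorm(50*5), 50, 5)
# y        = X %*% rnorm(5) + rnorm(50, sd = 0.5)
# beta     = rnorm(5)   # current state
# nu_ess   = rnorm(5)   # draw from the prior
# beta.new = ESS(beta, nu_ess, y, X, sigsq = 0.25, eta = 1)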
## MH algo for \nu of Matern kernel: (NOT USED IN THE MAIN CODE)
## inv_chol() below is assumed to be defined elsewhere in the project; it
## should return the inverse of the Cholesky factor of its matrix argument.
nu.MH1 = function(nu.in,l.in,tau.in,xi.in,Kmat,knot,range.nu=c(0.5,1),sd.p=0.05){
nu.cand = exp(log(nu.in)+rnorm(1,0,sd.p))
l.cand = l_est(nu.cand,c(0,1),0.05)
du = dunif(nu.cand,range.nu[1],range.nu[2])
if(du > 0){
Kcand = covmat(knot,nu.cand,l.cand)
Linv = inv_chol(Kmat); Linv.cand = inv_chol(Kcand)
r = exp(sum(log(diag(Linv.cand)))-sum(log(diag(Linv))))*(nu.cand/nu.in)
t1 = sum((t(Linv.cand)%*%xi.in)^2); t2 = sum((t(Linv)%*%xi.in)^2)
r = r*exp(-(t1 - t2)/(2*tau.in))
alpha = min(r,1)
}
else
alpha = 0
u = runif(1)
nu.out = (u < alpha)*nu.cand + (u >= alpha)*nu.in
l.out = (u < alpha)*l.cand + (u >= alpha)*l.in
cnt = as.numeric((abs(nu.out - nu.in) > 0))
return(list("nu" = nu.out,"l" = l.out,"count" = cnt))
}
## MH algo for \nu and \ell of Matern kernel:
nu.MH2 = function(nu.in,l.in,tau.in,xi.in,knot,range.nu=c(0.5,1),range.l=c(0.1,1),sd.nu=0.05,sd.l=0.1){
Kmat = covmat(knot,nu.in,l.in)
Linv = inv_chol(Kmat)
nu.cand = exp(log(nu.in)+rnorm(1,0,sd.nu))
l.cand = exp(log(l.in)+rnorm(1,0,sd.l))
dnu = dunif(nu.cand,range.nu[1],range.nu[2])
dl = dunif(l.cand,range.l[1],range.l[2])
if(dnu > 0 && dl > 0){
Kcand = covmat(knot,nu.cand,l.cand)
Linv.cand = inv_chol(Kcand)
t1 = sum((t(Linv.cand)%*%xi.in)^2)
t2 = sum((t(Linv)%*%xi.in)^2)
r = exp(sum(log(diag(Linv.cand)))-sum(log(diag(Linv)))-((t1 - t2)/(2*tau.in)))*(nu.cand/nu.in)*(l.cand/l.in)
alpha = min(r,1)
}
else{
alpha = 0
Linv.cand = Linv
}
u = runif(1)
nu.out = (u < alpha)*nu.cand + (u >= alpha)*nu.in
l.out = (u < alpha)*l.cand + (u >= alpha)*l.in
cnt = (u < alpha)*1 + (u >= alpha)*0
L_inv = (u < alpha)*Linv.cand + (u >= alpha)*Linv
return(list("nu" = nu.out,"l" = l.out,"count" = cnt,"L_inv"=L_inv))
}
|
3c1186d376b13fba6002c351796637939997e2cc | 602c144363277f2efc062d78bce139dd7fb75480 | /man/Inflacja.Rd | 2cf9682ced1bf48af011306489300eb9c8f7d629 | [] | no_license | mbojan/mbtools | 637c6cfba11a63d7a052867d0afa211af00060ad | ed7680f0f9ae68ea12f6bca403f1b686f20e9687 | refs/heads/master | 2022-07-14T03:25:05.525650 | 2022-06-25T18:43:09 | 2022-06-25T18:43:09 | 29,505,211 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,442 | rd | Inflacja.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Inflacja.R
\docType{data}
\name{Inflacja}
\alias{Inflacja}
\title{Monthly inflation rates in Poland}
\format{
A data frame with 195 observations on the following 6 variables.
\describe{
\item{year}{Year}
\item{month}{Month}
\item{infl1}{a time-series, inflation as compared to the same month
of the previous year}
\item{infl2}{a time-series, inflation as
compared to the previous month}
\item{infl3}{a time-series,
inflation as compared to December of the previous year}
\item{infl4}{a time-series, average inflation of the previous 12
months}
}
}
\source{
(Polish) Central Statistical Office, http://www.stat.gov.pl
}
\description{
Data on monthly inflation rates in Poland since January 1989 up to March
2005.
}
\examples{
data(Inflacja)
### transform inflX variables into time-series objects
Inflacja$infl1ts <- ts(Inflacja$infl1, start=c(1989,1), end=c(2005,3),
freq=12)
Inflacja$infl2ts <- ts(Inflacja$infl2, start=c(1989,1), end=c(2005,3),
freq=12)
Inflacja$infl3ts <- ts(Inflacja$infl3, start=c(1989,1), end=c(2005,3),
freq=12)
Inflacja$infl4ts <- ts(Inflacja$infl4, start=c(1989,1), end=c(2005,3),
freq=12)
### plot some...
plot( Inflacja$infl1ts, main="Inflation rates in Poland",
ylab="Inflation")
lines( Inflacja$infl4ts, lty=2 )
legend( 1995, 1200,
legend=c("compared to year ago","average of last 12 months"),
lty=1:2 )
}
\keyword{datasets}
|
915a061c8b7170c1b94511f0a74a55a885364487 | bc377a0484066010d16658f143ee40699c44a3f1 | /run_analysis.R | 9e474be3a41afe4aa44bc907e5159f9b80763c35 | [] | no_license | wannabeDataScientist/clean_data_project | df45ba48dfd8936898e1b2a730fdf7103940609b | 349c3092d8bc2c241872d3e7fa8fd32edf0285f1 | refs/heads/master | 2021-03-12T20:14:25.293931 | 2014-04-27T23:28:38 | 2014-04-27T23:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,952 | r | run_analysis.R | # read sensor data
sensor_data<-function(category)
{
# read the column names
qfile_name <- file.path(".", paste("features", ".txt",sep=""))
col_names <- read.table(qfile_name, header=FALSE, as.is=T, col.names=c("MeasureID", "MeasureName"))
#read sensor data
qfile_name <- file.path(category, paste("X_", category, ".txt",sep=""))
data <- read.table(qfile_name, header=FALSE, col.names=col_names$MeasureName)
# select the columns required
req_col_names <- grep(".*mean\\(\\)|.*std\\(\\)", col_names$MeasureName)
  data[, req_col_names] # return only the mean() and std() columns
}
add_column<-function(data,category)
{
  # read the activity data
qfile_name <- file.path(category, paste("y_",category, ".txt",sep=""))
activity_table <- read.table(qfile_name, header=FALSE, col.names=c("Activity_ID"))
#read subject data
qfile_name <- file.path(category, paste("subject_",category, ".txt",sep=""))
subject_table <- read.table(qfile_name, header=FALSE, col.names=c("Subject_ID"))
# append the activity id and subject id columns
data$Activity_ID <- activity_table$Activity_ID
data$Subject_ID <- subject_table$Subject_ID
data
}
merge_data_set<-function()
{
data <- rbind(add_column(sensor_data("train"),"train"), add_column(sensor_data("test"),"test"))
#cnames <- colnames(data)
#cnames <- gsub("\\.+mean\\.+", cnames, replacement="Mean")
#cnames <- gsub("\\.+std\\.+", cnames, replacement="Std")
#colnames(data) <- cnames
data
}
apply_descriptive_label <- function(data) {
descriptive_labels <- read.table("activity_labels.txt", header=FALSE, as.is=TRUE, col.names=c("Activity_ID", "Activity_Name"))
#descriptive_labels$Activity_Name <- as.factor(descriptive_labels$Activity_Name)
data_descriptive <- merge(data, descriptive_labels)
data_descriptive
}
create_aggregates<-function(data){
library(reshape2)
# melt the dataset
dimensions = c("Activity_ID", "Activity_Name", "Subject_ID")
fact_vars = setdiff(colnames(data), dimensions)
molten_data <- melt(data, id=dimensions, measure.vars=fact_vars)
# recast
dcast(molten_data, Activity_Name + Subject_ID ~ variable, mean)
}
#Wrapper function
create_assignment_data_set<-function(tidy_file)
{
print("This routine assumes that the extracted files from the zip file \"getdata_projectfiles_UCI HAR Dataset\" are available in \"UCI HAR Dataset\" in the current directory in original structure.")
print(" Source Data Archive:")
print(" https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip")
print("Creating tidy dataset as tidy_data.csv...")
tidy_data_set<-apply_descriptive_label(merge_data_set())
write.table(tidy_data_set,tidy_file)
agg_data_set<-create_aggregates(tidy_data_set)
write.table(agg_data_set,paste(tidy_file,"_agg",sep=""))
}
create_assignment_data_set("tidy_data.txt")
print("Data Set created successfully.")
|
4568aeb75de73a37c452d997643e2e33277088b5 | 97c80ac45d43631596077983dd970a54490a0d57 | /ui.R | c88f683232f0dd2dc85f24df88f8089a0bb6ad5c | [] | no_license | nurfitryah/suicide-rate | 40838f88f6e0fb5a07deec58ec45c2b5e9a6629c | a204564577a595bd72ee6c307c998864bf010b0e | refs/heads/master | 2022-12-12T19:13:43.989385 | 2020-09-15T11:15:11 | 2020-09-15T11:15:11 | 295,701,046 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,455 | r | ui.R | options(shiny.maxRequestSize=200*1024^2)
dashboardPage(title = "Suicide Rate",
dashboardHeader(title = "Suicide Rate"),
dashboardSidebar(
sidebarMenu(
menuItem(
text = "Introduction",
tabName = "intro",
icon = icon("readme")),
menuItem(
text = "5 Country",
tabName = "pie",
icon = icon("chart-pie")),
menuItem(
text = "Age",
tabName = "guyana",
icon = icon("theater-masks")),
menuItem(
text = "Source",
tabName = "source",
icon = icon("bookmark")))
),
dashboardBody(
tabItems(
tabItem(tabName = "intro",
align = "justify",
h1(strong("Introduction : Suicide Rate in 2000, 2010, 2015, and 2016")),
p(h4("As we know that Mental Illness is one of serious disease that could lead to suicide. In fact, 95% of people who commit
suicide have a mental illness [2]. According to the CDC, suicide rates have increased by 30% since 1999.
Nearly 45,000 lives were lost to suicide in 2016 alone.Comments or thoughts about suicide — also
known as suicidal ideation — can begin small like, “I wish I wasn’t here” or “Nothing matters.”
But over time, they can become more explicit and dangerous [3].")),
p(h4("This dataset show age-standardized suicides rate on 183 countries in 2000, 2010, 2015, and 2016. Also this dataset
provide suicide rate by age and gender in 2016. Chart below shows suicide rate in 2000, 2010, 2015,
and 2016, categorized by sex/gender.")),
br(),
highchartOutput(outputId = "h_bar", height = "550px"),
br(),
h4("If you ever wonder how many suicide rate in each 183 country on 2000, 2010, 2015, and 2016, then you could select year
on your left below and see what's happen to Map below."),
br(),
fluidRow(
column(width = 3,
selectInput(
inputId = "Year",
label = "Select Year",
choices = unique(suicide_country$Year)
)),
column(width = 9,
highchartOutput(outputId = "h_map",
height = "650px"))
)),
tabItem(tabName = "pie",
align = "center",
h2(strong("5 Country with Highest and Lowest Suicide Rate in 2016")),
br(),
fluidRow(
column(width = 6,
highchartOutput(outputId = "h_pie1",
height = "600px")
),
column(width = 6,
highchartOutput(outputId = "h_pie2",
height = "600px")
)),
br(),
p(h4("Chart above shows on information maximum of total population of a gender and an age.
If you happened to curious which age or sex/gender on each 183 country that has highest or lowest Suicide Rate,
Tab Age will shows numbers of Suicide Rate categorized by Sex/Gender and Age."))
),
tabItem(tabName = "guyana",
align = "center",
h2(strong("Suicide Rate by Age and Sex/Gender in 2016")),
br(),
selectInput(
inputId = "Country",
label = "Select Country",
choices = unique(suicide_2016$Country)),
highchartOutput(outputId = "h_heat", height = "550px"),
br(),
p(h4("As you can see from heatmap chart above, those 80 years and older have the highest suicide rate
of any age group. As with most age groups, the majority of elders who kill themselves are male [4]. There are may factors that might
contribute somebody or a person to commit suicide. For elderly, most of them feeling loneliness [4].But in a developing country
such as Guyana, the factor or thoughs about suicide are like dominoes. Mental illness, access to lethal chemicals, alcohol misuse,
interpersonal violence, family dysfunction and insufficient mental health resources as key factors that could lead someone
have thoughs about or even do suicide[5].")),
),
tabItem(tabName = "source",
align = "justify",
h2(strong("Source :")),
h4("[1]", tags$a(href = "https://www.kaggle.com/twinkle0705/mental-health-and-suicide-rates", "Data"),""),
br(),
h4("[2]", tags$a(href = "https://www.medscape.com/answers/2013085-157663/what-is-the-role-of-mental-illness-in-the-development-of-suicidal-behaviors", "Source 2")," "),
br(),
h4("[3]", tags$a(href = "https://www.nami.org/About-Mental-Illness/Common-with-Mental-Illness/Risk-of-Suicide", "Source 3")," "),
br(),
h4("[4]", tags$a(href = "https://www.psychologytoday.com/us/blog/understanding-grief/202001/why-do-the-elderly-commit-suicide", "Source 4")," "),
br(),
h4("[5]", tags$a(href = "https://www.npr.org/sections/goatsandsoda/2018/06/29/622615518/trying-to-stop-suicide-guyana-aims-to-bring-down-its-high-rate", "Source 5")," ")
)
))
)
|
07c1ebc8324fa11d02178a15718d6e68169ebefe | fb6bce8e6c5b983277274fcc41a08465f42ef6c2 | /fragment_type_selection.R | 2967482ba96e9082f0c00c44fd5924bfd04597dd | [] | no_license | Cantalapiedra/digestR | 1a14eca70cd12b6a71e588f6f81dc2a42cf89ca1 | f1e94ac6c18dfc445c664ba817fabc4eca64ed6d | refs/heads/master | 2021-01-19T22:05:53.531751 | 2017-02-27T08:34:57 | 2017-02-27T08:34:57 | 82,571,478 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,212 | r | fragment_type_selection.R | #!/usr/bin/env Rscript
## fragment_type_selection
## 2017 CPCantalapiedra
library(data.table)
# Read command line arguments
args = commandArgs(trailingOnly=TRUE)
if (length(args)==2) {
frag_file = args[1] # bed file
frag_types_file = args[2]
} else {
stop("At least one argument must be supplied (input file).n", call.=FALSE)
}
# Read fragment types file
df_frag_types <- fread(frag_types_file, header=FALSE, verbose=FALSE, showProgress=FALSE, sep="\t")
frag_types <- unlist(df_frag_types[,"V1"])
write.table(frag_types, stderr())
# Read bed file
write(paste("Parsing bed file:", frag_file), stderr())
df_bed = fread(frag_file, header=TRUE, sep = "\t", verbose=FALSE, showProgress=FALSE)
head_bed <- head(df_bed)
write.table(head_bed, file = stderr(), row.names = FALSE, quote = FALSE, sep = "\t")
write(paste("Rows:", nrow(df_bed)), stderr())
### Filter by frag type
true_frag_types <- df_bed$fragtype %in% frag_types
filtered <- df_bed[true_frag_types,]
write(paste("Final rows:", nrow(filtered)), stderr())
# output
write.table(filtered, file = stdout(), row.names=FALSE, quote=FALSE, sep="\t")
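# Example invocation (hypothetical file names):
#   Rscript fragment_type_selection.R fragments.bed frag_types.txt > filtered_fragments.bed
# fragments.bed must contain a header with a "fragtype" column;
# frag_types.txt lists one fragment type per line (no header).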
## END |
c23ba6eac660097befed27c7a8112cfab390e14a | 9e62400e609e288f753254161299727f6b8c134e | /program/server_local.R | fa1bcf0ef9735bb9b049d6d924e611e9f9ef5f94 | [] | no_license | MaximilianLombardo/kandinsky | 16d11d1f9ea115557d74f9d18c286919cedfb497 | acf6a76dc3ffdeed12f85107b48b056c060cbd3a | refs/heads/master | 2023-07-16T00:43:58.302008 | 2021-08-22T22:23:53 | 2021-08-22T22:23:53 | 293,324,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 35,858 | r | server_local.R | # ----------------------------------------
# -- PROGRAM server_local.R --
# ----------------------------------------
# USE: Session-specific variables and
# functions for the main reactive
# shiny server functionality. All
# code in this file will be put into
# the framework inside the call to
# shinyServer(function(input, output, session)
# in server.R
#
# NOTEs:
# - All variables/functions here are
# SESSION scoped and are ONLY
# available to a single session and
# not to the UI
#
# - For globally scoped session items
# put var/fxns in server_global.R
#
# FRAMEWORK VARIABLES
# input, output, session - Shiny
# ss_userAction.Log - Reactive Logger S4 object
# ----------------------------------------
# -- IMPORTS --
# -- VARIABLES --
#obj <- NULL
userData <- reactiveValues()
userData$object <- NULL
userData$objectId <- 0
userData$modalOpen <- FALSE
userData$filterList <- NULL
userData$filteredObject <- NULL
userSel <- reactiveValues()
userSel$genes <- NULL
userSel$scatterCluster <- "All"
userSel$scatterPanelColumns <- NULL
userSel$runScatterPlot <- FALSE
userSel$violinPanelColumns <- NULL
userSel$runViolinPlot <- FALSE
userSel$addDotGenes <- NULL
userSel$runDotPlot <- FALSE
userSel$addHeatmapGenes <- NULL
userSel$runHeatmap <- FALSE
userSel$diffCluster1 <- NULL
userSel$diffCluster2 <- NULL
userSel$diffClusterCells1 <- NULL
userSel$diffClusterCells2 <- NULL
userSel$runDiffCalc <- FALSE
# -- FUNCTIONS --
# resets all plot-run flags; called when the filtered data or the loaded object changes
disable_plots <- function() {
    userSel$runScatterPlot <- FALSE
    userSel$runViolinPlot <- FALSE
    userSel$runDotPlot <- FALSE
    userSel$runHeatmap <- FALSE
    userSel$runDiffCalc <- FALSE
}
filtered_data <- reactive({
result <- userData$object
filters <- list()
if (!is.null(result)) {
# Check if filters available in input and that values have changed since last time
input_names <- names(input)
input_filters <- get_filter_input_Fields(input_names, isolate(userData$objectId))
if (!is.null(result$metadata) && !identical(input_filters, character(0))) {
changed_fields <- NULL
data_filtering <- FALSE
# detect if there has been a change since the last time
for (input_filter in input_filters) {
filter_values <- input[[input_filter]]
filter_name <- gsub(get_filter_prefix(isolate(userData$objectId)), "", input_filter)
if (is.null(filter_values) || !identical(isolate(userData$filterList)[[filter_name]], filter_values)) {
data_filtering <- TRUE
changed_fields <- c(changed_fields, filter_name)
}
                userData$filterList[[filter_name]] <- filter_values
}
# Filter the data
if (data_filtering) {
metadata <- result$metadata %>%
mutate(id = seq(1, nrow(result$metadata)))
for (field in changed_fields) {
filter_values <- isolate(userData$filterList)[[field]]
if (is.null(filter_values)) {
metadata <- metadata[0,]
break
}
metadata <- metadata %>%
filter(as.vector(!!(rlang::sym(field))) %in% filter_values)
}
if (nrow(metadata) > 0) {
filtered_rowids <- metadata %>% pull(id)
} else {
filtered_rowids <- c()
}
result$tsne <- get_filtered_cell_data(result$tsne, row_ids = filtered_rowids)
result$clusters <- get_filtered_cell_data(result$clusters, row_ids = filtered_rowids)
result$cells <- result$cells[filtered_rowids]
# filter expression and detection data
seurat_object <- result$seurat
if (!is.null(filtered_rowids)) {
assay_use <- seurat_object@active.assay
seurat_object@assays[[assay_use]]@data <- seurat_object@assays[[assay_use]]@data[, filtered_rowids]
seurat_object@active.ident <- droplevels(seurat_object@active.ident[filtered_rowids])
result$expression <- future({avg.ex.scale(seurat_object) %>% as.data.frame()}, stdout = FALSE)
result$detection <- future({local_AverageDetectionRate(seurat_object) %>% as.data.frame()}, stdout = FALSE)
#cleanup
rm(seurat_object)
} else {
result$expression <- future({NULL})
result$detection <- future({NULL})
}
# save filtered data and disable plots
isolate(userData$filteredObject <- result)
disable_plots()
}
# save filtered data
if (!is.null(isolate(userData$filteredObject))) {
result <- isolate(userData$filteredObject)
}
}
}
result
})
top10_genes <- reactive({
    get_top_genes_data(userData$object, "top10")
})

top30_genes <- reactive({
    get_top_genes_data(userData$object, "top30")
})

differentials_table_content <- reactive({
    c1 <- userSel$diffCluster1
    s1 <- userSel$diffClusterCells1
    c2 <- userSel$diffCluster2
    s2 <- userSel$diffClusterCells2

    result <- NULL
    if (userSel$runDiffCalc) {
        result <- calculate_differentials(filtered_data(), c1, s1, c2, s2)
        if (is.null(result)) {
            createAlert(session,
                        "bodyAlert",
                        "zeroDiffOutputAlertID",
                        style = "warning",
                        content = paste("Differential Analysis resulted in no output for the current selections and logFC > ",
                                        g_differential_logfc_threshold),
                        append = FALSE)
        }
    }
    result
})
#
# plot name reactives
base_filename <- reactive({
    current_time <- format(Sys.time(), "%Y.%m.%d_%H.%M")
    paste0(current_time, "_SCV")
})

base_plot_filename <- reactive({
    paste0(base_filename(), "_", userData$object$meta$object_name)
})

overview_plot_filename <- reactive({
    paste0(base_plot_filename(), "_Overview")
})

scatter_plot_filename <- reactive({
    paste0(base_plot_filename(), "_Scatter")
})

violin_plot_filename <- reactive({
    paste0(base_plot_filename(), "_Violin")
})

dot_plot_filename <- reactive({
    paste0(base_plot_filename(), "_Dot")
})

heatmap_filename <- reactive({
    paste0(base_plot_filename(), "_Heatmap")
})

differentials1_plot_filename <- reactive({
    paste0(base_plot_filename(), "_Differentials1")
})

differentials2_plot_filename <- reactive({
    paste0(base_plot_filename(), "_Differentials2")
})
# download filenames for tables
# top10_DE_filename <- reactive({
# get_top_DE_download_filename(userData$object$meta$object_name, "_Top10DE_Genes")
# })
#
# top30_DE_filename <- reactive({
# get_top_DE_download_filename(userData$object$meta$object_name, "_Top30DE_Genes")
# })
#
# differentials_filename <- reactive({
# get_differentials_filename(userData$object$meta$object_name)
# })
# -- MODULES --
# callModule(downloadableTable, "top10DE", ss_userAction.Log,
# filenameroot = top10_DE_filename,
# downloaddatafxns = list(csv = top10_genes,
# tsv = top10_genes),
# tabledata = top10_genes,
# rownames = FALSE)
#
# callModule(downloadFile, "top30DE", ss_userAction.Log,
# filenameroot = top30_DE_filename,
# datafxns = c(csv = top30_genes,
# tsv = top30_genes))
#
# callModule(heatmap_downloadableTable, "differentialsTable", ss_userAction.Log,
# filenameroot = differentials_filename,
# downloaddatafxns = list(csv = differentials_table_content,
# tsv = differentials_table_content),
# tabledata = differentials_table_content,
# rownames = FALSE)
# ----------------------------------------
# -- SHINY SERVER CODE --
# ----------------------------------------
observeEvent(userData$object, {
updateSelectizeInput(session,
"genesSel",
choices = userData$object$genes,
selected = character(0),
server = TRUE)
updateSelectizeInput(session,
"genesOnSel",
choices = userData$object$genes,
selected = character(0),
server = TRUE)
updateSelectizeInput(session,
"genesOffSel",
choices = userData$object$genes,
selected = character(0),
server = TRUE)
})
observeEvent(c(userData$object, userData$filteredObject), {
if (is.null(userData$filteredObject)) {
data_object <- userData$object
} else {
data_object <- userData$filteredObject
}
cluster_options <- c("All", as.character(sort(unique(data_object$clusters$Cluster))))
updateSelectizeInput(session,
"scatterClusterSel",
choices = cluster_options,
selected = "All",
server = FALSE)
for (item in c("differentialsCluster1Sel", "differentialsCluster2Sel")) {
updateSelectizeInput(session,
item,
choices = cluster_options,
selected = "",
server = FALSE)
}
})
output$summaryTitle <- renderText({
paste("Summary:", userData$object$meta$title)
})
output$filterOptions <- renderUI({
body <- NULL
filter_list <- userData$object$filter_list
if (length(names(filter_list)) > 0) {
header_text <- "Select/Deselect the below options to change the cells included in the visualization data. All the dataset cells are included (checked) by default."
body <- lapply(names(filter_list), FUN = function(name) {
checkboxGroupInput(inputId = paste0(get_filter_prefix(userData$objectId), name),
label = name,
choices = filter_list[[name]],
selected = filter_list[[name]],
width = "80%")
})
} else {
header_text <- "No global filters were defined for this object by the object author"
}
list(tags$div(tags$br(),
tags$h4("Global Filtering"),
tags$p(style = "margin:10px;", header_text)),
tags$div(id = "filtersDiv", body))
})
# Since the filterOptions is on the second (inactive) tab, it's not rendered automatically.
# When switching to this tab, the plot will be rendered again though there is no change. Line below forces it to render.
outputOptions(output, "filterOptions", suspendWhenHidden = FALSE)
output$datasetSummary <- renderUI({
get_dataset_summary(userData$object)
})
output$differentialsText <- renderUI({
diff_text <- NULL
select_area_text <- "select cells on each chart."
html_diff_string_start <- "<center><em>For cluster differential expression analysis select two different clusters.<br>
For sub-cluster analysis choose the same cluster and then"
if (!is.null(input$differentialsCluster1Sel) && !is.null(input$differentialsCluster2Sel) &&
(input$differentialsCluster1Sel == input$differentialsCluster2Sel)) {
diff_text <- paste(html_diff_string_start, paste0("<b>", select_area_text, "</b>"))
} else {
diff_text <- paste(html_diff_string_start, select_area_text)
}
diff_text <- paste0(diff_text, "</em></center>")
HTML(diff_text)
})
output$differentialsTableTitle <- renderUI({
title <- "Differentials Between Selected Clusters"
if (userSel$runDiffCalc) {
if (userSel$diffCluster1 == userSel$diffCluster2) {
title <- paste("Differentials Between Selected Cells in Cluster", userSel$diffCluster1)
} else {
title <- paste("Differentials Between Cluster", userSel$diffCluster1, "and Cluster", userSel$diffCluster2)
}
}
tags$strong(title)
})
output$differentialsTableAlternativeText <- renderUI({
result <- NULL
diff_table_content <- differentials_table_content()
if (is.null(diff_table_content) || nrow(diff_table_content) == 0) {
result <- HTML("<center><em>Press the Calculate Differentials button to perform differential expression analysis on the selected clusters or sub-clusters.</em></center>")
}
result
})
output$cxOverviewPlot <- renderCanvasXpress({
plot <- get_overview_plot(filtered_data(),
overview_plot_filename())
if (is.null(plot)) {
return(canvasXpress(destroy = TRUE))
}
else {
return(plot)
}
})
#SeuratPlot
output$seuratVlnPlot <- renderPlot({
#plot <- makeVlnPlot(filtered_data())
#req(obj)
plot <- makeVlnPlot(obj(),
features = input$vlnFeatures,
splits = input$vlnSplit)
return(plot)
})
output$seuratDimPlot <- renderPlot({
plot <- makeDimPlot(obj(),
reduction = input$dimPlotReduction,
groups = input$dimPlotGroups,
splits = input$dimPlotSplits)
plot <- plot + NoLegend() + NoAxes()
plot <- plot + ggtitle(label = toupper(as.character(input$dimPlotReduction))) + theme(plot.title = element_text(hjust = 0.5))
return(plot)
})
output$seuratFeaturePlot <- renderPlot({
plot <- makeFeaturePlot(obj(),
features = input$featurePlotFeature,
reduction = input$dimPlotReduction,
splits = input$dimPlotSplits)
plot <- lapply(plot, FUN = function(p){p + NoAxes()})
plot <- CombinePlots(plot)
return(plot)
})
#
#labels
output$DimPlotLabel <- renderText({
"Dim Plot Options"
})
output$FeaturePlotLabel <- renderText({
"Feature Plot Options"
})
#tables
output$markerTable <- renderDataTable({
dt <- datatable(obj()@misc$markers, filter = "top",
extensions = 'Buttons',
options = list(
dom = 'Blfrtip',
buttons = c('copy', 'csv', 'excel', 'pdf', 'print')
))
dt <- formatRound(dt, columns = c("p_val",
"avg_logFC",
"p_val_adj"))
return(dt)#Not sure if I should round here
})
#Chord Diagram
output$chordDiagram <- renderPlot({
makeChordDiagram(obj(), input$clusterNumber)
})
output$chordTable <- renderDataTable({
dt <- datatable(obj()@misc$signalling, filter = "top",
extensions = 'Buttons',
options = list(
dom = 'Blfrtip',
buttons = c('copy', 'csv', 'excel', 'pdf', 'print')
))
})
output$UMAP3D <- renderPlotly({
makeUMAP3DPlot(obj())
})
output$hexSelectionPlot <- renderPlotly({
plot <- makeHexSelectionPlot(obj = obj(),
reduction = input$hexSelectReduction,
feature = input$hexSelectFeature,
do.feature = input$hexFeatureBool)
# plot <- makeScatterSelectionPlot(obj = obj(),
# reduction = input$hexSelectReduction,
# feature = input$hexSelectFeature,
# do.feature = input$hexFeatureBool)
#plotly::event_register(plot, 'plotly_selecting')
return(plot)
})
output$selected <- renderPrint({
#req(obj())
plotly::event_data("plotly_selected", source = "hexsource")
#"this is sample text"
})
#output$selectedCellsDimPlot <- <- renderPlot({
# makeSelectedCellsDimPlot(obj(), )
#})
###################################################
output$cxScatterPlot <- renderCanvasXpress({
plot <- NULL
if (userSel$runScatterPlot && !is.null(input$genesSel)) {
plot <- get_scatter_panel_plot(filtered_data(),
userSel$genes,
userSel$scatterCluster,
scatter_plot_filename(),
userSel$scatterPanelColumns)
}
if (is.null(plot)) {
return(canvasXpress(destroy = TRUE))
}
else {
return(plot)
}
})
output$cxViolinPlot <- renderCanvasXpress({
plot <- NULL
if (userSel$runViolinPlot && !is.null(input$genesSel)) {
plot <- get_violin_panel_plot(filtered_data(),
userSel$genes,
violin_plot_filename(),
userSel$violinPanelColumns)
}
if (is.null(plot)) {
return(canvasXpress(destroy = TRUE))
}
else {
return(plot)
}
})
output$cxDotPlot <- renderUI({
gene_count <- 0
if (userSel$runDotPlot) {
plot_result <- get_dot_plot(filtered_data(),
input$genesSel,
get_additional_genes(input$addDotGenes, top10_genes(), top30_genes()),
input$addDotGenes,
dot_plot_filename())
output$dotplot1 <- renderCanvasXpress({plot_result[[1]]})
gene_count <- plot_result[[2]]
} else {
output$dotplot1 <- renderCanvasXpress({canvasXpress(destroy = TRUE)})
}
tagList(
canvasXpressOutput("dotplot1", height = get_dynamic_plot_height(gene_count))
)
})
output$cxHeatmapPlot <- renderUI({
gene_count <- 0
if (userSel$runHeatmap) {
plot_result <- get_heatmap_plot(filtered_data(),
input$genesSel,
get_additional_genes(input$addHeatmapGenes, top10_genes(), top30_genes()),
input$addHeatmapGenes,
heatmap_filename())
output$heatmap1 <- renderCanvasXpress({plot_result[[1]]})
gene_count <- plot_result[[2]]
} else {
output$heatmap1 <- renderCanvasXpress({canvasXpress(destroy = TRUE)})
}
tagList(
canvasXpressOutput("heatmap1", height = get_dynamic_plot_height(gene_count))
)
})
output$cxDifferentialsScatterPlot1 <- renderCanvasXpress({
if (!is.null(input$differentialsCluster1Sel) && input$differentialsCluster1Sel != "") {
get_differential_scatter_plot(isolate(filtered_data()),
input$differentialsCluster1Sel,
paste("Cluster", input$differentialsCluster1Sel),
"cxDifferentialsSelected1",
differentials1_plot_filename())
} else {
return(canvasXpress(destroy = TRUE))
}
})
output$cxDifferentialsScatterPlot2 <- renderCanvasXpress({
if (!is.null(input$differentialsCluster2Sel) && input$differentialsCluster2Sel != "") {
get_differential_scatter_plot(isolate(filtered_data()),
input$differentialsCluster2Sel,
paste("Cluster", input$differentialsCluster2Sel),
"cxDifferentialsSelected2",
differentials2_plot_filename())
} else {
return(canvasXpress(destroy = TRUE))
}
})
# observe inputs
observeEvent(input$top10Genes, {
if (input$top10Genes) {
updateCheckboxInput(session, "top30Genes", value = FALSE)
}
})
observeEvent(input$top30Genes, {
if (input$top30Genes) {
updateCheckboxInput(session, "top10Genes", value = FALSE)
}
})
observeEvent(c(input$differentialsCluster1Sel, input$differentialsCluster2Sel), {
if (input$differentialsCluster1Sel != "" && input$differentialsCluster2Sel != "") {
enable("diffCalculateBtn")
} else {
disable("diffCalculateBtn")
}
})
# blank out plots when options have changed
observeEvent(input$genesSel, {
userSel$runScatterPlot <- FALSE
userSel$runViolinPlot <- FALSE
userSel$runDotPlot <- FALSE
userSel$runHeatmap <- FALSE
})
observeEvent(input$scatterClusterSel, {
if (input$scatterClusterSel != userSel$scatterCluster) {
userSel$runScatterPlot <- FALSE
}
})
observeEvent(input$scatterPanelPlotColumns, {
if (!is.null(userSel$genes) && userSel$genes != "") {
check_result <- check_panel_plot_columns(userSel$genes, input$scatterPanelPlotColumns)
if (!check_result[[1]] || (check_result[[1]]) && check_result[[2]] != userSel$scatterPanelColumns) {
userSel$runScatterPlot <- FALSE
}
}
})
observeEvent(input$violinPanelPlotColumns, {
if (userSel$runViolinPlot) {
check_result <- check_panel_plot_columns(userSel$genes, input$violinPanelPlotColumns)
if (!check_result[[1]] || (check_result[[1]]) && check_result[[2]] != userSel$violinPanelColumns) {
userSel$runViolinPlot <- FALSE
}
}
})
observeEvent(input$addDotGenes, {
if (userSel$runDotPlot) {
if (input$addDotGenes != userSel$addDotGenes) {
userSel$runDotPlot <- FALSE
}
}
})
observeEvent(input$addHeatmapGenes, {
if (userSel$runHeatmap) {
if (input$addHeatmapGenes != userSel$addHeatmapGenes) {
userSel$runHeatmap <- FALSE
}
}
})
observeEvent(c(input$differentialsCluster1Sel, input$differentialsCluster2Sel), {
if (userSel$runDiffCalc) {
userSel$runDiffCalc <- FALSE
}
})
observeEvent(c(input$cxDifferentialsSelected1, input$cxDifferentialsSelected2), {
if ((input$differentialsCluster1Sel == input$differentialsCluster2Sel) && userSel$runDiffCalc) {
userSel$runDiffCalc <- FALSE
}
})
# File to be uploaded has been chosen by user
observeEvent(input$fileChosen, {
if ((input$fileChosen / (1024*1024)) > 10) {
toggleModal(session, "loading_modal", toggle = "open")
userData$modalOpen <- TRUE
}
})
# File modal closed by user
observeEvent(input$fileModalClosed, {
userData$modalOpen <- FALSE
})
#############################
#Update the data object being loaded
obj <- reactive({
readRDS(input$objectInput$datapath)
})
#Update the different control ui based on the new object
#vln qc
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"vlnFeatures",
choices = colnames(obj()@meta.data),
selected = c("nCount_RNA"))
})
#vln qc
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"vlnSplit",
choices = colnames(obj()@meta.data),
selected = c("orig.ident"))
})
#Dimplot
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"dimPlotReduction",
choices = names(obj()@reductions),
selected = c("tsne"))
})
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"dimPlotGroup",
choices = colnames(obj()@meta.data),
selected = c("seurat_clusters"))
})
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"dimPlotSplit",
choices = colnames(obj()@meta.data))
})
#FeaturePlot
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"featurePlotFeature",
choices = rownames(obj()[["RNA"]]@counts),#Change to handle other slots later
selected = VariableFeatures(obj())[1],
server = TRUE)
})
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"featurePlotSlot",
choices = names(obj()@assays),#Change to handle other slots later
selected = c("RNA"))
})
#Chord Diagram
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"clusterNumber",
choices = levels(obj()@active.ident))
})
#Hex Selection Plot
observeEvent(input$objectInput, {
updateSelectizeInput(session,
"hexSelectReduction",
choices = names(obj()@reductions),
selected = names(obj()@reductions[1]))
})
observeEvent(input$objectInput, {
updateSelectizeInput(session,
inputId = "hexSelectFeature",
choices = rownames(obj()[["RNA"]]@counts),
selected = '',
server = TRUE)
})
# File upload has finished
# observeEvent(input$fileInputDialog, {
# input_file <- input$fileInputDialog
# obj <- readRDS(input_file)###
# return(obj)
# if (!is.null(input_file)) {
# load_data_result <- load_data(input_file)
# #return(load_data_result)#Just Returning the Seurat object
# error_messages <- load_data_result[["errors"]]
# if (is.null(error_messages)) {
# userData$objectId <- userData$objectId + 1
# userData$object <- load_data_result[["object"]]
# userData$filteredObject <- NULL
# disable_plots()
# }
#
# if (userData$modalOpen) {
# toggleModal(session, "loading_modal", toggle = "close")
# userData$modalOpen <- FALSE
# }
#
# if (!is.null(error_messages)) {
# output$loading_error_message <- renderText({get_file_dialog_error_message(input_file, error_messages)})
# toggleModal(session, "file_error_modal", toggle = "open")
# }
# }
# })
# About app link clicked
observeEvent(input$about_link, {
session$sendCustomMessage("openTitleInfoBox", runif(1))
})
# Scatter Plot Button Pushed
observeEvent(input$scatterPlotBtn, {
#clear out old alerts and plots
try({
closeAlert(session, "invalidScatterInputAlertID")
userSel$runScatterPlot <- FALSE
})
# check input
invalidInputMessages <- list()
if (is.null(input$genesSel) || input$genesSel == "") {
userSel$genes <- NULL
} else {
userSel$genes <- input$genesSel
}
if (is.null(userSel$genes)) {
invalidInputMessages <- append(invalidInputMessages, "No Genes selected in Chart Options.")
}
if (is.null(input$scatterClusterSel) || input$scatterClusterSel == "") {
userSel$scatterCluster <- NULL
invalidInputMessages <- append(invalidInputMessages, "No Cluster selected.")
}
else {
userSel$scatterCluster <- input$scatterClusterSel
}
if (is.null(input$scatterPanelPlotColumns) || input$scatterPanelPlotColumns == "") {
userSel$scatterPanelColumns <- NULL
invalidInputMessages <- append(invalidInputMessages, "No Panel Plot Columns selected.")
}
else {
check_result <- check_panel_plot_columns(userSel$genes, input$scatterPanelPlotColumns)
if (check_result[[1]]) {
userSel$scatterPanelColumns <- check_result[[2]]
} else {
userSel$scatterPanelColumns <- 0
updateTextInput(session, "scatterPanelPlotColumns", value = "auto")
}
}
if (length(invalidInputMessages) > 0) {
createAlert(session,
"bodyAlert",
"invalidScatterInputAlertID",
style = "warning",
content = paste(invalidInputMessages, collapse = "<br>"),
append = FALSE)
} else {
# trigger plots
userSel$runScatterPlot <- TRUE
}
})
# Violin Plot Button Pushed
observeEvent(input$violinPlotBtn, {
#clear out old alerts and plots
try({
closeAlert(session, "invalidViolinInputAlertID")
userSel$runViolinPlot <- FALSE
})
# check input
invalidInputMessages <- list()
if (is.null(input$genesSel) || input$genesSel == "") {
userSel$genes <- NULL
} else {
userSel$genes <- input$genesSel
}
if (is.null(userSel$genes)) {
invalidInputMessages <- append(invalidInputMessages, "No Genes selected in Chart Options.")
}
if (is.null(input$violinPanelPlotColumns) || input$violinPanelPlotColumns == "") {
userSel$violinPanelColumns <- NULL
invalidInputMessages <- append(invalidInputMessages, "No Panel Plot Columns selected.")
}
else {
check_result <- check_panel_plot_columns(userSel$genes, input$violinPanelPlotColumns)
if (check_result[[1]]) {
userSel$violinPanelColumns <- check_result[[2]]
} else {
userSel$violinPanelColumns <- 0
updateTextInput(session, "violinPanelPlotColumns", value = "auto")
}
}
if (length(invalidInputMessages) > 0) {
createAlert(session,
"bodyAlert",
"invalidViolinInputAlertID",
style = "warning",
content = paste(invalidInputMessages, collapse = "<br>"),
append = FALSE)
} else {
# trigger plots
userSel$runViolinPlot <- TRUE
}
})
# Dot Plot Button Pushed
observeEvent(input$dotPlotBtn, {
#clear out old alerts and plots
try({
closeAlert(session, "invalidDotInputAlertID")
userSel$runDotPlot <- FALSE
})
# check input
invalidInputMessages <- list()
if (is.null(input$genesSel) && input$addDotGenes == "off") {
userSel$addDotGenes <- NULL
invalidInputMessages <- append(invalidInputMessages, "No Genes selected in Chart Options.")
} else {
userSel$addDotGenes <- input$addDotGenes
}
if (length(invalidInputMessages) > 0) {
createAlert(session,
"bodyAlert",
"invalidDotInputAlertID",
style = "warning",
content = paste(invalidInputMessages, collapse = "<br>"),
append = FALSE)
} else {
# trigger plots
userSel$runDotPlot <- TRUE
}
})
# Heatmap Plot Button Pushed
observeEvent(input$heatmapPlotBtn, {
#clear out old alerts and plots
try({
closeAlert(session, "invalidHeatmapInputAlertID")
userSel$runHeatmap <- FALSE
})
# check input
invalidInputMessages <- list()
if (is.null(input$genesSel) && input$addHeatmapGenes == "off") {
userSel$addHeatmapGenes <- NULL
invalidInputMessages <- append(invalidInputMessages, "No Genes selected in Chart Options.")
} else {
userSel$addHeatmapGenes <- input$addHeatmapGenes
}
if (length(invalidInputMessages) > 0) {
createAlert(session,
"bodyAlert",
"invalidHeatmapInputAlertID",
style = "warning",
content = paste(invalidInputMessages, collapse = "<br>"),
append = FALSE)
} else {
# trigger plots
userSel$runHeatmap <- TRUE
}
})
# Differential Button Pushed
observeEvent(input$diffCalculateBtn, {
#clear out old alerts and plots
try({
closeAlert(session, "invalidDiffInputAlertID")
closeAlert(session, "zeroDiffOutputAlertID")
})
userSel$runDiffCalc <- FALSE
userSel$diffCluster1 <- NULL
userSel$diffCluster2 <- NULL
userSel$diffClusterCells1 <- NULL
userSel$diffClusterCells2 <- NULL
# check input
invalidInputMessages <- list()
if (is.null(input$differentialsCluster1Sel) || input$differentialsCluster1Sel == "" ||
is.null(input$differentialsCluster2Sel) || input$differentialsCluster2Sel == "") {
invalidInputMessages <- append(invalidInputMessages, "Both Cluster 1 and Cluster 2 must be selected.")
}
else {
userSel$diffCluster1 <- input$differentialsCluster1Sel
userSel$diffCluster2 <- input$differentialsCluster2Sel
# Combination of All and cluster X not possible
if ((userSel$diffCluster1 == "All" && userSel$diffCluster2 != "All") ||
(userSel$diffCluster1 != "All" && userSel$diffCluster2 == "All")) {
cluster <- setdiff(c(userSel$diffCluster1, userSel$diffCluster2), "All")
message <- paste("Differential analysis between All and Cluster", cluster, "is not available.", "
If you wish to perform sub-cluster analysis on the entire dataset choose All for both plots and select cells on each plot to compute the differentials.")
invalidInputMessages <- append(invalidInputMessages, message)
}
else if (userSel$diffCluster1 == userSel$diffCluster2) {
# Checks if the same cluster selected
if (is.null(input$cxDifferentialsSelected1) || length(input$cxDifferentialsSelected1) < 1 ||
is.null(input$cxDifferentialsSelected2) || length(input$cxDifferentialsSelected2) < 1) {
message <- paste("Cells on both plots must be selected to perform differential sub-cluster analysis.")
invalidInputMessages <- append(invalidInputMessages, message)
}
else if (all(input$cxDifferentialsSelected1 %in% input$cxDifferentialsSelected2) &&
all(input$cxDifferentialsSelected2 %in% input$cxDifferentialsSelected1)) {
message <- paste("The same cells are selected on both plots. To perform differential sub-cluster analysis there must be a difference in the cells selected.")
invalidInputMessages <- append(invalidInputMessages, message)
}
else {
t1 <- input$cxDifferentialsSelected1
t2 <- input$cxDifferentialsSelected2
userSel$diffClusterCells1 <- input$cxDifferentialsSelected1
userSel$diffClusterCells2 <- input$cxDifferentialsSelected2
}
}
}
if (length(invalidInputMessages) > 0) {
createAlert(session,
"bodyAlert",
"invalidDiffInputAlertID",
style = "warning",
content = paste(invalidInputMessages, collapse = "<br>"),
append = FALSE)
} else {
userSel$runDiffCalc <- TRUE
}
})
|
fe4f60a286894f3fa6697ab56e924380ab554931 | d2e16353b0f1f431907f8794dde9024147c1c9de | /test script.R | d1bf71f1bc9922e74acbb65ab9b5d11c470f4dd6 | [] | no_license | SteBrinke/Exp-R-Sschijf | d3e2c8d036865528be894cefe7a15b182f7506f5 | a1b598a15af8658781a3cf3392e7e0b1f0d0cbb5 | refs/heads/master | 2020-04-23T20:37:17.894924 | 2019-02-19T10:24:14 | 2019-02-19T10:24:14 | 171,446,777 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 48 | r | test script.R | #test script
x <- 1:10
y <- mean(x)
z <- var(x)
|
02a9622cd2b1bc11ef8b79d56aed1b8896531410 | 226e31ed001c66cd60b70ee3a452d47d11ec9f69 | /Fraser_et_al_FigureS10.R | 82f816ba618a30125c95560b64f51605e49ca6ba | [] | no_license | mfraser3/ZNRF3_2021 | bb2fdee09b68df052271b9271c23bc23060b1b2c | 1eacd557f6b032c73137f556ad9f67dac6d9958b | refs/heads/main | 2023-04-18T14:10:48.115586 | 2021-08-20T21:08:12 | 2021-08-20T21:08:12 | 398,320,558 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,025 | r | Fraser_et_al_FigureS10.R | # FRASER ET AL - FIGURE S10 #
## LOAD LIBRARIES ####
library(tidyverse)
library(survival)
library(survminer)
## LOAD AND TIDY DATA - CPCG ####
CPCG_ZNRF3_OUTCOME_ADJUSTED_PGA <- readRDS("/Users/michaelfraser/OneDrive/Work/Manuscripts/2020/ZNRF3/FINAL/Nature Cancer/Data and Code/CPCGENE.OUTCOME.ADJUSTED.PGA.rds")
x <- CPCG_ZNRF3_OUTCOME_ADJUSTED_PGA$ZNRF3.RNA
y <- CPCG_ZNRF3_OUTCOME_ADJUSTED_PGA$XBP1.RNA
# SPEARMAN TEST - CPCG ####
cor.test(x, y, alternative = "two.sided", method = "spearman")
# SCATTERPLOT - CPCG ####
ZNRF3.XBP1.RNA <- ggplot(data = CPCG_ZNRF3_OUTCOME_ADJUSTED_PGA, aes(x = ZNRF3.RNA, y = XBP1.RNA)) +
geom_point() +
geom_smooth(colour = "black", method = "lm", se = FALSE) +
xlab("ZNRF3 RNA Abundance") +
ylab("XPB1 RNA Abundance") +
theme(
panel.background = element_blank(),
panel.border = element_rect(fill = NA, size = 2),
axis.text = element_text(face = "bold",
size = 32,
colour = "black"),
axis.title.x = element_text(face = "bold", size = 45,
margin = margin(t= 10, r = 0, b= 0, l = 0)),
axis.title.y = element_text(face = "bold", size = 45,
margin = margin(t=0,r=10,b=0,l=0))
) +
annotate("text",
label = "\U03C1 = 0.372",
x = 7,
y = 12.25,
size = 14,
hjust = 0) +
annotate("text",
label = "P: 3.18 %*% 10^-8",
x = 7,
y = 12.1,
size = 14,
hjust = 0,
parse = TRUE)
ZNRF3.XBP1.RNA
## LOAD DATA - TCGA ####
TCGA_ZNRF3_XBP1_RNA_PFS <- readRDS("/Users/michaelfraser/OneDrive/Work/Manuscripts/2020/ZNRF3/FINAL/Nature Cancer/Data and Code/TCGA_ZNRF3_XBP1_RNA_PFS.rds")
# SPEARMAN TEST - TCGA ####
x <- TCGA_ZNRF3_XBP1_RNA_PFS$ZNRF3.RNA
y <- TCGA_ZNRF3_XBP1_RNA_PFS$XBP1.RNA
cor.test(x, y, alternative = "two.sided", method = "spearman")
## SCATTERPLOT - TCGA ####
ZNRF3.XBP1.RNA <- ggplot(data = TCGA_ZNRF3_XBP1_RNA_PFS, aes(x = ZNRF3.RNA, y = XBP1.RNA)) +
geom_point() +
geom_smooth(colour = "black", method = "lm", se = FALSE) +
xlab("ZNRF3 RNA Abundance") +
ylab("XPB1 RNA Abundance") +
theme(
panel.background = element_blank(),
panel.border = element_rect(fill = NA, size = 2),
axis.text = element_text(face = "bold",
size = 32,
colour = "black"),
axis.title.x = element_text(face = "bold", size = 45,
margin = margin(t= 10, r = 0, b= 0, l = 0)),
axis.title.y = element_text(face = "bold", size = 45,
margin = margin(t=0,r=10,b=0,l=0))
) +
annotate("text",
label = "\U03C1 = 0.171",
x = 7,
y = 12.25,
size = 14,
hjust = 0) +
annotate("text",
label = "P: 1.37 %*% 10^-4",
x = 7,
y = 12.1,
size = 14,
hjust = 0,
parse = TRUE)
ZNRF3.XBP1.RNA
## COX PROPORTIONAL HAZARDS MODELS - TCGA ####
TCGA_ZNRF3_XBP1_RNA_PFS <- TCGA_ZNRF3_XBP1_RNA_PFS %>%
mutate(ZNRF3.XBP1.RNA.BIN = as.numeric(ifelse(ZNRF3.RNA.BIN == 0 & XBP1.RNA.BIN == 0, 0,
ifelse(ZNRF3.RNA.BIN == 1 & XBP1.RNA.BIN == 0,1,
ifelse(ZNRF3.RNA.BIN == 0 & XBP1.RNA.BIN == 1, 2,
ifelse(ZNRF3.RNA.BIN == 1 & XBP1.RNA.BIN == 1, 3, NA))))))
summary(coxph(Surv(pfs.time, pfs.bin) ~ ZNRF3.RNA.BIN + XBP1.RNA.BIN, data = TCGA_ZNRF3_XBP1_RNA_PFS))
## LOG RANK TEST - TCGA ####
survdiff(Surv(pfs.time, pfs.bin) ~ ZNRF3.XBP1.RNA.BIN, data = TCGA_ZNRF3_XBP1_RNA_PFS)
# KAPLAN MEIER CURVE - TCGA ####
fit <- survfit(Surv(pfs.time, pfs.bin) ~ ZNRF3.XBP1.RNA.BIN, data = TCGA_ZNRF3_XBP1_RNA_PFS)
TCGA.ZNRF3.XBP1.RNA.KM <- ggsurvplot(
fit,
data = TCGA_ZNRF3_XBP1_RNA_PFS,
size = 1,
palette =
c("#FF0000", "#104E8B", "black", "chartreuse3"),
conf.int = FALSE,
pval = FALSE,
risk.table = TRUE,
risk.table.title = "Number At Risk",
xlab = "Time Post-Treatment (Months)",
ylab = "Progression-Free Survival",
xlim = c(0,122),
ylim = c(0,1.01),
break.time.by = 24,
axes.offset = FALSE,
font.legend =
c(16),
font.x =
c(20, "bold"),
font.y =
c(20,"bold"),
legend =
c(.25, 0.38),
legend.labs =
c("Both High",
"ZNRF3 Low",
"XBP1 Low",
"Both Low"
),
legend.title = "ZNRF3/XBP1 RNA",
fontsize = 6,
risk.table.height = 0.25,
risk.table.y.text = FALSE
)
TCGA.ZNRF3.XBP1.RNA.KM$table <- TCGA.ZNRF3.XBP1.RNA.KM$table +
theme(
plot.title = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
plot.margin = margin(0,1,1,1,"cm")
)
TCGA.ZNRF3.XBP1.RNA.KM$plot <- TCGA.ZNRF3.XBP1.RNA.KM$plot +
ggplot2::annotate("text",
x=0.3,
y=0.17,
label = "P: 3.0 %*% 10^-4",
hjust=0,
size=8,
color = "black",
parse = TRUE) +
ggplot2::annotate("text",
x=0.3,
y=0.07,
label = "(Logrank test)",
hjust=0,
size=8,
color = "black")
TCGA.ZNRF3.XBP1.RNA.KM$plot <- TCGA.ZNRF3.XBP1.RNA.KM$plot +
theme(
panel.background = element_blank(),
panel.border = element_rect(fill = NA,
size = 2),
axis.line = element_line(size = 0.5,
color='black'),
axis.text.x = element_text(size = 24,
face = "bold",
color = "black"),
axis.text.y = element_text(size = 24,
face = "bold",
color = "black"),
axis.title.x = element_text(size = 30,
face = "bold",
color = "black",
margin = margin(t=10,b=0,r=0,l=0)),
axis.title.y = element_text(size = 26,
face = "bold",
color = "black",
margin = margin(t=0,b=0,r=10,l=10)),
legend.title = element_text(size = 22,
face = "bold",
color = "black"),
legend.text = element_text(size = 20,
face = "italic",
color = "black"),
legend.background = element_blank(),
plot.margin = margin(1,1,1,1, "cm")
)
TCGA.ZNRF3.XBP1.RNA.KM
#
# FOREST PLOT - TCGA ####
znrf3.xbp1.mvcox.table <- TCGA_ZNRF3_XBP1_RNA_PFS %>%
transmute(pfs.time,
pfs.bin,
`ZNRF3`= factor(ZNRF3.RNA.BIN),
`XBP1` = factor(XBP1.RNA.BIN))
znrf3.xbp1.mvcox.table <- znrf3.xbp1.mvcox.table %>%
mutate(ZNRF3 = recode(ZNRF3, `0` = "High", `1` = "Low")) %>%
mutate(XBP1 = recode(XBP1, `0` = "High", `1` = "Low"))
median(CPCG_ZNRF3_OUTCOME_ADJUSTED_PGA$adjusted.pga)
panels <- list(
list(width = 0.03),
list(width = 0.07, display = ~variable, fontface = "bold", heading = "Variable"),
list(width = 0.03, item = "vline", hjust = 0.5),
list(width = 0.1, display = ~level, heading = "Level", fontface = "italic", hjust = 0.5),
list(width = 0.03, item = "vline", hjust = 0.5),
list(width = 0.05, display = ~n, hjust = 0.5, heading = "N"),
list(width = 0.03, item = "vline", hjust = 0.5),
list(
width = 0.75, item = "forest", hjust = 0.5, heading = "Hazard Ratio", linetype = "dashed",
line_x = 0
),
list(width = 0.03, item = "vline", hjust = 0.5),
list(width = 0.12, display = ~ ifelse(reference, "Reference", sprintf(
"%0.2f (%0.2f, %0.2f)",
trans(estimate), trans(conf.low), trans(conf.high)
)), display_na = NA, heading = "HR (95% CI)"),
list(width = 0.03, item = "vline", hjust = 0.5),
list(
width = 0.05,
display = ~ ifelse(reference, "", format.pval(p.value, digits = 1, eps = 0.001)),
display_na = NA, hjust = 0.5, heading = "p-value"
),
list(width = 0.03)
)
TCGA.ZNRF3.XBP1.RNA.PFS.FOREST <- forest_model(coxph(Surv(pfs.time, pfs.bin) ~ ZNRF3 * XBP1, znrf3.xbp1.mvcox.table), panels,
format_options = forest_model_format_options(text_size = 8))
TCGA.ZNRF3.XBP1.RNA.PFS.FOREST +
theme(
axis.text.x = element_text(size = 18,
face = "bold",
color = "black",
angle = 90,
hjust = 1,
vjust = 0.5),
aspect.ratio = 0.9
)
|
52888fcc0c29b222b1519112a542f2a478739c16 | 8518aa91916c77ad8b3757fe824c1873e7609d54 | /R/asymprob2.r | d24d2234007429706165fe5d00471a4d30dd9238 | [] | no_license | cran/BinGSD | 190799b72085e6bd74458e05f12f407c8f8ec812 | 94b7b5de408aa79152c26a23335fedb10096f374 | refs/heads/master | 2020-12-21T21:09:19.043736 | 2019-10-30T16:00:18 | 2019-10-30T16:00:18 | 236,562,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 791 | r | asymprob2.r | #compute the lower boundary crossing probabilities given the design, under H0.
#asymprob2(n.I,lowerbounds,K)
asymprob2<-function(n.I,lowerbounds,K){
sigma=matrix(0,K,K) #the covariance matrix of multivariate normal distribution.
for(i in 1:K){
for(j in 1:K){
sigma[i,j]=sqrt(n.I[min(i,j)]/n.I[max(i,j)])
}
}
problow=rep(0,K)
problow[1]=stats::pnorm(lowerbounds[1]) ##Z_1 follows a standard normal distribution.
##note the last (K-k) lower and upper integration would not influence the result.
  if(K > 1){ # guard: 2:K would count downwards if K were 1
    for(k in 2:K){
      upperlimits=c(rep(Inf,k-1),lowerbounds[k],rep(Inf,(K-k)))
      lowerlimits=c(lowerbounds[1:(k-1)],rep(-Inf,(K-k+1)))
      problow[k]=mvtnorm::pmvnorm(lower=lowerlimits,upper=upperlimits,mean=rep(0,K),sigma=sigma)[1]
    }
  }
return(problow)
} |
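# Example (illustrative values only): three analyses at information levels
# 50, 100, 150 with hypothetical lower (futility) bounds; returns the
# stagewise lower-boundary crossing probabilities under H0.
# asymprob2(n.I = c(50, 100, 150), lowerbounds = c(-2.0, -1.5, -1.0), K = 3)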
92fa0b1c975f5b464df478924779edeed6d665e7 | 33f6b19d9bdcd986121e7772c33a8d246af1a964 | /R/friends.R | 7ccd99c6210592fd220d4db0ee12edc061a267b1 | [] | no_license | cranndarach/rtweet | 6f538c2e4394c42bce618909702b65bb8194b467 | 17d69a0723be3bce97b1b44c7bcd896cac4896cb | refs/heads/master | 2021-01-17T05:04:37.389407 | 2016-09-10T03:29:44 | 2016-09-10T03:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,792 | r | friends.R | #' get_friends
#'
#' @description Requests information from Twitter's REST API
#' regarding a user's friend network (i.e., accounts followed
#' by a user). To request information on followers of accounts
#'
#' @param user Screen name or user id of target user.
#' @param page Default \code{page = -1} specifies first page of json
#' results. Other pages specified via cursor values supplied by
#' Twitter API response object.
#' @param parse Logical, indicating whether to return parsed
#' vector or nested list (fromJSON) object. By default,
#' \code{parse = TRUE} saves you the time [and frustrations]
#' associated with disentangling the Twitter API return objects.
#' @param token OAuth token (1.0 or 2.0). By default
#' \code{token = NULL} fetches a non-exhausted token from
#' an environment variable.
#' @seealso \url{https://dev.twitter.com/overview/documentation}
#' @examples
#' \dontrun{
#' # get ids of users followed by the president of the US
#' pres <- get_friends(user = "potus")
#' pres
#'
#' # get ids of users followed by the Environmental Protection Agency
#' epa <- get_friends(user = "epa")
#' epa
#' }
#'
#' @return friends User ids for everyone a user follows.
#' @export
get_friends <- function(user, page = "-1", parse = TRUE,
token = NULL) {
query <- "friends/ids"
stopifnot(is.atomic(user), is.atomic(page),
isTRUE(length(user) == 1))
token <- check_token(token, query)
n.times <- rate_limit(token, query)[["remaining"]]
params <- list(
user_type = user,
count = 5000,
cursor = page,
stringify = TRUE)
names(params)[1] <- .id_type(user)
url <- make_url(
query = query,
param = params)
f <- scroller(url, 1, n.times, token)
f <- f[!sapply(f, is.null)]
if (parse) f <- parse_fs(f)
f
}
|
ef7e251a51eaba8aad98eb7a4fa65f31bae42e51 | c319000e5d98025fb8dfd4617d74bd44b32bb606 | /geog0323/final/r/distdir.R | 6d56885e361b991f137030e49a82924c1cdcc1c6 | [] | no_license | kufreu/kufreu.github.io | 7011047203686fbf9f0fec334fdc3c8341e4d1df | 8a55f269e44bfba670f2462c09da4e260b8dc79b | refs/heads/master | 2021-08-02T23:21:33.186303 | 2021-07-24T22:40:42 | 2021-07-24T22:40:42 | 207,661,358 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,523 | r | distdir.R | #### distdir ####
# this function calculates distance in meters and direction in degrees from an origin (origin) to a destination (input)
# this function is dependent on geosphere, dplyr, and sf
# written by kufre u.
distdir <- function (input, origin, prefix = "") {
library(geosphere)
library(dplyr)
library(sf)
library(units)
print("Distance is measured in meters.")
wgs84 <-
input %>%
as("sf") %>%
st_transform(3395) %>%
st_geometry %>%
st_centroid %>%
st_transform(4326)
if (missing(origin)){
cbd <-
input %>%
as("sf") %>%
st_transform(3395) %>%
st_geometry %>%
st_centroid %>%
st_union %>%
st_centroid %>%
st_transform(4326)
} else {
cbd <-
origin %>%
as("sf") %>%
st_transform(3395) %>%
st_geometry %>%
st_centroid %>%
st_union %>%
st_centroid %>%
st_transform(4326)
}
int <-
input %>%
as("sf") %>%
mutate(
distance = drop_units(st_distance(wgs84, cbd)),
direction_degrees = (bearing(as_Spatial(cbd), as_Spatial(wgs84)) + 360) %% 360
)
result <- int %>%
mutate(direction_card_ord = ifelse(
direction_degrees <= 22.5 |
direction_degrees >= 337.5,
"N",
ifelse(
direction_degrees <= 67.5 &
direction_degrees >= 22.5,
"NE",
ifelse(
        direction_degrees <= 112.5 & # east spans 67.5-112.5; beyond 112.5 is SE
direction_degrees >= 67.5,
"E",
ifelse(
direction_degrees <= 157.5 &
direction_degrees >= 112.5,
"SE",
ifelse(
direction_degrees <= 292.5 &
direction_degrees >= 247.5,
"W",
ifelse(
direction_degrees <= 247.5 &
direction_degrees >= 202.5,
"SW",
ifelse(
direction_degrees <= 337.5 &
direction_degrees >= 292.5,
"NW",
ifelse(direction_degrees <= 202.5 &
direction_degrees >= 157.5, "S", "nichts")
)
)
)
)
)
)
))
if (prefix == "") {
result
} else {
result %>%
rename(!!paste(prefix, "distance", sep = "_") := distance,
!!paste(prefix, "direction_degrees", sep = "_") := direction_degrees,
!!paste(prefix, "direction_card_ord", sep = "_") := direction_card_ord)
}
}
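# Example (illustrative; assumes `tracts` is an sf polygon layer):
# with no `origin`, distance and direction are measured from the centroid of
# all input features; pass `origin` to measure from, e.g., a CBD point layer.
# tracts_dd <- distdir(tracts, prefix = "cbd")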
|
220dc82cc3e2c63ff3eac6100a8765c632fe74e3 | 0905cabde5e431aea433b339fd76b4aa5c8d6ee9 | /Plot2.R | 5994215bbfafc7748414e49311eebd2c030f5e2d | [] | no_license | Juan0001/ExData_Plotting1 | c97143b13dffedd06e3c30c5a3c470dda4bceec4 | c92f5238984ed65f90a81ad84767993b5ea0ad30 | refs/heads/master | 2020-12-25T10:42:18.169652 | 2015-03-04T01:14:25 | 2015-03-04T01:14:25 | 30,330,526 | 0 | 0 | null | 2015-02-05T01:10:31 | 2015-02-05T01:10:30 | null | UTF-8 | R | false | false | 846 | r | Plot2.R | ## Read the data and subset data from the dates 2007-02-01 and 2007-02-02
data <- read.table("./Data/household_power_consumption.txt",
header = TRUE,
sep = ";",
stringsAsFactors = FALSE)
library(dplyr)
wData <- filter(data, Date == "1/2/2007" | Date == "2/2/2007")
## Convert the Date and Time variables to Date/Time classes
wData$Date_Time <- paste(wData$Date, wData$Time)
wData$Date_Time <- strptime(wData$Date_Time, "%d/%m/%Y %H:%M:%S")
wData <- select(wData, -(Date:Time))
## Convert the other columns to numeric
cols = c(1:7) # all seven measurement columns; Date_Time is column 8
wData[, cols] = apply(wData[, cols], 2, function(x) as.numeric(x))
## Construct Plot 2
png("./Figures/Plot2.png", width=480, height=480)
with(wData, plot(Date_Time, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off() |
e71e15ce77c607dee234c2167c5e1ae67a2f70ec | 7fcc5697a1eda15658a8dba645ce94bed1501c2f | /afl_data/R/helpers.R | e84a67d8e1408c3e352ef061d5b03ad4661b04e4 | [
"MIT"
] | permissive | tipresias/bird-signs | c69cd7238a31513d3f1b47d1b28b5113178ba94b | 464ebd3fe5a2fc144f47978ef1c215819df89316 | refs/heads/main | 2021-06-17T15:19:37.320376 | 2021-04-29T12:46:01 | 2021-04-29T12:46:01 | 196,322,870 | 1 | 3 | MIT | 2021-03-21T02:54:48 | 2019-07-11T05:02:02 | R | UTF-8 | R | false | false | 186 | r | helpers.R | convert_to_snake_case <- function(string) {
string %>% stringr::str_to_lower() %>% stringr::str_replace_all("\\.", "_")
}
is_empty <- function(data_frame) {
nrow(data_frame) == 0
}
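# Usage sketch (hypothetical inputs, shown for illustration):
# convert_to_snake_case("Home.Team.Score")  # "home_team_score"
# is_empty(data.frame())                    # TRUE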
|
c39ab54c2be81fc446d47ef0900f5aec5aec18ab | 6803e12cc707b1e467e46d1d5ae64d34af943e42 | /cachematrix.R | b2db79ae1a925669f276b7f2e0cf307b7b451648 | [] | no_license | JLeung46/ProgrammingAssignment2 | 5b66acfea874c8de8f83a7ec8837766e7c8a8174 | e71c3a368ee15541d67d4f34e846239d2a113d06 | refs/heads/master | 2021-01-17T18:01:06.999219 | 2015-01-25T20:08:12 | 2015-01-25T20:08:12 | 29,195,296 | 0 | 0 | null | 2015-01-13T15:06:51 | 2015-01-13T15:06:51 | null | UTF-8 | R | false | false | 1,418 | r | cachematrix.R | ## makeCacheMatrix creates an object that contains functions to
## set the value of the matrix, get the value of the matrix,
## set the inverse of the matrix, and get the inverse of the matrix.
## cacheSolve checks if the inverse of the matrix has been calculated.
## If so, fetch it from the cache, otherwise calculate the inverse
## and store it into the cache. Then return the inverse of the matrix.
## Creates a special matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL # initialize inverse to null
set <- function(y){ # sets the value of the matrix
    x <<- y
inv <<- NULL
}
get <- function() x # gets the value of the matrix
setinv <- function(v) inv <<- v # sets the inverse of the matrix
getinv <- function() inv # gets the inverse of the matrix
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Computes the inverse of the special matrix. If inverse
## has already been calculated, then retrieve inverse from cache.
cacheSolve <- function(x, ...) {
inv <- x$getinv() # Gets the inverse of the object
  if(!is.null(inv)){ # If previously calculated, retrieve from cache
message("getting cached data")
return(inv)
}
data <- x$get() # Otherwise get the data
inv <- solve(data, ...) # Calculate inverse of the matrix
x$setinv(inv) # Set the inverse into cache
inv
}
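## Usage sketch (an arbitrary invertible matrix chosen for illustration):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)  # computes the inverse and caches it
## cacheSolve(m)  # prints "getting cached data" and returns the cached inverse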
|
5934a1c59d78c43d6510a6f73aafa5c4be7e56bc | e6d29f8a2fea50e45e37285d3d47985763fd8c03 | /example.R | 03de573aef24df19d83ed8c79b261fe761918155 | [
"MIT"
] | permissive | Mariana-plr/IPAQlong | de0b7b8d532e2d3dccd20204c42911b8c0f897c2 | 6f57ae46fa8779e08fa8efac56ce9b394b8f267e | refs/heads/main | 2023-08-04T09:11:44.930621 | 2023-07-29T12:03:08 | 2023-07-29T12:03:08 | 471,093,506 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,027 | r | example.R | # install IPAQlong
install.packages("devtools")
devtools::install_github("Mariana-plr/IPAQlong")
library("IPAQlong")
# generate example data (IPAQ long form): participants in rows, questions 1:25 (parts 1-4) in columns
data <- matrix(NA,nrow = 3, ncol = 25)
data[,1] <- c(1,1,1) # response to question 1 is either yes-1 or no-0
for (i in 2:25){
if (i%%2 == 0) {
data[,i] <- sample(1:7,3) # questions with an even number refer to days per week
} else {
data[,i] <- sample(1:200,3) # questions with an odd number refer to minutes or hours per day. Hours should be converted to minutes
}
}
data <- as.data.frame(data) # input to IPAQlong functions should be a dataframe object
# Calculate scores using ipaq_scores function and save results in an object called "ipaq_scores"
ipaq_scores <- IPAQlong::ipaq_scores(data = data, truncate = F)
# Calculate subscores using ipaq_subscores function and save results in an object called "ipaq_subscores"
ipaq_subscores <- IPAQlong::ipaq_subScores(data = data)
|
572a65709eeef3d09e15d69e9cdde45645b8a609 | 7f950e1930f11ff7219c0fbf997a6efa0166cd9c | /ui.R | 0514e33eef520504f2496a230cc7b2f9ffd095ae | [] | no_license | phytoclast/BioClimR | ed16b3fae59cac41092de67d4da57f879ac0138c | 431cdd6d440dc29300e004093c015a1d13f413f5 | refs/heads/master | 2020-04-19T13:08:54.262863 | 2020-03-13T20:17:05 | 2020-03-13T20:17:05 | 168,210,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,487 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# plyr is used for calculating percentiles
library(plyr)
fluidPage(
headerPanel('Biome Climate Browser'),
sidebarPanel(
htmlOutput("Biome"),
htmlOutput("ECO_NAME"),
htmlOutput("country"),
htmlOutput("elev"),
htmlOutput("lat"),
htmlOutput("lon"),
htmlOutput("temp"),
htmlOutput("prec")
),
mainPanel(
plotOutput("climplot"),
verbatimTextOutput('Climtext'),
fluidRow(
column(width=5,
radioButtons("RadioNorm", label = ("Select Timeframe"),
choices = list('Last Glacial Maximum ' = -25000, 'Mid Holocene' = -4000,
'Current' = 1990,
'Moderate global warming' = 2070
,
'Stronger global warming' = 2071),
selected = 1990)
),
column(width = 2,
radioButtons("RadioUnits", label = ("Select Units"),
choices = list('Metric System' = 'm',
'Medieval Units' = 'USC'),
selected = 'm')
),
column(width = 5,
radioButtons("RadioGraphtype",inline = T, label = ("Select Graph"),
choiceNames = list(HTML("<font size=-2>Monthly"),
HTML("Summer × Winter"),
HTML("Summer × Moisture"),
HTML("Surplus × Deficit"),
HTML("Summer × pAET"),
HTML("Winter × pAET"),
HTML("Moisture × Deficit"),
HTML("Moisture × Seasonality"),
HTML("Map"),
HTML("Temperature × Elevation</font>")),
choiceValues = list(1,2,4,5,6,7,8,3,9,10),
selected = 1),
HTML("</font>")
)),
fluidRow(
HTML("<font size=-2>Last Glacial Maximum: ~26,500 years ago (model: CCSM4);<br>
Mid Holocene: ~6000 years ago (model: CCSM4);<br> Current: ~1961-1990 (WorldClim 1.4, http://worldclim.org/);<br>
Moderate global warming: at year 2070 (scenario = RCP 4.5, model = CCSM4);<br>
Stronger global warming: at year 2070 (scenario = RCP 8.5, model = CCSM4)
             <br>Temperature and precipitation error bars correspond to the 10th and 90th percentiles of the geographic variability of the climate averages. NPP was estimated to be between 0.8 and 1.2 kg per square meter per AET depending on vegetation type or fertility, and AET was assumed to track monthly values for dry sites (high slope position, low soil water holding capacity), and annual totals for moist sites (low slope position, high soil water holding capacity). More information about the
"),
tags$a(href="https://www.researchgate.net/publication/327537609_Climate_Classification_Outline", "climate classification"),
HTML(" used above.</font>")
)
)
)
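# The htmlOutput() placeholders above are filled server-side. A minimal sketch
# of one matching render (hypothetical object names; the real server.R may differ):
# output$Biome <- renderUI({
#   selectInput("Biome", "Biome", choices = sort(unique(clim$Biome)))
# })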
|
7689d7f7520c1ac48ace1a55ac4470e3484bfabc | 9e618621cf49c5730984612d7234fa7ff827b7b8 | /man/computeSIR.Rd | e072cd064fd7f6a85d224b8dc55d38fd4aa2d9c0 | [] | no_license | MARCTELLY/SimuLearning | ac5ae0326affa66112f757416c0d31faa8be9de7 | 2b8b5f277358e265c8bcb03230c3e153b5b7b890 | refs/heads/main | 2023-03-23T20:15:17.288350 | 2021-03-14T23:08:05 | 2021-03-14T23:08:05 | 346,826,336 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 568 | rd | computeSIR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/computeSIR.R
\name{computeSIR}
\alias{computeSIR}
\title{computeSIR
Compute Susceptible, Infected, and Removed at a given time}
\usage{
computeSIR(alpha, beta, initSusc, timeOfSpread)
}
\arguments{
\item{alpha}{infection rate}
\item{beta}{removal rate}
\item{initSusc}{initial susceptible}
\item{timeOfSpread}{time of spread}
}
\value{
vector
}
\description{
computeSIR
Compute Susceptible, Infected, and Removed at a given time
}
\examples{
computeSIR(0.5, 0.02, 10, 3)
# returns: [1] 0 11 0
}
|
2550005358e08b9ea1d8e97d8d68ecc4fcb16e0e | 75e77231822c4ca6cd737663f59686daabd66a9a | /job_assignment.R | de90254e017f52c36ce0ad93ce0db9be00315675 | [] | no_license | aman11111/Duck-Worth-Lewis-Method-Improvement | 7d780fa0279b1d16b037dd72b8fa138fb33414e2 | b5b091b21acfd227a1191694d23a260f8b6e1b01 | refs/heads/master | 2021-01-25T06:56:12.332255 | 2017-06-07T11:47:57 | 2017-06-07T11:47:57 | 93,628,614 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,732 | r | job_assignment.R | setwd("F:\\")
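# This script parses ggevent.log, cleans paired ggstart/ggstop events per
# user (ai5), merges a ggstop/ggstart pair separated by a gap of under 30
# seconds, and reports the total number of sessions, the number of valid
# sessions (longer than 60 seconds), and the mean duration of a valid session.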
df <- read.table("ggevent.log", sep = ",", header = FALSE)
#(giving names to the columns)
colnames(df)<-c("ai5","x","y","sdkv","event","ts","z","timestamp","game_id")
library(dplyr)
#(removing columns which are not required further)
df1<-select(df, -x, -y, -z, -sdkv, -ts, -game_id)
library(stringr)
#(breaking the timestamp column into 3 columns and picking the last one)
df2<-str_split_fixed(df1$timestamp, ":", 3)
df1[,3] <- df2[,3]
#(giving the column a name)
colnames(df1)[3] <- "time"
#(splitting the time column into three parts and picking the last column into df1)
df4<-str_split_fixed(df1$`time`, " ", 3)
df1[,3]<-df4[,3]
#(splitting the event column into three parts and picking the last column into df1)
df5<-str_split_fixed(df1$event, ":", 3)
df1[,2]<-df5[,3]
#(splitting the ai5 column into three parts and picking the last column into df1)
df7<-str_split_fixed(df1$ai5, ":", 3)
df1[,1]<-df7[,3]
#(splitting the time column into hours, minutes, and seconds and converting it to total seconds)
df8<-str_split_fixed(df1$time, ":", 3)
for(i in 1:length(df1$ai5)){
  df8[i,1]<- 3600*as.numeric(df8[i,1]) + 60*as.numeric(df8[i,2]) + as.numeric(df8[i,3])
}
df1[,3]<-df8[,1]
View(df1)
#(finding total number of different users, here denoted by L)
L<-unique(df1$ai5)
#(making a list of their activities; the list is named K)
K<-vector("list", length(L))
for(i in 1:length(L)){
K[[i]]<- df1[which(df1$ai5==L[i]),]
}
#(removing unwanted elements)
rm(df, df2, df4, df5, df7, df8)
#(finding number of rows in every element of the list)
myvec <- sapply(K, NROW)
#(removing elements of the list which have only one row)
z<-which(myvec==1)
for(i in seq_along(z)){ # seq_along avoids an invalid 1:0 loop when z is empty
K[[z[i]-i+1]]<-NULL
}
#(making all value of time numeric)
for(j in 1:length(K)){
K[[j]]$time<-as.numeric(K[[j]]$time)
}
#(two consecutive ggstart events: carry the earlier start time forward)
for(j in 1:length(K)){
for(i in 1:(length(K[[j]]$time)-1)){
if(K[[j]]$event[i]==" ggstart" && K[[j]]$event[i+1] ==" ggstart"){
K[[j]]$time[i+1]<-K[[j]]$time[i]
}
else{
i<-i+1
}
}
}
#(two consecutive ggstop events: carry the later stop time back)
for(j in 1:length(K)){
  for(i in 1:(length(K[[j]]$time)-1)){
if(K[[j]]$event[i]==" ggstop" && K[[j]]$event[i+1] ==" ggstop"){
K[[j]]$time[i]<-K[[j]]$time[i+1]
}
else{
i<-i+1
}
}
}
#(removing the first row of each consecutive ggstart pair)
for(j in 1:length(K)){
i<-1
while(i<length(K[[j]]$time)){
if(K[[j]]$event[i]== " ggstart" && K[[j]]$event[i+1] == " ggstart"){
K[[j]]<-K[[j]][-i, ]
}
else{
i<-i+1
}
}
}
#(removing the first row of each consecutive ggstop pair)
for(j in 1:length(K)){
i<-1
while(i<length(K[[j]]$time)){
if(K[[j]]$event[i]== " ggstop" && K[[j]]$event[i+1] == " ggstop"){
K[[j]]<-K[[j]][-i, ]
}
else{
i<-i+1
}
}
}
#(merging a ggstop immediately followed by a ggstart when the gap between them is under 30 seconds)
for(j in 1:length(K)){
i<-2
while(i<length(K[[j]]$time)){
if(K[[j]]$event[i]== " ggstop" && K[[j]]$event[i+1] == " ggstart"){
if(K[[j]]$time[i+1]-K[[j]]$time[i]<30 && K[[j]]$event[i-1]==" ggstart"){
K[[j]]$time[i+1]<-K[[j]]$time[i-1]
K[[j]]<-K[[j]][c(-i, -(i-1)), ]
}
else{
i<-i+1
}
}
else{
i<-i+1
}
}
}
#(removing list elements left with only one row after merging ggstop/ggstart pairs)
myvec <- sapply(K, NROW)
m<-which(myvec==1)
for(i in seq_along(m)){ # seq_along avoids an invalid 1:0 loop when m is empty
K[[m[i]-i+1]]<-NULL
}
#(durations of valid sessions and total count of valid sessions)
avgarray<-vector("list", length(K))
countvalid<-rep(0, length(K))
for(j in 1:length(K)){
countvalid[j] =0
avgarray[[j]]<-rep(0, (length(K[[j]]$event)-1))
for(i in 1:(length(K[[j]]$event)-1)){
if(K[[j]]$event[i] ==" ggstart" && K[[j]]$event[i+1]==" ggstop"){
if(K[[j]]$time[i+1]-K[[j]]$time[i]>60){
avgarray[[j]][i]<- K[[j]]$time[i+1]-K[[j]]$time[i]
countvalid[j] =countvalid[j] +1
}
else{
avgarray[[j]][i]<- 0
i<-i+1
}
}
}
}
sum(countvalid)
#(total number of sessions)
count<-rep(0, length(K))
for(j in 1:length(K)){
count[j] =0
for(i in 1:(length(K[[j]]$event)-1)){
if(K[[j]]$event[i] ==" ggstart" && K[[j]]$event[i+1]==" ggstop"){
if(K[[j]]$time[i+1]-K[[j]]$time[i]>=1){
count[j] =count[j] +1
}
else{
i<-i+1
}
}
}
}
sum(count)
#(average duration of a valid session, in seconds)
h<-unlist(avgarray)
h<-h[h!=0] # keep only the recorded valid durations; safe even when no zeros are present
mean(h)
print("total number of sessions are")
sum(count)
print("total number of valid sessions are")
sum(countvalid)
print("average of valid sessions is")
mean(h)
|
0e3b2717e1a9acb70e7d49474a33b1def52990a9 | 2b1c1c7b88d8a55532931528b1e8f76cb9a67cc6 | /man/filter_state.Rd | 67940a74d13f5efa5869f5ee8e06c0cda920c3a5 | [] | no_license | ktargows/tigris | 61931e7c57ecbd8ac75b8a4a46cfdf322eab318b | 50c5da523b4ccc5816b85f91c4d39dbdd99761f2 | refs/heads/master | 2021-03-13T04:08:23.931958 | 2017-05-16T18:36:25 | 2017-05-16T18:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 659 | rd | filter_state.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/states.R
\name{filter_state}
\alias{filter_state}
\title{Filter a \code{states} Spatial object for only those states matching the
contents of the \code{state} vector.}
\usage{
filter_state(states, state)
}
\arguments{
\item{states}{object returned from a call to \code{states}}
\item{state}{a vector of full state names. The function performs the
comparison in a case-insensitive manner.}
}
\description{
Filter a \code{states} Spatial object for only those states matching the
contents of the \code{state} vector.
}
\examples{
\dontrun{
states() \%>\% filter_state("south")
}
}
|