Dataset schema: each record below lists the following fields, pipe-separated, with the file content as the final field (⌀ marks nullable fields):

| column | type | length / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2 to 327 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 91 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 134 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 to 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 to 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 to 2023-09-05 12:13:49 |
| github_id | int64 | 19.4k to 671M, ⌀ |
| star_events_count | int64 | 0 to 40k |
| fork_events_count | int64 | 0 to 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] | 2012-06-21 16:39:19 to 2023-09-14 21:52:42, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-25 01:21:32 to 2023-06-28 13:19:12, ⌀ |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 to 9.18M |
| extension | string | 20 classes |
| filename | string | length 1 to 141 |
| content | string | length 7 to 9.18M |
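As a quick orientation, a minimal R sketch for working with these records; it assumes the table has already been loaded into a data.frame named `records` with the columns listed above:

library(dplyr)

# keep hand-written R sources only, ordered by popularity
r_files <- records %>%
  filter(language == "R", !is_vendor, !is_generated) %>%
  select(repo_name, path, star_events_count, length_bytes) %>%
  arrange(desc(star_events_count))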
bd81e37dfcf6b3cdb4c0bd715854b39667cedc7d | d6ff1e6257582f785915e3a0fad3d4896ebd9acb | /R_old/OVERALL_TRANSPIRATION.R | dd4c315e6edbe8f2886bcf7adad85997b5a0dd40 | [] | no_license | RemkoDuursma/Kelly2015NewPhyt | 355084d7d719c30b87200b75887f5521c270b1b5 | 447f263f726e68298ee47746b4de438fbc8fdebf | refs/heads/master | 2021-01-15T13:02:00.392770 | 2015-09-08T04:56:15 | 2015-09-08T04:56:15 | 42,089,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,247 | r | OVERALL_TRANSPIRATION.R |
setwd("C:/Documents and Settings/Jeffrey Kelly/Desktop/EUC DATA/EUC OVERALL BIOMASS")
PILBIOMASS<-read.csv("PILTRANSAA.csv",sep=",", header=TRUE)
names(PILBIOMASS)
str(PILBIOMASS)
windows(width=8, height=4) #, pointsize=18)
par(xaxs="i",yaxs="i")
par(las=2)
par(mar=c(4.5,4.5,1,1))
par(xaxs="i",yaxs="i")
par(mfrow=c(1,2), cex.lab=1)
#PIL
par(las=1)
with(PILBIOMASS,plot(E[ST=="PILAD"]~D[ST=="PILAD"],col="blue",pch=1,
ylim=range(0,1.1*max(E)),xlim=range(0,1.05*max(D)),
ylab="",xlab=expression(bold(Day))))
title(main="Eucalyptus pilularis", font.main=4,cex.main=1)
with(PILBIOMASS,arrows(D[ST=="PILAD"],
ESE[ST=="PILAD"], D[ST=="PILAD"], LSE[ST=="PILAD"]
, length = .035, angle = 90, code = 3,col="blue"))
#or
with(PILBIOMASS,points(E[ST=="PILAND"]~D[ST=="PILAND"],col="blue",pch=16))
with(PILBIOMASS,arrows(D[ST=="PILAND"],
ESE[ST=="PILAND"], D[ST=="PILAND"], LSE[ST=="PILAND"]
, length = .035, angle = 90, code = 3,col="blue"))
with(PILBIOMASS,points(E[ST=="PILED"]~D[ST=="PILED"],col="red",pch=1))
with(PILBIOMASS,arrows(D[ST=="PILED"],
ESE[ST=="PILED"], D[ST=="PILED"], LSE[ST=="PILED"]
, length = .035, angle = 90, code = 3,col="red"))
with(PILBIOMASS,points(E[ST=="PILEND"]~D[ST=="PILEND"],col="red",pch=16))
with(PILBIOMASS,arrows(D[ST=="PILEND"],
ESE[ST=="PILEND"], D[ST=="PILEND"], LSE[ST=="PILEND"]
, length = .035, angle = 90, code = 3,col="red"))
par(las=3)
mtext(side = 2, text =expression(bold(Transpiration~~(l~week^-1))), line = 2.5,font=2, cex=1.0)
legend("topleft", expression(aC[a]~-~W, aC[a]~-~D,eC[a]~-~W ,eC[a]~-~D),
cex=0.75,bty="n",
pch = c(16,1,16,1), col=c("blue","blue","red","red"), #xjust = .5, yjust = .5,
)
par(las=1)
setwd("C:/Documents and Settings/Jeffrey Kelly/Desktop/EUC DATA/EUC OVERALL BIOMASS")
POPBIOMASS<-read.csv("POPTRANSAA.csv",sep=",", header=TRUE)
names(POPBIOMASS)
str(POPBIOMASS)
#POP
#bottom,left,top,right
par(xaxs="i",yaxs="i")
par(las=2)
par(mar=c(4.5,1,1,4.5))
par(las=1)
with(POPBIOMASS,plot(E[ST=="POPAD"]~D[ST=="POPAD"],col="blue",pch=1,yaxt="n",
ylim=c(0, 1.1*max(E)),xlim=c(0,1.05*max(D)),
ylab="",xlab=expression(bold(Day))))
title(main="Eucalyptus populnea", font.main=4,cex.main=1)
with(POPBIOMASS,arrows(D[ST=="POPAD"],
ESE[ST=="POPAD"], D[ST=="POPAD"], LSE[ST=="POPAD"]
, length = .035, angle = 90, code = 3,col="blue"))
axis(4,labels=TRUE,tcl=-0.5,cex.axis=1)
#or
with(POPBIOMASS,points(E[ST=="POPAND"]~D[ST=="POPAND"],col="blue",pch=16))
with(POPBIOMASS,arrows(D[ST=="POPAND"],
ESE[ST=="POPAND"], D[ST=="POPAND"], LSE[ST=="POPAND"]
, length = .035, angle = 90, code = 3,col="blue"))
with(POPBIOMASS,points(E[ST=="POPED"]~D[ST=="POPED"],col="red",pch=1))
with(POPBIOMASS,arrows(D[ST=="POPED"],
ESE[ST=="POPED"], D[ST=="POPED"], LSE[ST=="POPED"]
, length = .035, angle = 90, code = 3,col="red"))
with(POPBIOMASS,points(E[ST=="POPEND"]~D[ST=="POPEND"],col="red",pch=16))
with(POPBIOMASS,arrows(D[ST=="POPEND"],
ESE[ST=="POPEND"], D[ST=="POPEND"], LSE[ST=="POPEND"]
, length = .035, angle = 90, code = 3,col="red"))
# looks great on screen / printer
dev.copy2pdf(file="somname.pdf")
# looks great printed, or after printing MS to PDF
# this is the one you paste into Word
dev.copy2eps(file="fig19.eps")
|
7e8c94c982763d3b9a74d47bf81ecba200e74f3e | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /A_github/sources/authors/2774/plotly/coord.R | c489eb9d4c358419e3fd6f91a129c297999fc8aa | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,359 | r | coord.R |
#' *** This won't be possible until plotly.js implements aspect ratios... ***
#'
#' #' Force the aspect ratio according to x and y scales
#' #'
#' #' When x and y are numeric variables measured on the same scale,
#' #' or are related in some meaningful way, forcing the aspect ratio of the
#' #' plot to be proportional to the ratio of a unit change in x versus y improves
#' #' our ability to correctly perceive the data.
#' #'
#' #' @param p a plotly object
#' #' @param ratio aspect ratio, expressed as y / x
#' #' @export
#' #' @examples
#' #'
#' #' canada <- map_data("world", "canada")
#' #'
#' #' canada %>%
#' #' group_by(group) %>%
#' #' plot_ly(x = ~long, y = ~lat, alpha = 0.2) %>%
#' #' add_polygons(hoverinfo = "none", color = I("black")) %>%
#' #' coord_fix()
#' #'
#' #' # works on (non-faceted) ggplot2 plots, too
#' #' gg <- ggplot(canada, aes(long, lat, group = group)) +
#' #' geom_polygon() + coord_fixed()
#' #'
#' #' gg %>%
#' #' ggplotly() %>%
#' #' coord_fix()
#' #'
#'
#' coord_fix <- function(p, ratio = 1) {
#' p <- plotly_build(p)
#' # this won't work for subplots, or categorical data
#' x <- grepl("^xaxis", names(p$x$layout))
#' y <- grepl("^yaxis", names(p$x$layout))
#' if (sum(x) > 1 || sum(y) > 1) {
#' stop("Can not impose aspect ratio a plot with more than one x/y axis", call. = FALSE)
#' }
#' xDat <- unlist(lapply(p$x$data, "[[", "x"))
#' yDat <- unlist(lapply(p$x$data, "[[", "y"))
#' if (!is.numeric(xDat) || !is.numeric(yDat)) {
#' stop("Must have numeric data on both x and y axes to enforce aspect ratios", call. = FALSE)
#' }
#'
#' # warn about any pre-populated domains, they will get squashed
#' xDom <- p$x$layout[["xaxis"]]$domain %||% c(0, 1)
#' yDom <- p$x$layout[["yaxis"]]$domain %||% c(0, 1)
#' if (!identical(yDom, c(0, 1)) || !identical(xDom, c(0, 1))) {
#' warning(
#' "coord_fix() won't respect prespecified axis domains (other than the default)",
#' call. = FALSE
#' )
#' }
#'
#' xRng <- range(xDat, na.rm = TRUE)
#' yRng <- range(yDat, na.rm = TRUE)
#' asp <- ratio * diff(yRng) / diff(xRng)
#' if (asp < 1) {
#' p$x$layout[["yaxis"]]$domain <- c(0 + asp / 2, 1 - asp / 2)
#' } else {
#' asp <- 1 / asp
#' p$x$layout[["xaxis"]]$domain <- c(0 + asp / 2, 1 - asp / 2)
#' }
#' p
#' }
|
a0ef2dadf331036f8762dadbd969ae0e3f89da0d | 8ac82c0214d61abd0f6224dfb3e3a6abec07cd75 | /man/load_service_status.Rd | a1705e3ee3f4072627b876f7126fd32d80e7a383 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | scottmmjackson/samsunghealthR | e4a4f0a5c7aef86251e525614adc43439551ff7b | 4c989ca44f953ebebd0944789060a5c2476cb80d | refs/heads/master | 2020-12-03T07:34:47.621463 | 2020-01-01T17:30:36 | 2020-01-01T17:30:36 | 231,243,976 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 319 | rd | load_service_status.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\name{load_service_status}
\alias{load_service_status}
\title{Load Service Status}
\usage{
load_service_status(path)
}
\arguments{
\item{path}{path to CSV file}
}
\description{
Loads Service Status CSV from Samsung Health Data.
}
|
87612036fd5fa980712ac1e05cfc398425c50685 | 86c0b4c6c1746ebf0441c62421748190d057067d | /plot/mass.R | 20769a023b830ddef7c35ab7c099b5ac260e9f87 | [
"MIT"
] | permissive | yufree/democode | 372f0684c49505965b0ba5abe0675c2b6f7fb3da | 0a332ac34a95677ce859b49033bdd2be3dfbe3c4 | refs/heads/master | 2022-09-13T11:08:55.152350 | 2022-08-28T23:09:00 | 2022-08-28T23:09:00 | 20,328,810 | 5 | 14 | null | 2017-01-06T16:07:25 | 2014-05-30T12:41:28 | HTML | UTF-8 | R | false | false | 1,185 | r | mass.R |
source("http://bioconductor.org/biocLite.R")
biocLite("mzR")
library(mzR)
all <- openMSfile('./FULL200.CDF')
df <- header(all)
bb <- peaks(all)
aaaa <- sapply(bb,as.data.frame)
oddvals <- seq(1, ncol(aaaa), by=2)
aaaaa <- unlist(aaaa[oddvals])
ccc <- unique(c(aaaaa))
ccc <- ccc[order(ccc)]
# bbb <- sapply(bb, "[",250:700)
# ddd <- unique(c(bbb))
# dddd <- ddd[ddd<700]
time <- df$retentionTime
df2 <- matrix(0, nrow = length(ccc), ncol = length(time))
rownames(df2) <- ccc
colnames(df2) <- time
rm(aaaa)
rm(aaaaa)
rm(oddvals)
rm(df)
rm(all)
gc()
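# fill the m/z-by-scan intensity matrix: one column per scan (retention time),
# rows indexed by the pooled, sorted m/z values in ccc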
for(i in 1:length(time)){
temp <- bb[[i]]
index <- which(ccc%in%temp[,1])
df2[index,i] <- temp[,2]
}
ddd <- as.integer(ccc)
library(data.table)
dt = data.table(df2)
dt$fac <- ddd
df3 <- dt[,lapply(.SD, sum), by=ddd ]
df3 <- as.matrix(df3)
df7 <- df3[,2000:3000]
heatmap(df7)
library(rARPACK)
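# rank-2 truncated SVD: df5 reconstructs a low-rank (denoised) version of the matrix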
df4 <- svds(df3,2)
df5 <- df4$u %*% diag(df4$d) %*% t(df4$v)
rownames(df5) <- ddd
colnames(df5) <- time
df6 <- df5[,2000:3000]
heatmap(df6)
df8 <- as.data.frame(df5)
df9 <- as.data.frame(t(df8))
rownames(df8) <- ddd
colnames(df9) <- time
write.table(df3,'df3.txt')
|
99a524e8baa9751bbd5db7787f3567c66a6e8bee | 4450235f92ae60899df1749dc2fed83101582318 | /ThesisRpackage/R/3Article_old/GSE42861_function.R | 4e60f4de4028e99df28eb3e6e687f0b5409e866e | [
"MIT"
] | permissive | cayek/Thesis | c2f5048e793d33cc40c8576257d2c9016bc84c96 | 14d7c3fd03aac0ee940e883e37114420aa614b41 | refs/heads/master | 2021-03-27T20:35:08.500966 | 2017-11-18T10:50:58 | 2017-11-18T10:50:58 | 84,567,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,617 | r | GSE42861_function.R |
#' main experiment
#'
#' @export
GSE42861_experiment <- function(s, save = TRUE) {
# glm
glm <- Method(name = "glm",
hypothesis.testing.method = phenotypeWayReg_glm_score(family = binomial,
factorized.X1 = TRUE),
impute.genotype.method = imputeByMean(),
nickname = "glm")
# glm + 2 PCs
glm_2PC <- PCAClassicLinearMethod(K = 2,
center = TRUE,
hypothesis.testing.method = phenotypeWayReg_glm_score(family = binomial,
factorized.X1 = TRUE),
nickname = "glm+2PCs",
assumingStructure = FALSE)
# glm + 6 pcs
glm_6PC <- PCAClassicLinearMethod(K = 6,
center = TRUE,
hypothesis.testing.method = phenotypeWayReg_glm_score(family = binomial,
factorized.X1 = TRUE),
nickname = "glm+6PCs",
assumingStructure = FALSE)
# glm + 6 refactor
glm_6refractor <- refractorMethod(K = 6,
verbose = FALSE,
t = 500,
nickname = "glm+6refractor")
# glm + 6 lfmm ridge
glm_6lfmm.ridge <- RidgeLFMMMethod(K = 6,
hypothesis.testing.method = phenotypeWayReg_glm_score(),
lambda = 1e6,
nickname = "glm+6lfmm")
# lfmm ridge
lfmm.ridge <- RidgeLFMMMethod(K = 6,
hypothesis.testing.method = lm_zscore(gif = FALSE),
lambda = 1e6,
nickname = "lfmm ridge")
# run exp
exp <- ComparisonExperiment(s,
glm,
glm_2PC,
glm_6PC,
glm_6refractor,
glm_6lfmm.ridge,
lfmm.ridge)
exp <- runExperiment(exp)
# save exp
if (save) {
dumpExperiment(exp)
}
exp
}
#' @export
GSE42861_plot <- function(exp) {
  # Rmk: only pvalue1 and score1 are of interest here; the other p-values were
  # not computed with glm or lm
# qqplot
ggplot(exp$df.res %>% dplyr::filter(variable.name == "pvalue1")) +
stat_qq(aes(sample = -log10(estimate)),
distribution = stats::qexp, dparams = list(rate = log(10))) +
geom_abline(slope = 1, intercept = 0) +
facet_grid(method.name~.) +
ggtitle("-log10(pvalue) qqplot")
}
#' @export
GSE42861_get_RahmaniLoci <- function() {
table <- tabulizer::extract_tables("~/Projects/Biblio/org-ref-pdfs/SF_Rahmani_2016.pdf",
pages = 19, method = "data.frame")[[1]]
table
}
################################################################################
# Long running
#' Run of PCA
#'
#'
#' @export
long_GSE42861_PCA <- function() {
library(Article3Package)
G.file <- "~/Projects/Data2016_2017/GSE42861/betanormalized_metylationlvl.filtered.rds"
X.file <- "~/Projects/Data2016_2017/GSE42861/X.rds"
s <- TrueSampler(G.file = G.file,
X.file = X.file,
outlier.file = NULL,
n = NULL,
L = NULL)
exp <- HGDP_PCA(s, save = TRUE)
}
#' Run of LFMM
#'
#'
#' @export
long_GSE42861_LFMM <- function() {
cl <- parallel::makeCluster(2)
doParallel::registerDoParallel(cl)
library(Article3Package)
G.file <- "~/Projects/Data2016_2017/GSE42861/betanormalized_metylationlvl.filtered.rds"
X.file <- "~/Projects/Data2016_2017/GSE42861/X.rds"
s <- TrueSampler(G.file = G.file,
X.file = X.file,
outlier.file = NULL,
n = NULL,
L = NULL)
lambdas <- c(1e-10, 1e0, 1e2, 1e10)
Ks <- c(1,6,8,20)
HGDB_runs(s, Ks = Ks, lambdas = lambdas, save = TRUE)
}
#' Run of GSE42861_experiment
#'
#'
#' @export
long_GSE42861_exp <- function() {
library(Article3Package)
G.file <- "~/Projects/Data2016_2017/GSE42861/betanormalized_metylationlvl.rds"
X.file <- "~/Projects/Data2016_2017/GSE42861/X.rds"
s <- TrueSampler(G.file = G.file,
X.file = X.file,
outlier.file = NULL,
n = NULL,
L = NULL)
cl <- parallel::makeCluster(6)
doParallel::registerDoParallel(cl)
exp <- GSE42861_experiment(s, save = TRUE)
exp
}
#' cross validation
#'
#'
#' @export
long_GSE42861_CrossVal <- function(cluster.nb = NULL,
K = 6,
G.file = "~/Projects/Data2016_2017/GSE42861/betanormalized_metylationlvl.filtered.rds",
X.file = "~/Projects/Data2016_2017/GSE42861/X.rds",
lambdas = c(1e-10, 1e0, 1e2, 1e10),
rep = 5,
missing.prop = 0.5,
save = TRUE,
bypass = FALSE) {
KrakTest(bypass)
if (!is.null(cluster.nb)) {
cl <- parallel::makeCluster(cluster.nb)
doParallel::registerDoParallel(cl)
}
s <- TrueSampler(G.file = G.file,
X.file = X.file,
outlier.file = NULL,
n = NULL,
L = NULL)
dat <- sampl(s)
m <- finalLfmmRdigeMethod(K = K,
lambda = NULL)
description <- paste0("long_GSE42861_CrossVal with K=", K,
"and lambdas = ",paste(lambdas,collapse = '|'))
exp <- Experiment(name = "long_GSE42861_CrossVal", description = description)
exp$crossvalidation.res <- crossvalidation_kfold_missingvalue(m = m,
dat = dat,
rep = rep,
missing.prop = missing.prop,
lambdas = lambdas)
# save exp
if (save) {
dumpExperiment(exp)
}
exp
}
#' cross validation
#'
#'
#' @export
long_GSE42861_lfmm_glm <- function(K.lfmm = 6,
K.refactor = 6,
G.file = "~/Projects/Data2016_2017/GSE42861/betanormalized_metylationlvl.filtered.rds",
X.file = "~/Projects/Data2016_2017/GSE42861/X.rds",
lambda = 1e-10,
save = TRUE,
bypass = FALSE,
refactor = FALSE) {
KrakTest(bypass)
s <- TrueSampler(G.file = G.file,
X.file = X.file,
outlier.file = NULL,
n = NULL,
L = NULL)
dat <- sampl(s)
G <- dat$G
X <- dat$X
## other co.var correction
dat$G <- G
dat$X <- X[,-1]
m.lm <- finalLm()
m.lm <- fit(m.lm, dat)
m.lfmm <- finalLfmmRdigeMethod(K = K.lfmm,
lambda = lambda)
m.refactor <- finalRefactorMethod(K = K.refactor)
description <- paste0("long_GSE42861_lfmm_glm with K=", K.lfmm,
"and lambdas = ", lambda)
exp <- Experiment(name = "long_GSE42861_lfmm_glm", description = description)
# run of the method
dat$G <- m.lm$epsilon
dat$X <- X[,1, drop = FALSE]
exp$m.lfmm <- fit(m.lfmm, dat)
exp$m.refactor <- fit(m.refactor, dat)
# hypothesis testing
glm.aux <- function(m, name) {
glm.func <- phenotypeWayReg_glm_score(family = binomial,
factorized.X1 = TRUE)
glm.res <- glm.func$fun(m, dat)
df <- tibble(index = 1:length(glm.res$score), method.name = name,
estimate = glm.res$score[1,], variable.name = "score")
df <- tibble(index = 1:length(glm.res$pvalue), method.name = name,
estimate = glm.res$pvalue[1,], variable.name = "pvalue") %>%
rbind(df)
df
}
exp$df.res <- rbind(glm.aux(exp$m.refactor, "refactor"),
glm.aux(exp$m.lfmm, "lfmm"))
# save exp
if (save) {
dumpExperiment(exp)
}
exp
}
|
ac457a941d93eb56777aeb1bda10707ce8907e13 | c54d1c0a3d81bddb25f3f55078f305ad6c15997b | /R/get_internal_tree.R | ffd29651660f728d71fcc716d3ca033637fb637 | [] | no_license | cran/genpathmox | dc065d3b5ea1c8632068fe3d9bfa7b063045bb2c | 517be94b39d8742cd3d39aedc152e026d865afd6 | refs/heads/master | 2023-01-12T03:39:55.183481 | 2022-12-22T10:00:12 | 2022-12-22T10:00:12 | 25,984,875 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,114 | r | get_internal_tree.R |
#' ############################################################################################
#' @title Calculating size (number of individuals of a node)
#' @details
#' Internal function
#' @param x matrix or dataframe with data.
#' @param size value indicating the minimum threshold of number of observations for a node
#' @return the minimum number of observations required in a node
#' @keywords internal
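#' @examples
#' # Illustrative only: with the 150-row iris data and size = 0.1,
#' # trunc(150 * 0.1) = 15, i.e. percent.node(iris, 0.1)$min.n.ind is 15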
#' @export
#'
percent.node <- function(x,size)
{
indiv = nrow(x)
min.n.ind = trunc(indiv*size)
list(min.n.ind=min.n.ind)
}
#' ############################################################################################
#' @title Calculating Deepth stop criterion
#' @details
#' Internal function
#' @param node id that identifies a specicif node
#' @return deepth of the tree
#' @keywords internal
#' @export
#'
showDeepth=function(node)
{
return (trunc(log2(node@id)))
}
#' ############################################################################################
#' @title Observations belonging to the root node
#' @details
#' Internal function
#' @param moxtree class containing the moxtree elements
#' @return the observations belonging to the root node
#' @keywords internal
#' @export
#'
root.tree <- function(moxtree)
{
root = NULL
for (n in moxtree@nodes)
{
if (n@id == 1)
{
root=n@elements
}
}
root
}
#' ############################################################################################
#' @title Observations belonging to the terminal nodes
#' @details
#' Internal function
#' @param moxtree class containing the moxtree element.
#' @return the observations belonging to the terminal nodes
#' @keywords internal
#' @export
#'
terminal.tree <- function(moxtree)
{
terminal = list()
id = list()
if (length(moxtree@nodes) > 1)
{
for (n in moxtree@nodes)
{
if (n@id == 1)
{
terminal[[length(terminal)+1]] = n@elements
id[[length(id)+1]] = "Root"
}
if (length(n@childs) == 0)
{
terminal[[length(terminal)+1]] = n@elements
id[[length(id)+1]] = n@id
}
}
for (i in 1:length(terminal)){names(terminal) = paste("node",id)}
terminal
}
else
{
terminal = NULL
}
terminal
}
#' ############################################################################################
#' @title Observations belonging to the nodes
#' @details
#' Internal function
#' @param moxtree class containing the moxtree elements
#' @return the observations belonging to the nodes
#' @keywords internal
#' @export
#'
nodes.tree <- function(moxtree)
{
nodes = list()
id = list()
if (length(moxtree@nodes) > 1)
{
for (n in moxtree@nodes)
{
nodes[[length(nodes)+1]] = n@elements
id[[length(id)+1]] = n@id
}
for (i in 1:length(nodes)) {names(nodes) = paste("node",id)}
nodes
}
else
{
nodes = NULL
}
nodes
}
#' ############################################################################################
#' @title Possible partitions for each node of the tree
#' @details
#' Internal function
#' @param moxtree class containing the moxtree elements
#' @return the possible partitions for each node of the tree
#' @keywords internal
#' @export
#'
candidates.tree <- function(moxtree)
{
candidates = list()
id = list()
if (length(moxtree@nodes) > 1)
{
for (n in moxtree@nodes)
{
if (length(n@childs)>0)
{
candidates[[length(candidates)+1]] = n@info@candidates
id[[length(id)+1]] = n@id
}
}
for (i in 1:length(candidates)) {names(candidates) = paste("node",id)}
candidates
}
else
{
candidates = NULL
}
candidates
}
#' ############################################################################################
#' @title F-global test results for each tree partition
#' @details
#' Internal function
#' @param moxtree class containing the moxtree elements
#' @return the F-global test results for each tree partition
#' @keywords internal
#' @export
#'
fglobal.tree <- function(moxtree)
{
fglobal = list()
fgtable = NULL
if (length(moxtree@nodes) > 1)
{
for (n in moxtree@nodes)
{
if (length(n@childs) > 0)
{
fglobal[[length(fglobal)+1]] = data.frame(n@id,n@info@fgstatistic,n@info@fpvalg,n@info@variable,t(n@info@level))
}
}
for (i in 1:length(fglobal)) {fgtable = rbind(fgtable,fglobal[[i]])}
colnames(fgtable) = c("node","F value","Pr(>F)","variable","g1.mod","g2.mod")
Fg.r = fgtable
}
else
{
Fg.r = NULL
}
Fg.r
}
#' ############################################################################################
#' @title F-coefficients test results for each tree partition
#' @details
#' Internal function
#' @param moxtree class containing the moxtree elements
#' @return the F-coefficients test results for each tree partition
#' @keywords internal
#' @export
fcoef.tree <- function(moxtree)
{
fc = list()
id = list()
fctable = NULL
if (length(moxtree@nodes) > 1)
{
for (n in moxtree@nodes)
{
if (length(n@childs) > 0)
{
id[[length(id)+1]] = n@id
fctable = data.frame(as.matrix(n@info@fcstatistic),as.matrix(n@info@fpvalc))
colnames(fctable) = c("F value","Pr(>F)")
fc[[length(fc)+1]] = fctable
}
}
names(fc) = paste("node",id,sep="")
Fc.r = fc
}
else
{
Fc.r=list(fc=NULL,Signif=NULL)
}
Fc.r
}
#' ############################################################################################
#' @title General information about the tree
#' @details
#' Internal function
#' @param moxtree class containing the tree elements
#' @return a dataframe containing information about the tree and its nodes
#' @keywords internal
#' @export
#'
mox.tree <- function(moxtree)
{
info.node = list()
type = NULL
terminal = NULL
perc = NULL
var = NULL
mox = NULL
if (length(moxtree@nodes)>1)
{
for (n in moxtree@nodes)
{
if (n@id == 1)
{
length.root = length(n@elements)
}
if (length(n@childs) > 0)
{
info.node[[length(info.node)+1]] = data.frame(n@info@variable,n@id,n@childs,n@info@level)
}
if (length(n@childs) == 0)
{
type = "least"
terminal = "yes"
}
if (n@father == 0)
{
type = "root"
terminal = "no"
}
if (n@father!=0 && length(n@childs) != 0)
{
type = "node"
terminal = "no"
}
perc = round((length(n@elements)/length.root)*100,2)
data = data.frame(n@id,n@father,showDeepth(n),type,terminal,length(n@elements),perc)
mox = rbind(mox,data)
}
data.info.node = NULL
for (i in 1:length(info.node)) {data.info.node = rbind(data.info.node,info.node[[i]])}
names(data.info.node)[2] = "n.father"
names(data.info.node)[3] = "n.id"
MOX =merge (mox, data.info.node,by="n.id",all.x=TRUE)[,-9]
names(MOX) = c("node","parent","depth","type","terminal","size","%","variable","category")
MOX
}
else
{
MOX = NULL
}
MOX
}
#' ############################################################################################
#' @title General information about the pathmox algorithm
#' @details
#' Internal function
#' @param signif stop condition 1: significance of the p-value
#' @param size stop condition 2: minimum number of individuals in a node
#' @param deep stop condition 3: maximum tree depth level
#' @param y: set of segmentation variables
#' @keywords internal
#' @export
info.mox <- function(signif,size,deep,y)
{
cat("\n")
cat("PLS-SEM PATHMOX ANALYSIS","\n")
cat("\n")
cat("---------------------------------------------")
cat("\n")
cat("Info parameters algorithm","\n")
info.value = rbind(signif,size,deep)
dimnames(info.value) = NULL
info.name = c("threshold signif.","node size limit(%)","tree depth level")
info.tree = data.frame(info.name,info.value)
names(info.tree) = c("parameters algorithm", "value")
print(info.tree)
cat("\n")
cat("---------------------------------------------")
cat("\n")
cat("Info segmentation variables","\n")
type.y = rep(0, ncol(y))
treat.y = rep("binary", ncol(y))
for (i in 1:length(type.y))
{
type.y[i] = ifelse(is.ordered(y[, i]), "ord","nom")
if (nlevels(y[, i]) > 2)
if (is.ordered(y[, i]))
treat.y[i] = "ordinal"
else treat.y[i] = "nominal"
}
df.y = data.frame(nlevels = unlist(lapply(y, nlevels)),ordered = unlist(lapply(y, is.ordered)),
treatment = treat.y)
if (y[1,1] == 1){
df.y = df.y[-1,]
}
else
{
df.y
}
print(df.y)
}
#' ############################################################################################
#' @title printing the tree structure
#' @details
#' Internal function.
#' @param moxtree moxtree object
#' @return the tree structure
#' @keywords internal
#' @export
#'
printTree <- function(moxtree)
{
for (n in moxtree@nodes){
print (n)
}
}
|
e1cae3ee797bcc7970005fe13bca1ea459ee2833 | e8d1f9b04636822a2513458aebdf80db9a29d947 | /man/genes_mat.Rd | f50996b2dd181b192464712a3c483e2d1c8d92b7 | [] | no_license | huerqiang/prioGene | b9cb6bbade2397dc241d3c30715aece798aac2bb | 2e58c7e0b926072d2c5b1ab6cad8586b84f27d47 | refs/heads/master | 2020-06-18T04:00:40.020606 | 2020-01-08T02:58:34 | 2020-01-08T02:58:34 | 196,157,549 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 410 | rd | genes_mat.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{genes_mat}
\alias{genes_mat}
\title{a one-to-many matrix of GO term and gene}
\format{A matrix with 45 rows and 3 columns}
\usage{
genes_mat
}
\description{
The first column is the gene symbol, the second column is the GO terms.
}
\details{
The third column is the number of GO terms.
}
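\examples{
# Inspect the first rows of the documented term-to-gene mapping (illustrative):
head(genes_mat)
}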
\keyword{datasets}
|
84afd0009d68337cd59225335f8ca45ec7753b3d | c2061964216f76ad0f440c76dbfe1119e0279a22 | /R/API-methods.R | 65f3d6cff7f46778421a4f00c57d3ebfa0b38824 | [] | no_license | cran/antaresRead | 046829e05e411adfb55fc652ad49ea84f2610264 | f6a182b21854e12c5c470afcd38c26f44fb2b8d5 | refs/heads/master | 2023-04-16T10:45:23.521378 | 2023-04-06T16:20:02 | 2023-04-06T16:20:02 | 87,090,660 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,663 | r | API-methods.R |
#' API methods
#'
#' @param endpoint API endpoint to interrogate, it will be added after `default_endpoint`.
#' Can be a full URL (by wrapping ìn [I()]), in that case `default_endpoint` is ignored.
#' @param ... Additional arguments passed to API method.
#' @param default_endpoint Default endpoint to use.
#' @param opts Antares simulation options or a `list` with an `host = ` slot.
#'
#' @return Response from the API.
#' @export
#'
#' @name API-methods
#'
#' @importFrom httr GET accept_json stop_for_status content add_headers timeout
#'
#' @examples
#' \dontrun{
#'
#' # List studies with local API
#' api_get(
#' opts = list(host = "http://0.0.0.0:8080"),
#' endpoint = NULL
#' )
#'
#' }
api_get <- function(opts, endpoint, ..., default_endpoint = "v1/studies") {
if (inherits(endpoint, "AsIs")) {
opts$host <- endpoint
endpoint <- NULL
default_endpoint <- NULL
}
if (is.null(opts$host))
stop("No host provided in `opts`: use a valid simulation options object or explicitly provide a host with opts = list(host = ...)")
config <- c(
opts$httr_config,
list(
accept_json()
)
)
if (!is.null(opts$token) && opts$token != "") {
config <- c(
config,
add_headers(Authorization = paste("Bearer ", opts$token))
)
}
if (is.null(opts$timeout))
opts$timeout <- 60
result <- GET(
url = URLencode(paste(c(opts$host, default_endpoint, endpoint), collapse = "/")),
config = config,
timeout(opts$timeout),
...
)
  # fix: when an output folder (economy/adequacy) is missing, downgrade a 404 to a warning instead of an error
url_elements <- strsplit(result$url, "%2F")[[1]]
condition_status_check <- !(!is.na(url_elements[4]) & url_elements[4] %in% c("economy","adequacy") & result$status_code == 404)
if (condition_status_check) stop_for_status(result) else warn_for_status(result)
content(result)
}
#' @export
#'
#' @rdname API-methods
#'
#' @importFrom httr POST accept_json content_type_json stop_for_status content add_headers
api_post <- function(opts, endpoint, ..., default_endpoint = "v1/studies") {
if (inherits(endpoint, "AsIs")) {
opts$host <- endpoint
endpoint <- NULL
default_endpoint <- NULL
}
if (is.null(opts$host))
stop("No host provided in `opts`: use a valid simulation options object or explicitly provide a host with opts = list(host = ...)")
config <- c(
opts$httr_config,
list(
accept_json(),
content_type_json()
)
)
if (!is.null(opts$token) && opts$token != "") {
config <- c(
config,
add_headers(Authorization = paste("Bearer ", opts$token))
)
}
result <- POST(
url = URLencode(paste(c(opts$host, default_endpoint, endpoint), collapse = "/")),
config = config,
...
)
stop_for_status(result)
content(result)
}
#' @export
#'
#' @rdname API-methods
#'
#' @importFrom httr PUT accept_json stop_for_status content add_headers
api_put <- function(opts, endpoint, ..., default_endpoint = "v1/studies") {
if (inherits(endpoint, "AsIs")) {
opts$host <- endpoint
endpoint <- NULL
default_endpoint <- NULL
}
if (is.null(opts$host))
stop("No host provided in `opts`: use a valid simulation options object or explicitly provide a host with opts = list(host = ...)")
if (!is.null(opts$token) && opts$token != "") {
config <- add_headers(Authorization = paste("Bearer ", opts$token), Accept = "application/json")
} else {
config <- add_headers(Accept = "application/json")
}
result <- PUT(
url = URLencode(paste(c(opts$host, default_endpoint, endpoint), collapse = "/")),
config,
...
)
stop_for_status(result)
content(result)
}
#' @export
#'
#' @rdname API-methods
#'
#' @importFrom httr DELETE accept_json stop_for_status content
api_delete <- function(opts, endpoint, ..., default_endpoint = "v1/studies") {
if (inherits(endpoint, "AsIs")) {
opts$host <- endpoint
endpoint <- NULL
default_endpoint <- NULL
}
if (is.null(opts$host))
stop("No host provided in `opts`: use a valid simulation options object or explicitly provide a host with opts = list(host = ...)")
config <- c(
opts$httr_config,
list(
accept_json()
)
)
if (!is.null(opts$token) && opts$token != "") {
config <- c(
config,
add_headers(Authorization = paste("Bearer ", opts$token))
)
}
result <- DELETE(
url = URLencode(paste(c(opts$host, default_endpoint, endpoint), collapse = "/")),
config = config,
...
)
stop_for_status(result)
content(result)
}
|
d29addc45ad1540ad95c8544e8002562baf29435 | d8affab3b21ca33c2b6397e28171c4ad69b03d98 | /regression.R | 471e4414ef889e20c3e50e5acbebf24faa2d7f99 | [] | no_license | nupurkok/analytics | 3e69e9eb88d9eb6cc4f33ae105b7993c46a69fce | b0b76dd306e443aae010cac55ffcda484c39ad42 | refs/heads/master | 2020-03-28T15:22:27.782207 | 2018-09-16T13:03:53 | 2018-09-16T13:03:53 | 148,586,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 546 | r | regression.R |
women
str(women)
cor(women$height, women$weight)
cov(women$height, women$weight)
plot(women)
# create linear model: weight as a function of height
fit1 = lm(formula = weight ~ height, data = women)
summary(fit1)
fitted(fit1)
cbind(women, fitted(fit1), residuals(fit1))
ndata1 = data.frame(height = c(62.5, 63.5))
predict(fit1, newdata = ndata1)
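# the same predictions with 95% confidence intervals (illustrative):
predict(fit1, newdata = ndata1, interval = "confidence")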
# multiple linear regression
# predict mpg from wt and hp
mtcars
fit2 = lm(mpg ~ wt + hp, data = mtcars)
summary(fit2)
range(mtcars$wt) ; range(mtcars$hp)
ndata2=data.frame(wt=c(2.5,3.4), hp=c(100,250))
predict(fit2, newdata = ndata2)
|
061fa04320c71b1db0c10b5e6894a7f9267ebd0d | a411bbff2c1718c7d1823155138ef10a0c27da89 | /R_tmca_package-master/tmca.unsupervised/man/lda_lda.Rd | fcec6f256336d70d300fe31da10c90203a5ab346 | [] | permissive | ChristianKahmann/data_science_image | 5a0e805ca2cc2d3d8d99ab652dffb4b470dc102f | eb06582d6eaa521a59193ffbfc55c0a0a3eaa886 | refs/heads/master | 2020-10-01T13:53:17.130831 | 2020-01-14T12:48:00 | 2020-01-14T12:48:00 | 227,551,494 | 0 | 0 | MIT | 2019-12-12T07:58:15 | 2019-12-12T07:58:14 | null | UTF-8 | R | false | true | 1,941 | rd | lda_lda.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topic_model.R
\docType{data}
\name{lda_lda}
\alias{lda_lda}
\title{Implementation of LDA::lda.collapsed.gibbs.sampler}
\format{R6Class}
\usage{
lda_lda
}
\description{
Implementation of the topicmodel::LDA algorithm. Implements all of the baseclass methods. Parameters are the number of topics K and the prior alpha
}
\section{Fields}{
\describe{
\item{\code{package}}{(private) List of Registered methods}
\item{\code{tm_machine}}{(private) An instance of a Implementation of a Algorithm}
}}
\section{Slots}{
\describe{
\item{\code{name}}{Sets the name of the method/function (identifier for the class)}
\item{\code{package}}{Sets the name of the package the method was imported from}
\item{\code{parameters}}{Named list of available parameters: alpha and K.}
}}
\section{Methods}{
\describe{
\item{\code{call()}}{This should execute the algorithm and save its results to the internal variable (private$model)}
\item{\code{input(dtm = NULL)}}{ Input the training DTM, transform to the algorithm specific format and save to private$internal_representation.}
\item{\code{output()}}{Tranform the internal representation}
\item{\code{inferTopics(new_text=NULL)}}{Infer the topicdistribution with regards to the calculated model for yet unseen data.
Return the document x topic matrix containing the probabilities of assignment. }
\item{\code{set_parameters(par_list)}}{This method uses a List of named Parameters to set the appropriate values of the implemented algorithm.
It is implemented in this class. It checks whether the input parameters are available in the implementation.
It only sets the parameters if there are no mismatches. }
\item{\code{info()}}{Returns the package and algorithm name.}
}
}
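\examples{
\dontrun{
# Illustrative sketch of the documented workflow; `my_dtm` is a placeholder
# document-term matrix, and R6 instantiation via $new() is an assumption:
tm <- lda_lda$new()
tm$set_parameters(list(K = 10, alpha = 0.1))
tm$input(dtm = my_dtm)
tm$call()
topics <- tm$output()
}
}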
\author{
Janos Borst
}
\keyword{datasets}
|
db63ab7bbd4d3f692bed89bc6cb9d56b96266326 | 1154ea4133e862012fb1d0680ee4dc649c87ab40 | /man/run_primersearch.Rd | f0690751ed513ebdd5c16f7d386a66c28713bec1 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | grunwaldlab/metacoder | f02daa6191254344861c399ef517d54acd6a190f | edd7192858fffc397fb64b9dcac00ed19dbbaa12 | refs/heads/master | 2023-05-03T13:50:13.490344 | 2023-04-20T06:15:31 | 2023-04-20T06:15:31 | 23,885,494 | 128 | 27 | NOASSERTION | 2023-03-28T19:45:07 | 2014-09-10T17:57:54 | R | UTF-8 | R | false | true | 1,274 | rd | run_primersearch.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primersearch.R
\name{run_primersearch}
\alias{run_primersearch}
\title{Execute EMBOSS Primersearch}
\usage{
run_primersearch(
seq_path,
primer_path,
mismatch = 5,
output_path = tempfile(),
program_path = "primersearch",
...
)
}
\arguments{
\item{seq_path}{A character vector of length 1. The path to the fasta file containing reference
sequences to search for primer matches in.}
\item{primer_path}{A character vector of length 1. The path to the file containing primer pairs
to match. The file should be whitespace-delimited with 3 columns: primer name, first primer
sequence, and second primer sequence.}
\item{mismatch}{An integer vector of length 1. The percentage of mismatches allowed.}
\item{output_path}{A character vector of length 1. Where the output of primersearch is saved.}
\item{program_path}{A character vector of length 1. The location of the primersearch binary.
Ideally, it should be in your system's search path.}
\item{...}{Additional arguments are passed to \code{primersearch}.}
}
\value{
The command generated as a character vector of length 1.
}
\description{
Execute EMBOSS Primersearch
}
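\examples{
\dontrun{
# Hypothetical input paths; requires the EMBOSS primersearch binary on the
# system search path:
run_primersearch("reference_seqs.fasta", "primer_pairs.txt", mismatch = 10)
}
}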
\seealso{
\code{\link{parse_primersearch}}
}
\keyword{internal}
|
46c4e6309d7e779524b8b1a79263f38885577650 | ebb09f52b1ee12d8ae8d4c493e6f1079ee57868c | /ExploratoryDataAnalysis/Project2/plot1.R | 344f1ab64d1fa16fc56bc45754d6205e3ffc4c86 | [] | no_license | r6brian/datasciencecoursera | a1723f812a34eee7094dfaa0bfde6c618b349d6c | 548944d3ba68d302160f05158fb90859bc4c8bae | refs/heads/master | 2021-01-19T10:29:54.605308 | 2015-08-23T20:00:04 | 2015-08-23T20:00:04 | 26,268,379 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 634 | r | plot1.R |
# 1. Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Read data files
NEI <- readRDS("data/exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("data/exdata-data-NEI_data/Source_Classification_Code.rds")
# aggregrate based upon Emissions and Years
totalEmissions <- aggregate(Emissions ~ year, NEI, sum)
# plot a bar graph
png('plot1.png')
barplot(height=totalEmissions$Emissions/10^6,
names.arg=totalEmissions$year,
xlab="years",
        ylab=expression('total PM'[2.5]*' emission (10^6 Tons)'),
        main=expression('Total PM'[2.5]*' emissions at various years'))
dev.off() |
1989e063feb2e6de62bd306ed726df9d77cef61f | 4ad24fafde117a7f5cfffa3733b207aa6cf4ea90 | /man/swan_reportR.Rd | 7c0eb57756c19a7684b62c004cc6bed307826131 | [
"MIT"
] | permissive | dbca-wa/rivRmon | 1fc6ede9b7317cfb1463db52d5c6c85cd4788577 | 2708ec7860b6c81e950b25251a3683d1e56cd48d | refs/heads/master | 2023-04-11T11:07:02.534309 | 2023-04-06T07:56:54 | 2023-04-06T07:56:54 | 202,643,428 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,533 | rd | swan_reportR.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swan_reports.R
\name{swan_reportR}
\alias{swan_reportR}
\title{Function to create all of the plots and tables for the annual Swan River
report.}
\usage{
swan_reportR(
inpath,
outpath,
surface = "blue",
bottom = "red",
chloro = "darkgreen"
)
}
\arguments{
\item{inpath}{character filepath to premade annual report data
\code{\link{swan_WIN_report_data}}.}
\item{outpath}{character filepath to desired export location.}
\item{surface}{colour for surface plots. Can be named colour or hex format.
Defaults to "blue".}
\item{bottom}{colour for bottom plots. Can be named colour or hex format.
Defaults to "red".}
\item{chloro}{colour for integrated plots. Can be named colour or hex format.
Defaults to "darkgreen".}
}
\value{
panel plots for all metrics and a csvs of metrics for
inclusion to tables.
}
\description{
\code{swan_reportR} produces panel plots and tables for all
metrics.
}
\details{
This is a wrapper function that runs all of the individual functions
to produce all of the plots and tables for the Swan River.
Outputs will be exported to two folders created at the outpath
location. `s_panels/` for plots and `s_tables/` for data tables.
}
\examples{
\dontrun{
swan_reportR(inpath, outpath, surface = "blue", bottom = "red",
chloro = "darkgreen")}
}
\author{
Bart Huntley, \email{[email protected]}
For more details see \url{https://dbca-wa.github.io/rivRmon/index.html}
{the rivRmon website}
}
|
d016bf7c1cea2be45570d0826610230b375be3ce | 9bc17a169325375bc993b540d2ad0f0810ca0e76 | /R/twoway.plots.R | a98edb8797477c8f6316b7d57853a3015db298 | [] | no_license | alanarnholt/PASWR | 335b960db32232a19d08560938d26f168e43b0d6 | f11b56cff44d32c3683e29e15988b6a37ba8bfd4 | refs/heads/master | 2022-06-16T11:34:24.098378 | 2022-05-14T22:56:11 | 2022-05-14T22:56:11 | 52,523,116 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,375 | r | twoway.plots.R |
#' @title Exploratory Graphs for Two Factor Designs
#'
#' @description Function creates side-by-side boxplots for each factor, a design plot (means), and an interaction plot.
#'
#' @param Y response variable
#' @param fac1 factor one
#' @param fac2 factor two
#' @param COL a vector with two colors
#'
#' @author Alan T. Arnholt <arnholtat@@appstate.edu>
#'
#' @seealso \code{\link{oneway.plots}}, \code{\link{checking.plots}}
#'
#' @export
#'
#' @examples
#' with(data = TireWear, twoway.plots(Wear, Treat, Block))
#'
#' @keywords hplot
####################################################################
twoway.plots<-function(Y, fac1, fac2, COL=c("#A9E2FF", "#0080FF")){
opar <- par(no.readonly = TRUE)
par(mfrow=c(2, 2), mar = c(5.1, 4.1, 1.1, 1.1))
YL <- range(Y)
plot(Y ~ fac1, col = COL[1], xlab = deparse(substitute(fac1)),
ylab = deparse(substitute(Y)), ylim = YL)
plot(Y ~ fac2, col = COL[2], xlab = deparse(substitute(fac2)),
ylab = deparse(substitute(Y)), ylim = YL)
plot.design(Y ~ fac1 + fac2, fun = "mean",
ylab = deparse(substitute(Y)), ylim = YL)
interaction.plot(fac1, fac2, Y, xlab = deparse(substitute(fac1)),
trace.label = deparse(substitute(fac2)),
type = "b", legend = FALSE,
ylab = deparse(substitute(Y)), ylim = YL)
on.exit(par(opar))
}
|
b4e93e3bcccb0eb0d1014bd355bcfff5a5be6187 | 280019f481fe09da00296f45e5fa530051780756 | /ui.R | 10b50c14611abb664f6dbfc7ea4c164e2ac58b15 | [] | no_license | linareja/2017_Buenos_Aires_Elections | 1effb2b1d39bf660e9fa678a6a78ac3000f2122c | d500aaedb233fe541fe00dc63f0d488043467111 | refs/heads/master | 2021-09-12T16:33:33.715908 | 2018-04-18T18:19:56 | 2018-04-18T18:19:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,802 | r | ui.R |
library(shiny)
dashboardPage(
dashboardHeader(title = "2017 Elections in Buenos Aires Province"),
dashboardSidebar( sidebarMenu(
menuItem("Overview", tabName = "overview", icon = icon("globe")),
menuItem("Analysis", tabName = "analysis", icon = icon("bar-chart"))
)),
dashboardBody(
tabItems(
tabItem("overview",
fluidRow(
column(6,selectInput("charge", "Select charge to visualize", levels(raw_data$variable))
)),
fluidRow(
column(6, h3("Overview"))
),
fluidRow(
column(6,drawMapUI("map1")
),
column(6,drawTreeMapUI("treemap1")
)
),
fluidRow(
column(6, h3("Comparison"))
),
fluidRow(
column(6, selectInput("comparison_plot", "Select Comparison Plot", choices = c("Map", "Treemap")))
),
                 # A selector to compare by treemap or by map goes here
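                 # conditionalPanel() evaluates its `condition` string as JavaScript
                 # in the browser, so only the selected comparison view is shown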
conditionalPanel("input.comparison_plot == 'Map'",
fluidRow(
column(6,drawMapUI("map_compare1",is.multiple = F)),
column(6,drawMapUI("map_compare2",is.multiple = F))
)
),
conditionalPanel("input.comparison_plot == 'Treemap'",
fluidRow(
column(6,drawTreeMapUI("treemap_compare1",is.multiple = F)),
column(6,drawTreeMapUI("treemap_compare2",is.multiple = F))
)
)
),
tabItem("analysis",
drawHeatmapUI("heatmap"))
)
)
)
|
564a95d83be7184c25e4953fc74f13401f3970ba | b6ed5857732c3261abab33a6665e7193d6862aef | /tests/testthat/test-read-oneshot-eav.R | d2b16cc13808d4cf760c57f846c793651568b48e | [
"MIT"
] | permissive | cran/REDCapR | 5ac1ebdb03fbf7dfa1aab23a2c23f711adcd4847 | a1aa09eb27fb627207255018fa41e30fa5d4b0fc | refs/heads/master | 2022-08-27T14:49:33.798497 | 2022-08-10T15:10:18 | 2022-08-10T15:10:18 | 24,255,971 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,860 | r | test-read-oneshot-eav.R |
library(testthat)
credential <- retrieve_credential_testing()
update_expectation <- FALSE
test_that("smoke test", {
testthat::skip_on_cran()
expect_message(
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token)
)
})
test_that("default", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/default.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(
redcap_uri = credential$redcap_uri,
token = credential$token
)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame) # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_true(returned_object$filter_logic=="", "A filter was not specified.")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("specify-forms", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/specify-forms.R"
desired_forms <- c("demographics", "race_and_ethnicity")
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, forms=desired_forms)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame) # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_true(returned_object$filter_logic=="", "A filter was not specified.")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("raw", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/raw.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, raw_or_label="raw")
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_true(returned_object$filter_logic=="", "A filter was not specified.")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("raw-and-dag", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/raw-and-dag.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, raw_or_label="raw", export_data_access_groups=TRUE)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_true(returned_object$filter_logic=="", "A filter was not specified.")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("label-and-dag", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/label-and-dag.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, raw_or_label="label", export_data_access_groups=TRUE)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_true(returned_object$filter_logic=="", "A filter was not specified.")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("label-header", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/label-header.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, raw_or_label_headers="label")
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_true(returned_object$filter_logic=="", "A filter was not specified.")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("filter-numeric", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/filter-numeric.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
filter <- "[age] >= 61"
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, filter_logic=filter)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_equal(returned_object$filter_logic, filter)
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("filter-character", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/filter-character.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
filter <- "[name_first] = 'John Lee'"
expect_message(
regexp = expected_outcome_message,
returned_object <- REDCapR:::redcap_read_oneshot_eav(redcap_uri=credential$redcap_uri, token=credential$token, filter_logic=filter)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_equal(returned_object$filter_logic, filter)
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("date-range", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/read-oneshot-eav/default.R"
expected_outcome_message <- "\\d+ records and \\d+ columns were read from REDCap in \\d+(\\.\\d+\\W|\\W)seconds\\."
start <- as.POSIXct(strptime("2018-08-01 03:00", "%Y-%m-%d %H:%M"))
stop <- Sys.time()
expect_message(
regexp = expected_outcome_message,
returned_object <-
REDCapR:::redcap_read_oneshot_eav(
redcap_uri = credential$redcap_uri,
token = credential$token,
datetime_range_begin = start,
datetime_range_end = stop
)
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_true(returned_object$records_collapsed=="", "A subset of records was not requested.")
expect_true(returned_object$fields_collapsed=="", "A subset of fields was not requested.")
expect_equal(returned_object$filter_logic, "")
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
})
test_that("bad token -Error", {
testthat::skip_on_cran()
expected_outcome_message <- "The REDCapR record export operation was not successful\\."
expect_error(
regexp = expected_outcome_message,
REDCapR:::redcap_read_oneshot_eav(
redcap_uri = credential$redcap_uri,
token = "BAD00000000000000000000000000000"
)
)
})
rm(credential)
|
ae049e4f7dded0c1877205b17e89aab67356d759 | cf4263e82b2c118bc3ecea5dc62d561e7487cbd3 | /tests/testthat/test_flatten_data.R | 327e274c4b13ccbaaa4edf5a2d6be774fcc94394 | [
"MIT"
] | permissive | EDIorg/ecocomDP | 151a2d519ff740d466fafab74df5171a6ef196bf | 0554d64ce81f35ed59985d9d991203d88fe1621f | refs/heads/main | 2023-08-14T02:07:19.274860 | 2023-06-19T22:27:30 | 2023-06-19T22:27:30 | 94,339,321 | 26 | 10 | NOASSERTION | 2023-07-26T22:21:00 | 2017-06-14T14:22:43 | R | UTF-8 | R | false | false | 7,103 | r | test_flatten_data.R |
context("flatten_data()")
# Compare L0 flat and L1 flat - The column names and values of the L0 flat and L1 flattened tables should match, with an exception:
# 1.) Primary keys, row identifiers, of the ancillary tables are now present.
# Column presence -------------------------------------------------------------
testthat::test_that("Column presence", {
for (i in c("df", "tbbl")) {
# Parameterize
if (i == "df") { # test w/data.frame
L0_flat <- as.data.frame(ants_L0_flat)
for (tbl in names(ants_L1$tables)) {
ants_L1$tables[[tbl]] <- as.data.frame(ants_L1$tables[[tbl]])
}
} else { # test w/tibble
L0_flat <- ants_L0_flat
}
crit <- read_criteria()
L1_flat <- flatten_data(ants_L1$tables)
# Adjust L0 flat to our expectations
L0_flat$location_name <- NA_character_ # Add exception
# TEST: All L0 flat columns (with above exceptions) should be in L1 flat
cols_missing_from_L1 <- base::setdiff(colnames(L0_flat), colnames(L1_flat))
expect_true(length(cols_missing_from_L1) == 0)
# TEST: All L1 flat columns should be in L0 flat
cols_missing_from_L0 <- base::setdiff(colnames(L1_flat), colnames(L0_flat))
expect_true(length(cols_missing_from_L0) == 0)
}
})
# Column classes --------------------------------------------------------------
testthat::test_that("Column classes", {
for (i in c("df", "tbbl")) {
# Parameterize
if (i == "df") { # test w/data.frame
L0_flat <- as.data.frame(ants_L0_flat)
for (tbl in names(ants_L1$tables)) {
ants_L1$tables[[tbl]] <- as.data.frame(ants_L1$tables[[tbl]])
}
} else { # test w/tibble
L0_flat <- ants_L0_flat
}
crit <- read_criteria()
L1_flat <- flatten_data(ants_L1$tables)
# TEST: flatten_data() applies a set of "smart" class coercions to return numeric values stored in the L1 as character back to their original numeric class. The following code tests that column classifications in L1 should be "similar" to those in L0.
L0_classes <- unlist(lapply(L0_flat, class))
L1_classes <- unlist(lapply(L1_flat, class))
# Harmonize classes (because there is some variation) before comparing
L0_classes[stringr::str_detect(names(L0_classes), "id")] <- "character" # identifiers should be character
L1_classes[stringr::str_detect(names(L1_classes), "id")] <- "character"
L0_classes[stringr::str_detect(L0_classes, "integer")] <- "numeric" # integer ~= numeric
L1_classes[stringr::str_detect(L1_classes, "integer")] <- "numeric"
# TEST: Compare col classes
for (c in seq(L1_classes)) {
col <- L1_classes[c]
if (names(col) %in% names(L0_classes)) {
use_i <- names(L0_classes) %in% names(col)
if (any(use_i)) {
expect_equal(L0_classes[use_i], col)
}
}
}
}
})
# Observations (rows) match ---------------------------------------------------
# TODO Implement this test?
# testthat::test_that("Observations (rows) match", {
# # Parameterize
# crit <- read_criteria()
# L0_flat <- ants_L0_flat
# L1_flat <- ecocomDP::flatten_data(ants_L1$tables)
# # Adjust L0 flat to our expectations
# L0_flat <- L0_flat %>%
# dplyr::select(-block) %>% # A higher level location lost when flattened
# dplyr::select(-author) %>% # Columns of NA are dropped when flattened
# dplyr::rename(location_name = plot) # Becomes "location_name" when flattened
# # TEST: Observation "A" in L0 flat has the same values in observation "A" of L1 flat
# # TODO observation_id are identical
# # TODO match cols and sort, then compare (some subset?)
# })
# Non-required columns --------------------------------------------------------
# Non-required columns of ecocomDP aren't required by flatten_data()
testthat::test_that("Non-required columns", {
for (i in c("df", "tbbl")) {
# Parameterize
if (i == "df") { # test w/data.frame
for (tbl in names(ants_L1$tables)) {
ants_L1$tables[[tbl]] <- as.data.frame(ants_L1$tables[[tbl]])
}
}
# Parameterize
crit <- read_criteria() %>%
dplyr::filter(required == TRUE, !is.na(column)) %>%
dplyr::select(table, column)
tbls <- ants_L1$tables
# Throw out all non-required columns
for (tname in names(tbls)) {
rqd <- crit$column[crit$table %in% tname]
tbls[[tname]] <- tbls[[tname]] %>% dplyr::select(dplyr::any_of(rqd))
}
# TEST: Missing non-required columns isn't an issue
L1_flat <- ecocomDP::flatten_data(tbls)
cols_in <- unname(unlist(lapply(tbls, colnames)))
cols_out <- colnames(L1_flat)
dif <- base::setdiff(cols_in, cols_out)
expect_equal(dif, # Difference is a set of cols that shouldn't be returned by anyway
c("location_ancillary_id", "taxon_ancillary_id", "observation_ancillary_id",
"variable_mapping_id", "table_name"))
}
})
# flatten_location() ----------------------------------------------------------
# location_name values are parsed into the original L0 column representation
testthat::test_that("flatten_location(): No nesting", {
loc <- tidyr::as_tibble( # A table demonstrating this use case
data.frame(
location_id = c("H1"),
location_name = c("Highest__1"),
latitude = 45,
longitude = 123,
elevation = 200,
parent_location_id = NA_character_,
stringsAsFactors = FALSE))
for (i in c("df", "tbbl")) {
# Parameterize
if (i == "df") { # test w/data.frame
loc <- as.data.frame(loc)
}
# Parameterize
res <- flatten_location(loc)
loc_flat <- res$location_flat
# TEST: Original columns of data are returned
expect_true(all(c("Highest") %in% colnames(loc_flat))) # column names
expect_equal(loc_flat$Highest, "1") # values
}
})
testthat::test_that("flatten_location(): 3 nested sites", {
loc <- tidyr::as_tibble( # A table demonstrating this use case
data.frame(
location_id = c("H1", "M2", "L3"),
location_name = c("Highest__1", "Middle__2", "Lowest__3"),
latitude = c(NA, NA, 45),
longitude = c(NA, NA, 123),
elevation = c(NA, NA, 200),
parent_location_id = c(NA_character_, "H1", "M2"),
stringsAsFactors = FALSE))
for (i in c("df", "tbbl")) {
# Parameterize
if (i == "df") { # test w/data.frame
loc <- as.data.frame(loc)
}
# Parameterize
res <- flatten_location(loc)
loc_flat <- res$location_flat
# TEST: Original columns of data are returned
expect_true(all(c("Highest", "Middle", "Lowest") %in% colnames(loc_flat))) # column names
expect_equal(loc_flat$Highest, "1") # values
expect_equal(loc_flat$Middle, "2")
expect_equal(loc_flat$Lowest, "3")
# TEST: Original columns are returned in the order of nesting
expect_equal(which(colnames(loc_flat) %in% "Highest"), 3)
expect_equal(which(colnames(loc_flat) %in% "Middle"), 4)
expect_equal(which(colnames(loc_flat) %in% "Lowest"), 5)
}
})
|
b86c261c092bf8de25ef965c9d593fe8bc2c1c47 | 8d28b939007e0887f3a1af5b54a24c68dd3d4204 | /man/rayleighQlink.Rd | d99efc6fd1fdf6169706cd149907ed4706f3e5a4 | [] | no_license | cran/VGAMextra | 897c59ab2b532b0aa1d4011130db79f5c95eb443 | ac7e3df54136fd4c9e49b754f6747a11d7c3b122 | refs/heads/master | 2021-06-06T03:52:23.167971 | 2021-05-24T03:10:07 | 2021-05-24T03:10:07 | 138,900,855 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,858 | rd | rayleighQlink.Rd | \name{rayleighQlink}
\alias{rayleighQlink}
\title{
Link functions for the quantiles of several 1--parameter continuous
distributions
}
\description{
Computes the \code{rayleighQlink} transformation, its inverse and the
first two derivatives.
%
}
\usage{
rayleighQlink(theta, p = stop("Argument 'p' must be specified."),
bvalue = NULL, inverse = FALSE,
deriv = 0, short = TRUE, tag = FALSE)
%
}
\arguments{
\item{theta}{
Numeric or character. It is \eqn{\theta}{theta} by default, although
it may be \eqn{\eta}{eta}. See \code{\link[VGAM:Links]{Links}} for
additional details about this.
}
\item{p}{
Numeric. A single value between 0.0 and 1.0.
It is the \eqn{p}--quantile to be modeled by this link function.
}
\item{bvalue, inverse, deriv, short, tag}{
See \code{\link[VGAM:Links]{Links}}.
}
}
\details{
This link function directly models any \eqn{p}--quantile of the
Rayleigh distribution specified by the argument \code{p}.
It is called the \code{rayleighQlink} transformation defined as
%
\deqn{b \sqrt{-2 \log(1 - p)},}{b * sqrt(-2 * log(1 - p)),}
%
where \eqn{b > 0} is a scale parameter as in
\code{\link[VGAM:rayleigh]{rayleigh}}.
Numerical values of \eqn{b} or \eqn{p} out of range may
result in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
If \code{theta} is character, then arguments \code{inverse} and
\code{deriv} are discarded.
}
\value{
For \code{deriv = 0}, the \code{rayleighQlink} transformation of
  \code{theta}, when \code{inverse = FALSE}. If \code{inverse = TRUE}, then
this function returns \code{theta / sqrt(-2 log(1 - p))}.
For \code{deriv = 1}, then the function returns
\eqn{d} \code{eta} / \eqn{d} \code{theta},
if \code{inverse = FALSE}. If \code{inverse = TRUE}, then
\eqn{d} \code{theta} / \eqn{d} \code{eta} as a function of \code{theta}.
  If \code{deriv = 2}, then the second order derivatives in terms of
\code{theta}.
}
\author{
V. Miranda and Thomas W. Yee.
}
\note{
Numerical instability may occur for values \code{theta} too close
to zero. Use argument \code{bvalue} to replace them before
computing the link.
}
\seealso{
\code{\link[VGAM:rayleigh]{rayleigh}},
\code{\link[VGAM:Links]{Links}}.
}
\examples{
## E1. rayleighQlink() and its inverse ##
p <- 0.50 ## Modeling the median
my.b <- seq(0, 5, by = 0.1)[-1]
max(my.b - rayleighQlink(rayleighQlink(my.b, p = p), p = p, inverse = TRUE)) ## Zero
## E2. Special values ##
rayleighQlink(theta = c(Inf, -Inf, NA, NaN), p = p)
## E3. Use of argument 'bvalue' ##
rayleighQlink(theta = seq(-0.2, 1.0, by = 0.1), p = p) # WARNING: NaNs if theta <= 0
rayleighQlink(theta = seq(-0.2, 1.0, by = 0.1), p = p, bvalue = .Machine$double.xmin)
}
|
8ce7a9d3e16bf2b520b938c008850a5ca1577fb8 | 92456ce1d280dd99f0df1cc2a2567c5021286f03 | /R/prepare_data.R | 5c8fabf25b3ad3505598af1c3c14f7a6948f57d1 | [] | no_license | nzfarhad/AFG_MSNA_19_Analysis | 41643620a065ff3eaba40779624101b55562efe4 | 66b4cfe032b7665475606dcab5eae4fcacba0e9c | refs/heads/master | 2020-07-28T17:27:34.829098 | 2020-01-28T10:01:02 | 2020-01-28T10:01:02 | 209,478,936 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 100,120 | r | prepare_data.R | # Title: Preparation of data for woa survey
# Authors: Sayed Nabizada, Jarod Lapp, Christopher Jarvis,
# Date created: 20/09/2019
# Date last changed: 25/09/2019
# Purpose: This script is for recoding variables in the whole of
# of Afghanistan survey data
# indicators and composite scores are created.
# setup analysis environment
source("./R/source.R")
library(msni19)
# character operation
ch<-as.character
chr<-as.character
coerc<-function(x){as.numeric(chr(x))}
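# coerc() converts via character, so factors keep their labels:
# e.g. coerc(factor("12")) returns 12; coerc("n/a") returns NA with a warning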
# load data
# data <- read_excel(master_data, sheet = "MSNA_AFG_19_parent_sheet", na = c("","NA"), guess_max = 3000)
# overall_muac_data <- read_excel(master_data, sheet = "MSNA_AFG_19_muac" , na = c("","NA"))
# overall_hh_roster <- read_excel(master_data, sheet = "MSNA_AFG_19_hh_roster" , na = c("","NA"))
# overall_death_roster <- read_excel(master_data, sheet = "MSNA_AFG_19_hh_death_roster" , na = c("","NA"))
# overall_left_roster <- read_excel( master_data, sheet = "MSNA_AFG_19_hh_left_roster" , na = c("","NA"))
# data <- read.csv("input/data/clean/MSNA_AFG_19_parent_sheet.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
# overall_muac_data <- read.csv("input/data/clean/MSNA_AFG_19_muac.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
# overall_hh_roster <- read.csv("input/data/clean/MSNA_AFG_19_hh_roster.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
# overall_death_roster <- read.csv("input/data/clean/MSNA_AFG_19_hh_death_roster.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
# overall_left_roster <- read.csv("input/data/clean/MSNA_AFG_19_hh_left_roster.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
#
data <- read.csv("input/data/clean/complete_with_farah/MSNA_AFG_19_parent_sheet.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
overall_muac_data <- read.csv("input/data/clean/complete_with_farah/MSNA_AFG_19_muac.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
overall_hh_roster <- read.csv("input/data/clean/complete_with_farah/MSNA_AFG_19_hh_roster.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
overall_death_roster <- read.csv("input/data/clean/complete_with_farah/MSNA_AFG_19_hh_death_roster.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
overall_left_roster <- read.csv("input/data/clean/complete_with_farah/MSNA_AFG_19_hh_left_roster.csv",stringsAsFactors=F,na.strings = c("", "NA"), check.names = F)
# Temporary fix: Kobo exports column names with "/" separators;
# replace the first "/" in each name with "."
rename1 <- function(d1) {
sub("/", ".", names(d1))
}
data$uuid <- data$`_uuid`
names(data) <- rename1(data)
names(overall_muac_data ) <- rename1(overall_muac_data )
names(overall_hh_roster ) <- rename1(overall_hh_roster )
names(overall_death_roster ) <- rename1(overall_death_roster)
names(overall_left_roster ) <- rename1(overall_left_roster)
# composite indicators #
# The composite indicators are a combination of different variables
# each value within a variable has a score and these need to be
# coded for the different categories.
# Then the variables can be summed in order to get the score
# This will be done for multiple sectors.
#### Composite indicators ############
### Food Security & Agriculture ####
# FCS
data <- data %>%
mutate(
# FCS
fcs_category_class = recode(
fcs_category,
"poor" = 4,
"borderline" = 2,
"acceptable" = 0
),
# HHS
hhs_category_class = recode(
hhs_category,
"severe_hunger" = 4,
"moderate_hunger" = 2,
"little_hunger" = 0
),
# Food Source
food_source_class = case_when(
food_source %in% c('gift', 'assistance') ~ 2,
food_source == 'borrowed' ~1,
TRUE ~ 0
),
# ag impact
ag_impact_class = case_when(
agricultural_impact_how == '76_100' ~ 3,
agricultural_impact_how == '51_75' ~ 1,
agricultural_impact %in% c('no', 'not_applicable') ~ 0,
agricultural_impact_how %in% c('0_25', '26_50' ) ~ 0
),
# livestock impact
ls_impact_class = case_when(
livestock_impact_how.livestock_died == 1 |
livestock_impact_how.left_unattended == 1 ~ 2,
livestock_impact_how.livestock_ill == 1 |
livestock_impact_how.less_milk == 1 ~ 1,
      livestock_impact == 0 ~ 0,
      # case_when() is first-match: the NA branch must precede the TRUE fallback
      is.na(livestock_impact) ~ NA_real_,
      TRUE ~ 0
)
)
fsac_vars <- c("fcs_category_class", "hhs_category_class", "food_source_class", "ag_impact_class", "ls_impact_class")
data$fsac_score <- comp_score(data, fsac_vars)
data <- data %>%
mutate(
fsac_severity = case_when(
fsac_score <= 2 ~ 1,
fsac_score <= 5 ~ 2,
fsac_score <= 8 ~ 3,
fsac_score <= 16 ~ 4
),
fsac_sev_high = case_when(
fsac_severity <= 2 ~ 0,
fsac_severity <= 4 ~ 1
)
)
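# e.g. poor FCS (4) + moderate hunger (2) gives fsac_score 6 -> fsac_severity 3 (high severity)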
##################################################################
### Protection ####
# First set up the variables required to calculate the indicators, then calculate them.
# This way, if the weights change, everything is in one place.
# protection incidents
severe_prot_incidents_vars <- c(
"adult_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.forced_work",
"child_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.forcibly_detained",
"adult_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_settlement",
#### added from less_severe_prot_incidents
"adult_prot_incidents.verbally_threatened",
"child_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.hindered_leave_district",
"child_prot_incidents.hindered_leave_district"
)
# less_severe_prot_incidents_vars <-c(
# "adult_prot_incidents.verbally_threatened",
# "child_prot_incidents.verbally_threatened",
# "adult_prot_incidents.assaulted_without_weapon",
# "child_prot_incidents.assaulted_without_weapon",
# "adult_prot_incidents.hindered_leave_district",
# "child_prot_incidents.hindered_leave_district"
# )
data$severe_prot_incidents <- comp_score(data, severe_prot_incidents_vars)
# data$less_severe_prot_incidents <- comp_score(data, less_severe_prot_incidents_vars)
# protection concerns
severe_prot_concerns_vars <- c(
"prot_concerns.violence_maiming",
"prot_concerns.abduction",
"prot_concerns.explosive_hazards",
"prot_concerns.psych_wellbeing",
# added from less_severe_prot_concern
"prot_concerns.violence_injuries",
"prot_concerns.early_marriage",
"prot_concerns.destruction_property",
"prot_concerns.theft"
)
# less_severe_prot_concerns_vars <- c(
# "prot_concerns.violence_injuries",
# "prot_concerns.early_marriage",
# "prot_concerns.destruction_property",
# "prot_concerns.theft"
# )
data$severe_prot_concerns <- comp_score(data, severe_prot_concerns_vars)
# data$less_severe_prot_concerns <- comp_score(data, less_severe_prot_concerns_vars)
# explosive hazards
severe_explosive_hazards_vars <- c(
"explosive_impact.injury_death",
"explosive_impact.access_services",
"explosive_impact.relocation",
"explosive_impact.livelihoods_impact",
"explosive_impact.psych_impact"
)
less_severe_explosive_hazards_vars <- c(
"explosive_impact.restrict_recreation"
)
data$severe_explosive_hazards <- comp_score(data, severe_explosive_hazards_vars)
data$less_severe_explosive_hazards <- comp_score(data, less_severe_explosive_hazards_vars)
# tazkira
tazkira_total_vars <- c(
"adult_tazkira",
"child_tazkira")
data$tazkira_total <- comp_score(data, tazkira_total_vars)
# store as a data column (rather than a free-standing vector) so that the
# mutate() below resolves it by name
data$children_working_yes_no_2 <- case_when(
  data$children_working == 0 ~ "0",
  data$children_working >= 1 ~ "1 or more",
  TRUE ~ NA_character_
)
# Protection Severity Score
## Weights
data <- data %>%
mutate(
prot_incident_class = case_when(
severe_prot_incidents >= 1 ~ 3,
# severe_prot_incidents == 0 & data$less_severe_prot_incidents >= 1 ~ 2,
TRUE ~ 0),
# violence targeting women, girls, boys
sgbv_incidents_class = case_when(
other_incidents.sgbv == 1 | other_concerns.sgbv == 1 ~ 2,
TRUE ~ 0
),
# children working unsafe conditions
children_work_safety_class = case_when(
children_working_yes_no_2 =='1 or more' ~ 1,
TRUE ~ 0
),
prot_concerns_class = case_when(
severe_prot_concerns >= 1 ~ 3,
# severe_prot_concerns == 0 & data$less_severe_prot_concerns >= 1 ~ 2,
TRUE ~ 0
),
# hh members injured conflict or nat disaster
injuries_class = case_when(
adult_injuries_cause %in% c('conflict', 'natural_disaster') |
child_injuries_cause %in% c('conflict', 'natural_disaster') ~ 3,
TRUE ~ 0
),
prot_explosive_hazards_class = case_when(
severe_explosive_hazards >= 1 ~ 3,
severe_explosive_hazards == 0 & less_severe_explosive_hazards >=1 ~ 2,
TRUE ~ 0
),
    tazkira_class = case_when(
      tazkira_total == 0 ~ 2,
      tazkira_total > 0 & tazkira_total < hh_size ~ 1,
      tazkira_total >= hh_size ~ 0
    )
)
# Score
prot_score_vars <- c(
"prot_incident_class",
"sgbv_incidents_class",
"children_work_safety_class",
"prot_concerns_class",
"injuries_class",
"prot_explosive_hazards_class",
"tazkira_class")
data$prot_score <- comp_score(data, prot_score_vars)
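# e.g. one severe protection incident (3) + no tazkira in the household (2)
# gives prot_score 5 -> prot_severity 2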
data <- data %>%
mutate(
prot_severity = case_when(
prot_score <= 2 ~ 1,
prot_score <= 5 ~ 2,
prot_score <= 8 ~ 3,
prot_score <= 18 ~ 4
),
prot_sev_high = case_when(
prot_severity >= 3 ~ 1,
TRUE ~ 0
)
)
################## protection new indicator 1 ######################
prot_all_indictors <- c(
"adult_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.hindered_leave_settlement",
"adult_prot_incidents.hindered_leave_district",
"adult_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.verbally_threatened",
"child_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_district",
"child_prot_incidents.forced_work",
"child_prot_incidents.forcibly_detained",
"other_incidents.sgbv",
"other_incidents.other",
"prot_concerns.violence_maiming",
"prot_concerns.violence_injuries",
"prot_concerns.psych_wellbeing",
"prot_concerns.abduction",
"prot_concerns.theft",
"prot_concerns.explosive_hazards",
"prot_concerns.destruction_property",
"prot_concerns.early_marriage",
"prot_concerns.other",
"other_concerns.sgbv",
"other_concerns.other"
)
data$prot_all_indictors_score <- comp_score(data, prot_all_indictors)
data <- data %>%
mutate(
prot_new_indicator_1 = case_when(
prot_all_indictors_score >= 1 ~ ">=1",
prot_all_indictors_score == 0 ~ "0",
TRUE ~ NA_character_
)
)
################## protection new indicator 2 ######################
data <- data %>%
mutate( displ_explosive_presence_na_to_0 = case_when(
displ_explosive_presence == "both" ~ 1,
displ_explosive_presence == "current" ~ 1,
displ_explosive_presence == "previous" ~ 1,
displ_explosive_presence == "no" ~ 0,
TRUE ~ 0
)
)
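# NB: the TRUE ~ 0 fallback also maps NA (question not asked/answered) to 0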
prot_all_indictors_2 <- c(
"adult_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.hindered_leave_settlement",
"adult_prot_incidents.hindered_leave_district",
"adult_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.verbally_threatened",
"child_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_district",
"child_prot_incidents.forced_work",
"child_prot_incidents.forcibly_detained",
"other_incidents.sgbv",
"other_incidents.other",
"prot_concerns.violence_maiming",
"prot_concerns.violence_injuries",
"prot_concerns.psych_wellbeing",
"prot_concerns.abduction",
"prot_concerns.theft",
"prot_concerns.explosive_hazards",
"prot_concerns.destruction_property",
"prot_concerns.early_marriage",
"prot_concerns.other",
"other_concerns.sgbv",
"other_concerns.other",
"displ_explosive_presence_na_to_0"
)
data$prot_all_indictors_score_2 <- comp_score(data, prot_all_indictors_2)
data <- data %>%
mutate(
prot_new_indicator_2 = case_when(
prot_all_indictors_score_2 >= 1 ~ ">=1",
prot_all_indictors_score_2 == 0 ~ "0",
TRUE ~ NA_character_
)
)
################## protection new indicator 3 ######################
data <- data %>%
mutate( nondispl_explosive_presence_na_to_0 = case_when(
nondispl_explosive_presence == "yes" ~ 1,
nondispl_explosive_presence == "no" ~ 0,
TRUE ~ 0
)
)
prot_all_indictors_3 <- c(
"adult_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.hindered_leave_settlement",
"adult_prot_incidents.hindered_leave_district",
"adult_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.verbally_threatened",
"child_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_district",
"child_prot_incidents.forced_work",
"child_prot_incidents.forcibly_detained",
"other_incidents.sgbv",
"other_incidents.other",
"prot_concerns.violence_maiming",
"prot_concerns.violence_injuries",
"prot_concerns.psych_wellbeing",
"prot_concerns.abduction",
"prot_concerns.theft",
"prot_concerns.explosive_hazards",
"prot_concerns.destruction_property",
"prot_concerns.early_marriage",
"prot_concerns.other",
"other_concerns.sgbv",
"other_concerns.other",
"displ_explosive_presence_na_to_0",
"nondispl_explosive_presence_na_to_0"
)
data$prot_all_indictors_score_3 <- comp_score(data, prot_all_indictors_3)
data <- data %>%
mutate(
prot_new_indicator_3 = case_when(
prot_all_indictors_score_3 >= 1 ~ ">=1",
prot_all_indictors_score_3 == 0 ~ "0",
TRUE ~ NA_character_
)
)
################## protection new indicator 4 ######################
data <- data %>%
mutate(
lcsi_category_class2 = case_when(
lcsi_category == "food_secure" | lcsi_category == "marginally_insecure" ~ 0,
lcsi_category == "moderately_insecure" | lcsi_category == "severely_insecure" ~ 1,
TRUE ~ 0
)
)
prot_all_indictors_4 <- c(
"adult_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.hindered_leave_settlement",
"adult_prot_incidents.hindered_leave_district",
"adult_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.verbally_threatened",
"child_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_district",
"child_prot_incidents.forced_work",
"child_prot_incidents.forcibly_detained",
"other_incidents.sgbv",
"other_incidents.other",
"prot_concerns.violence_maiming",
"prot_concerns.violence_injuries",
"prot_concerns.psych_wellbeing",
"prot_concerns.abduction",
"prot_concerns.theft",
"prot_concerns.explosive_hazards",
"prot_concerns.destruction_property",
"prot_concerns.early_marriage",
"prot_concerns.other",
"other_concerns.sgbv",
"other_concerns.other",
"displ_explosive_presence_na_to_0",
"nondispl_explosive_presence_na_to_0",
"lcsi_category_class2"
)
data$prot_all_indictors_score_4 <- comp_score(data, prot_all_indictors_4)
data <- data %>%
mutate(
prot_new_indicator_4 = case_when(
prot_all_indictors_score_4 >= 1 ~ ">=1",
prot_all_indictors_score_4 == 0 ~ "0",
TRUE ~ NA_character_
)
)
#################################################################
################## protection new indicator 5 ######################
prot_all_indictors_5 <- c(
"adult_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.hindered_leave_settlement",
"adult_prot_incidents.hindered_leave_district",
"adult_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.verbally_threatened",
"child_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_district",
"child_prot_incidents.forced_work",
"child_prot_incidents.forcibly_detained",
"other_incidents.sgbv",
"other_incidents.other",
"prot_concerns.violence_maiming",
"prot_concerns.violence_injuries",
"prot_concerns.psych_wellbeing",
"prot_concerns.abduction",
"prot_concerns.theft",
"prot_concerns.explosive_hazards",
"prot_concerns.destruction_property",
"prot_concerns.early_marriage",
"prot_concerns.other",
"other_concerns.sgbv",
"other_concerns.other",
"displ_explosive_presence_na_to_0",
"nondispl_explosive_presence_na_to_0",
"lcsi_category_class2",
"children_work_safety_class"
)
data$prot_all_indictors_score_5 <- comp_score(data, prot_all_indictors_5)
data <- data %>%
mutate(
prot_new_indicator_5 = case_when(
prot_all_indictors_score_5 >= 1 ~ ">=1",
prot_all_indictors_score_5 == 0 ~ "0",
TRUE ~ NA_character_
)
)
################## protection new indicator 6 ######################
data <- data %>%
mutate(
other_impact_class = case_when(
other_impact.injury_death == 1 | other_impact.new_mines == 1 ~ 1,
TRUE ~ 0
)
)
prot_all_indictors_6 <- c(
"adult_prot_incidents.verbally_threatened",
"adult_prot_incidents.assaulted_without_weapon",
"adult_prot_incidents.assaulted_with_weapon",
"adult_prot_incidents.hindered_leave_settlement",
"adult_prot_incidents.hindered_leave_district",
"adult_prot_incidents.forced_work",
"adult_prot_incidents.forcibly_detained",
"child_prot_incidents.verbally_threatened",
"child_prot_incidents.assaulted_without_weapon",
"child_prot_incidents.assaulted_with_weapon",
"child_prot_incidents.hindered_leave_settlement",
"child_prot_incidents.hindered_leave_district",
"child_prot_incidents.forced_work",
"child_prot_incidents.forcibly_detained",
"other_incidents.sgbv",
"other_incidents.other",
"prot_concerns.violence_maiming",
"prot_concerns.violence_injuries",
"prot_concerns.psych_wellbeing",
"prot_concerns.abduction",
"prot_concerns.theft",
"prot_concerns.explosive_hazards",
"prot_concerns.destruction_property",
"prot_concerns.early_marriage",
"prot_concerns.other",
"other_concerns.sgbv",
"other_concerns.other",
"displ_explosive_presence_na_to_0",
"nondispl_explosive_presence_na_to_0",
"lcsi_category_class2",
"other_impact_class"
)
data$prot_all_indictors_score_6 <- comp_score(data, prot_all_indictors_6)
data <- data %>%
mutate(
prot_new_indicator_6 = case_when(
prot_all_indictors_score_6 >= 1 ~ ">=1",
prot_all_indictors_score_6 == 0 ~ "0",
TRUE ~ NA_character_
)
)
###################################################end
### ESNFI ####
# shelter type
data$shelter_class <- ifelse(data$shelter == 'open_space', 3,
                             ifelse(data$shelter == 'tent' | data$shelter == 'makeshift_shelter' |
                                      data$shelter == 'collective_centre' | data$shelter == 'transitional', 2, 0))
# shelter damage
data$shelter_damage_class<-ifelse(data$shelter_damage_extent== 'fully_destroyed' & data$shelter_damage_repair == 'no',3,
ifelse(data$shelter_damage_extent== 'significant_damage' & data$shelter_damage_repair == 'no',2,
ifelse(data$shelter_damage_extent== 'partial_damage' & data$shelter_damage_repair == 'no',1,0)))
data$shelter_damage_class[is.na(data$shelter_damage_class)] <- 0
# TENENCY AGREEMENT
data$tenancy_class <- ifelse(data$tenancy == 'unofficial', 3,
                             ifelse(data$tenancy == 'own_home_without_doc' | data$tenancy == 'rental_verbal' |
                                      data$shelter_hosted == 'yes', 2, 0))
data$tenancy_class[is.na(data$tenancy_class)] <- 0
# blankets
# fewer blankets than household members scores 3
data$blankets_class <- ifelse(data$blankets_number < data$hh_size, 3, 0)
data$blankets_class[is.na(data$blankets_class)] <- 0
# basic needs
data$sleeping_mats <- car::recode(data$sleeping_mats, " 'yes' = 1; 'no' = 0")
data$tarpaulin <- car::recode(data$tarpaulin, " 'yes' = 1; 'no' = 0")
data$cooking_pots <- car::recode(data$cooking_pots, " 'yes' = 1; 'no' = 0")
data$stainless_steel <- car::recode(data$stainless_steel, " 'yes' = 1; 'no' = 0")
data$water_storage <- car::recode(data$water_storage, " 'yes' = 1; 'no' = 0")
data$hygiene_sanitation <- car::recode(data$hygiene_sanitation, " 'yes' = 1; 'no' = 0")
data$basic_needs_total <- coerc(data[["sleeping_mats"]]) + coerc(data[["tarpaulin"]]) +
  coerc(data[["cooking_pots"]]) + coerc(data[["stainless_steel"]]) +
  coerc(data[["water_storage"]]) + coerc(data[["hygiene_sanitation"]])
data$basic_needs_score<-car::recode(data$basic_needs_total,
"0:2=3;
3:5=2;
6=0")
# ESNFI Severity Score
data$esnfi_score <- coerc(data[["shelter_class"]]) + coerc(data[["shelter_damage_class"]]) +
  coerc(data[["tenancy_class"]]) + coerc(data[["blankets_class"]]) + coerc(data[["basic_needs_score"]])
data$esnfi_severity<-car::recode(data$esnfi_score,
"0:2='1';
3:6='2';
7:9='3';
10:16='4'")
data$esnfi_sev_high<-ifelse(data$esnfi_severity==3|data$esnfi_severity==4,1,0)
###################################################end
### ESNFI for ARI ####
# shelter type
data$shelter_class_4_ari <- case_when(
  data$shelter == 'open_space' | data$shelter == 'tent' | data$shelter == 'makeshift_shelter' |
    data$shelter == 'collective_centre' ~ 3,
  data$shelter == 'transitional' ~ 2,
  data$shelter == 'permanent' & (data$shelter_hosted_why == 'cash_rent' |
                                   data$shelter_hosted_why == 'trans_shelter_host_family' |
                                   data$shelter_hosted_why == 'materials_tools_extend') ~ 2,
  TRUE ~ 0)
# shelter damage
data$shelter_damage_class_4_ari<-ifelse(data$shelter_damage_extent== 'fully_destroyed' & data$shelter_damage_repair == 'no',3,
ifelse(data$shelter_damage_extent== 'significant_damage' & data$shelter_damage_repair == 'no',2,
ifelse(data$shelter_damage_extent== 'partial_damage' & data$shelter_damage_repair == 'no',1,0)))
data$shelter_damage_class_4_ari[is.na(data$shelter_damage_class_4_ari)] <- 0
# TENENCY AGREEMENT
data$tenancy_class_4_ari<-ifelse(data$tenancy == 'unofficial',3,ifelse(data$tenancy == 'own_home_without_doc' | data$tenancy == 'rental_verbal' | data$shelter_hosted == 'yes',2,0))
data$tenancy_class_4_ari[is.na(data$tenancy_class_4_ari)] <- 0
# ESNFI Severity Score
data$esnfi_score_4_ari <- coerc(data[["shelter_class_4_ari"]]) +
  coerc(data[["shelter_damage_class_4_ari"]]) + coerc(data[["tenancy_class_4_ari"]])
data$esnfi_severity_4_ari<-car::recode(data$esnfi_score_4_ari,
"0:2='1';
3:4='2';
5:6='3';
7:10='4'")
data$esnfi_sev_high_4_ari<-ifelse(data$esnfi_severity_4_ari==3|data$esnfi_severity_4_ari==4,1,0)
#################################################################
### WASH ####
# water source #
data$water_source_class<-car::recode(data$water_source,
"'surface_water'=3;
'water_trucking'=2;
'spring_unprotected'=2;
'spring_protected'=0;
'handpump_private'=0;
'handpump_public'=0;
'piped_public'=0;
'other'=0")
# water barriers
data$water_barriers_class<-ifelse(data$water_sufficiency== 'insufficient' &
(data$water_barriers== 'too_far' |
data$water_barriers== 'high_risk' |
data$water_barriers== 'social_restrictions'),
3,ifelse(data$water_sufficiency== 'insufficient',2,
ifelse(data$water_sufficiency== 'barely_sufficient',1,0)))
data$water_barriers_class[is.na(data$water_barriers)] <- 0
# soap
data$soap_class<-ifelse(data$soap == 'yes_didnt_see' | data$soap == 'no', 1,0)
# latrines #
data$latrine_class<-ifelse(data$latrine == 'open' | data$latrine == 'public_latrine', 3,
ifelse(data$latrine == 'pit_latrine_uncovered',2,0))
# primary waste dispopsal #
data$waste_disposal_class<-ifelse(data$waste_disposal == 'open_space' | data$waste_disposal == 'burning', 2,0)
#distance to primary water source
data$water_distance_class<-ifelse(data$water_distance == 'over_1km'| data$water_distance == '500m_to_1km',3,0)
# WASH Severity Score
data$wash_score <- coerc(data[["water_source_class"]]) + coerc(data[["water_barriers_class"]]) +
  coerc(data[["soap_class"]]) + coerc(data[["latrine_class"]]) +
  coerc(data[["waste_disposal_class"]]) + coerc(data[["water_distance_class"]])
data$wash_severity<-car::recode(data$wash_score,
"0:2='1';
3:5='2';
6:8='3';
9:16='4'")
data$wash_sev_high<-ifelse(data$wash_severity==3|data$wash_severity==4,1,0)
#################################################################
### Nutrition ####
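# aggregate child MUAC screenings to one row per household: counts of screened
# children, moderate/severe MUAC cases, the minimum MUAC, and RUTF receipt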
muac_presence_analysis<-overall_muac_data %>%
group_by(`_submission__uuid`) %>%
filter(person_muac>=1) %>%
summarize(number_muac_person=sum(person_muac),
number_muac_mod_mal=sum(moderate_malnutrition),
number_muac_sev_mal=sum(severe_malnutrition),
number_muac_above_125 = sum(muac_measurement>=125, na.rm = T),
min_muac=min(muac_measurement),
ruft_reception_num = sum(rutf_reception== "yes"),
ruft_reception = sum(rutf_reception== "yes")>=1)
# Malnutrition present = 1, not present = 0
muac_presence_analysis$malnutrition_present<-ifelse(muac_presence_analysis$number_muac_mod_mal>=1 | muac_presence_analysis$number_muac_sev_mal>=1,1,0)
# join with parent table
data<-full_join(data, muac_presence_analysis, by = c("uuid"="_submission__uuid"))
# reported malnourishment (mod & sev muac)
data$muac_score <- ifelse(data$number_muac_sev_mal > 1, 7,
                          ifelse(data$number_muac_sev_mal == 1, 6,
                                 ifelse(data$number_muac_sev_mal == 0 & data$number_muac_mod_mal > 1, 4,
                                        ifelse(data$number_muac_sev_mal == 0 & data$number_muac_mod_mal == 1, 3, 0))))
# dietary diversity ---
# the nutrition composite indicator will therefore exclude HHs with children
# aged 2-5, since they are not asked this question ###
data$dietary_div_count <- coerc(data[["minimum_dietary_diversity.staples"]]) +
  coerc(data[["minimum_dietary_diversity.legumes"]]) + coerc(data[["minimum_dietary_diversity.dairy"]]) +
  coerc(data[["minimum_dietary_diversity.meat"]]) + coerc(data[["minimum_dietary_diversity.eggs"]]) +
  coerc(data[["minimum_dietary_diversity.vitamin_a_veg"]]) + coerc(data[["minimum_dietary_diversity.other_veg"]])
data$dietary_div_score <- ifelse(data$dietary_div_count == 0, 4,
                                 ifelse(data$dietary_div_count == 1, 3,
                                        ifelse(data$dietary_div_count == 2, 2,
                                               ifelse(data$dietary_div_count == 3, 1, 0))))
data$dietary_div_score[is.na(data$dietary_div_score)] <- 0
# Nutrition Severity Score
data$nut_score_hh_w_muac<-coerc(data[["muac_score"]])+coerc(data[["dietary_div_score"]])
data$nut_score<-data$nut_score_hh_w_muac
data$nut_score[is.na(data$nut_score)] <- 0
data$nut_severity<-car::recode(data$nut_score,
"0:2='1';
3:5='2';
6:8='3';
9:16='4'")
# data$nut_sev_high<-ifelse(data$nut_severity==3|data$nut_severity==4,1,0)
data$nut_sev_high<-ifelse(data$nut_severity==3|data$nut_severity==4 | data$nut_severity==2 ,1,0)
#################################################################
### Education EiE ####
education_analysis<-overall_hh_roster %>%
filter(!is.na(current_year_enrolled))
education_analysis$enrolled_and_attending<-ifelse(education_analysis$current_year_enrolled=='no',0,
ifelse(education_analysis$current_year_enrolled=='yes' & education_analysis$current_year_attending=='no',0,1))
education_analysis$total_schoolage_child<-1
#removal from school due to shock
education_analysis$shock_presence<-coerc(education_analysis[["edu_removal_shock.displacement"]])+coerc(education_analysis[["edu_removal_shock.conflict"]])+coerc(education_analysis[["edu_removal_shock.natural_disaster"]])
education_analysis$shock_presence[is.na(education_analysis$shock_presence)] <- 0
################## not part of composite #################################################
education_analysis$enrolled_1 <- if_else(education_analysis$current_year_enrolled=='no',0,1)
education_analysis$attending_1 <- if_else(education_analysis$current_year_attending=='no',0,1)
education_analysis <- education_analysis %>%
mutate(
attending_male = case_when(
current_year_attending == "yes" & hh_member_sex == "male" ~ 1,
TRUE ~ 0
),
attending_female = case_when(
current_year_attending == "yes" & hh_member_sex == "female" ~ 1,
TRUE ~ 0
),
enrolled_male = case_when(
current_year_enrolled == "yes" & hh_member_sex == "male" ~ 1,
TRUE ~ 0
),
enrolled_female = case_when(
current_year_enrolled == "yes" & hh_member_sex == "female" ~ 1,
TRUE ~ 0
),
shock_presence_male = case_when(
shock_presence > 0 & hh_member_sex == "male" ~ 1,
TRUE ~ 0
),
shock_presence_female = case_when(
shock_presence > 0 & hh_member_sex == "female" ~ 1,
TRUE ~ 0
)
)
####################################################################################
# group dataset into hh
education_analysis_hh<-education_analysis %>%
group_by(`_submission__uuid`) %>%
summarize(count_school_child=sum(total_schoolage_child),
count_enrolled_attending=sum(enrolled_and_attending),
count_current_enrolled = sum(enrolled_1, na.rm = T),
count_current_enrolled_male = sum(enrolled_male, na.rm = T),
count_current_enrolled_female = sum(enrolled_female, na.rm = T),
count_current_attending = sum(attending_1, na.rm = T),
count_current_attending_male = sum(attending_male, na.rm = T),
count_current_attending_female = sum(attending_female, na.rm = T),
count_shock=sum(shock_presence),
count_shock_male = sum(shock_presence_male, na.rm = T),
count_shock_female = sum(shock_presence_female, na.rm = T)
)
# shock weight
education_analysis_hh$shock_class<-ifelse(education_analysis_hh$count_shock >= 1, 5,0)
# percent children enrolled or attending
education_analysis_hh$percent_enrolled= coerc(education_analysis_hh[["count_enrolled_attending"]])/coerc(education_analysis_hh[["count_school_child"]])
education_analysis_hh$enroll_perc_class<-car::recode(education_analysis_hh$percent_enrolled,
"0:0.249=1;
0.25:0.499=2;
0.5:0.749=3;
0.75:1=4")
# greater than 3 children not attending
education_analysis_hh$count_not_enrolled<-coerc(education_analysis_hh[["count_school_child"]])-coerc(education_analysis_hh[["count_enrolled_attending"]])
education_analysis_hh$count_not_enrolled_class<-ifelse(education_analysis_hh$count_not_enrolled>=3,3,0)
# join with parent table
data<-full_join(data, education_analysis_hh,by = c("uuid"="_submission__uuid"))
# reasons not attending
data$severe_not_attending <- coerc(data[["boy_unattendance_reason.insecurity"]]) +
  coerc(data[["boy_unattendance_reason.child_works_instead"]]) +
  coerc(data[["girl_unattendance_reason.insecurity"]]) +
  coerc(data[["girl_unattendance_reason.child_works_instead"]])
data$severe_not_attending[is.na(data$severe_not_attending)] <- 0
data$less_severe_not_attending <- coerc(data[["boy_unattendance_reason.lack_facilities"]]) +
  coerc(data[["boy_unattendance_reason.lack_documentation"]]) +
  coerc(data[["boy_unattendance_reason.too_expensive"]]) +
  coerc(data[["girl_unattendance_reason.lack_facilities"]]) +
  coerc(data[["girl_unattendance_reason.lack_documentation"]]) +
  coerc(data[["girl_unattendance_reason.too_expensive"]])
data$less_severe_not_attending[is.na(data$less_severe_not_attending)] <- 0
data$not_attending_class<-ifelse(data$severe_not_attending >= 1,3, ifelse(data$severe_not_attending==0 & data$less_severe_not_attending >=1,2,0))
data$not_attending_class[is.na(data$not_attending_class)] <- 0
# Education Severity Score
data$edu_score_hh_w_schoolage <- coerc(data[["enroll_perc_class"]]) + coerc(data[["shock_class"]]) +
  coerc(data[["count_not_enrolled_class"]]) + coerc(data[["not_attending_class"]])
data$edu_score<-data$edu_score_hh_w_schoolage
data$edu_score[is.na(data$edu_score)] <- 0
data$edu_severity<-car::recode(data$edu_score,
"0:2='1';
3:5='2';
6:8='3';
9:16='4'")
data$edu_sev_high<-ifelse(data$edu_severity==3|data$edu_severity==4,1,0)
#################################################################
### Health ####
#deaths under 5 years age
overall_death_roster$deaths_under5<-ifelse(overall_death_roster$hh_died_age<5,1,0)
# deaths >= 5
overall_death_roster$deaths_over5<-ifelse(overall_death_roster$hh_died_age>=5,1,0)
# group by hh
health_analysis<-overall_death_roster %>%
group_by(`_submission__uuid`) %>%
summarize(number_death_under5=sum(deaths_under5),
hh_member_died = sum(hh_member_died),
number_death_over5=sum(deaths_over5))
# join with parent dataset
data<-full_join(data, health_analysis,by = c("uuid"="_submission__uuid"))
data$number_death_under5[is.na(data$number_death_under5)] <- 0
# #deaths under 5 yrs age weight
# data$number_death_under5_class<-ifelse(data$number_death_under5 >= 1, 3,0)
# data$number_death_under5_class[is.na(data$number_death_under5_class)] <- 0
#
# # deaths >= 5 weight
# data$number_death_over5_class<-ifelse(data$number_death_over5 >= 1, 2,0)
# data$number_death_over5_class[is.na(data$number_death_over5_class)] <- 0
# health facility barriers
data$health_barriers_total <- coerc(data[["health_facility_barriers.unsafe"]]) +
  coerc(data[["health_facility_barriers.cost_services"]]) +
  coerc(data[["health_facility_barriers.cost_medicines"]]) +
  coerc(data[["health_facility_barriers.too_far"]]) +
  coerc(data[["health_facility_barriers.documentation_problems"]]) +
  coerc(data[["health_facility_barriers.insufficient_female_staff"]]) +
  coerc(data[["health_facility_barriers.treatment_refused"]]) +
  coerc(data[["health_facility_barriers.other"]])
data$health_barriers_total[is.na(data$health_barriers_total)] <- 0
# data$health_facility_barriers_class<-ifelse(data$health_facility_access == 'no' & data$health_barriers_total>1,3,ifelse(data$health_facility_access == 'no' & data$health_barriers_total==1,2,0))
data$health_facility_barriers_class<-ifelse(data$health_facility_access == 'no' ,3,0)
# health facility distance
data$health_facility_dist_class <- ifelse(data$health_facility_distance == 'none' |
                                            data$health_facility_distance == 'more_10km', 3,
                                          ifelse(data$health_facility_distance == '6_10km', 2, 0))
# health facilities affected
data$health_facility_affected_class <- ifelse(data$health_facility_affected_how == 'forcibly_closed' |
                                                data$health_facility_affected_how == 'damaged_conflict' |
                                                data$health_facility_affected_how == 'damaged_natural_disasters', 3,
                                              ifelse(data$health_facility_affected_how == 'lack_staff' |
                                                       data$health_facility_affected_how == 'lack_medicine', 2, 0))
data$health_facility_affected_class[is.na(data$health_facility_affected_class)] <- 0
# health selected as priority need
data$health_priority_need_class<-ifelse(data$priority_needs.healthcare == 1, 3,0)
# behaviour changes as result of conflict
data$behavior_change_cause_class<-case_when(data$adult_behavior_change == 'yes'& data$behavior_change_cause=='yes'~ 3,
data$child_behavior_change == 'yes'& data$behavior_change_cause=='yes'~ 3, TRUE~ 0)
data$behavior_change_cause_class[is.na(data$behavior_change_cause_class)] <- 0
# birth location
# data$birth_location_class<-ifelse(data$birth_location == 'home'|data$birth_location == 'midwife_home'|data$birth_location == 'outside'|data$birth_location == 'other',1,0)
# data$birth_location_class[is.na(data$birth_location_class)] <- 0
# Health Severity Score
# data$health_score<- coerc(data[["health_facility_barriers_class"]])+coerc(data[["health_facility_dist_class"]])+coerc(data[["health_facility_affected_class"]])+coerc(data[["health_priority_need_class"]])+coerc(data[["behavior_change_cause_class"]])+coerc(data[["birth_location_class"]])
data$health_score <- coerc(data[["health_facility_barriers_class"]]) + coerc(data[["health_facility_dist_class"]]) +
  coerc(data[["health_facility_affected_class"]]) + coerc(data[["health_priority_need_class"]]) +
  coerc(data[["behavior_change_cause_class"]])
data$health_severity<-car::recode(data$health_score,
"0:2='1';
3:5='2';
6:8='3';
9:16='4'")
data$health_sev_high<-ifelse(data$health_severity==3|data$health_severity==4,1,0)
#################################################################
# number sectoral needs ####
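# count of sectors (0-7) in which the household meets the high-severity threshold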
data$total_sectoral_needs <- coerc(data[["fsac_sev_high"]]) + coerc(data[["prot_sev_high"]]) +
  coerc(data[["esnfi_sev_high"]]) + coerc(data[["wash_sev_high"]]) + coerc(data[["nut_sev_high"]]) +
  coerc(data[["edu_sev_high"]]) + coerc(data[["health_sev_high"]])
#################################################################
### LSCI - coping strategies ####
# coping severity
data$lcsi_severity<-car::recode(data$lcsi_category,
"'food_secure'='minimal';
'marginally_insecure'='stress';
'moderately_insecure'='severe';
'severely_insecure'='extreme'")
## Indicators ####
### Numerators
## Some numerators combine variables calcualte those here
data$edu_age_boys_girls_num <- comp_score(data, c("boys_ed","girls_ed"))
food_water_rent_vars <- c(
"food_exp",
"water_expt",
"rent_exp"
)
data$food_water_rent_num <- comp_score(data, food_water_rent_vars)
all_expenses_vars <- c(
"food_exp",
"water_expt",
"rent_exp",
"fuel_exp",
"debt_exp")
data$all_expenses <- comp_score(data, all_expenses_vars)
min_die_vars <- c(
"minimum_dietary_diversity.staples",
"minimum_dietary_diversity.legumes",
"minimum_dietary_diversity.dairy",
"minimum_dietary_diversity.meat",
"minimum_dietary_diversity.eggs",
"minimum_dietary_diversity.vitamin_a_veg",
"minimum_dietary_diversity.other_veg")
data$min_die_num <- comp_score(data, min_die_vars)
priority_nfi_vars <- c(
"sleeping_mats",
"tarpaulin",
"cooking_pots",
"stainless_steel",
"water_storage",
"hygiene_sanitation"
)
data$priority_nfi_num <- comp_score(data, priority_nfi_vars)
child_vars <- c(
"males_0_2_total",
"males_3_5_total",
"females_0_2_total",
"females_3_5_total")
data$children_under5 <- comp_score(data, child_vars)
comp_ind_vars <- c(
"prot_sev_high",
"fsac_sev_high",
"esnfi_sev_high",
"wash_sev_high",
"edu_sev_high",
"health_sev_high"
)
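# number of sectors (excluding nutrition) in which the household is in high severity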
data$comp_ind_sev <- comp_score(data, comp_ind_vars)
comp_ind_vars_nut <- c(
"prot_sev_high",
"fsac_sev_high",
"esnfi_sev_high",
"wash_sev_high",
"edu_sev_high",
"health_sev_high",
"nut_sev_high"
)
data$comp_ind_sev_nut <- comp_score(data, comp_ind_vars_nut)
## Age categories hh
data <- data %>%
mutate(
age_0_4_hh = case_when(
hoh_age <=4 ~ 1,
TRUE ~ 0
),
age_0_17_hh = case_when(
hoh_age <=17 ~ 1,
TRUE ~ 0
) ,
age_0_14_hh = case_when(
hoh_age <=14 ~ 1,
TRUE ~ 0
) ,
age_10_17_hh = case_when(
hoh_age >= 10 & hoh_age <=17 ~ 1,
TRUE ~ 0
) ,
age_15_64_hh = case_when(
hoh_age >= 14 & hoh_age <=64 ~ 1,
TRUE ~ 0
) ,
age_18_59_hh = case_when(
hoh_age >= 18 & hoh_age <=59 ~ 1,
TRUE ~ 0
) ,
age_18_64_hh = case_when(
hoh_age >= 18 & hoh_age <=64 ~ 1,
TRUE ~ 0
) ,
age_60_and_more_hh = case_when(
hoh_age >= 60 ~ 1,
TRUE ~ 0
) ,
age_65_hh = case_when(
hoh_age >= 65 ~ 1,
TRUE ~ 0
) ,
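    # testt: data-check flag, 1 when the reported head-of-household age is plausible (< 120)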
testt = case_when(
hoh_age < 120 ~ 1,
TRUE ~ 0
)
)
## Age categories roster
hh_group <- overall_hh_roster %>%
mutate(
age_0_4 = hh_member_age <=4,
age_0_17 = hh_member_age <=17,
age_0_14 = hh_member_age <=14,
age_10_17 = hh_member_age >= 10 & hh_member_age <=17,
age_15_64 = hh_member_age >= 14 & hh_member_age <=64,
age_18_59 = hh_member_age >= 18 & hh_member_age <=59,
age_18_64 = hh_member_age >= 18 & hh_member_age <=64,
age_60_and_more = hh_member_age >= 60,
age_65 = hh_member_age >= 65
) %>%
group_by(`_submission__uuid`) %>%
summarise(
age_0_4 = sum(age_0_4, na.rm = TRUE),
age_0_17 = sum(age_0_17, na.rm = TRUE),
age_0_14 = sum(age_0_14, na.rm = TRUE),
age_10_17 = sum(age_10_17, na.rm = TRUE),
age_15_64 = sum(age_15_64, na.rm = TRUE),
age_18_59 = sum(age_18_59, na.rm = TRUE),
age_18_64 = sum(age_18_64, na.rm = TRUE),
age_60_and_more = sum(age_60_and_more, na.rm = T),
age_65 = sum(age_65, na.rm = TRUE)
)
# Age Cat Vars
age_0_4_vars <- c(
'age_0_4',
'age_0_4_hh'
)
age_0_17_vars <- c(
'age_0_17',
'age_0_17_hh'
)
age_0_14_vars <- c(
'age_0_14',
'age_0_14_hh'
)
age_10_17_vars <- c(
'age_10_17',
'age_10_17_hh'
)
age_15_64_vars <- c(
'age_15_64',
'age_15_64_hh'
)
age_18_59_vars <- c(
'age_18_59',
'age_18_59_hh'
)
age_18_64_vars <- c(
'age_18_64',
'age_18_64_hh'
)
age_60_and_more_var <- c(
'age_60_and_more',
'age_60_and_more_hh'
)
age_65_var <- c(
'age_65',
'age_65_hh'
)
data <- full_join(data, hh_group,by = c("uuid"="_submission__uuid"))
# Merge Age cat hh_roster and hh data
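# the merged counts add the head of household (the hoh_age-based *_hh flags)
# to the roster counts, assuming the head is not listed in the member roster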
data$age_0_4_merged <- comp_score(data,age_0_4_vars)
data$age_0_17_merged <- comp_score(data,age_0_17_vars)
data$age_0_14_merged <- comp_score(data,age_0_14_vars)
data$age_10_17_merged <- comp_score(data,age_10_17_vars)
data$age_15_64_merged <- comp_score(data,age_15_64_vars)
data$age_18_59_merged <- comp_score(data,age_18_59_vars)
data$age_18_64_merged <- comp_score(data,age_18_64_vars)
data$age_60_and_more_merged <- comp_score(data,age_60_and_more_var)
data$age_65_merged <- comp_score(data,age_65_var)
# Adjust displacement status: non-displaced/host households are reclassified
# using the district-level list (non_displ_class); other statuses are kept
non_displ_data <- read.csv("input/Non_Displaced_Host_List_v2.csv",stringsAsFactors=F,na.strings = c("", "NA"))
data<-full_join(data, non_displ_data,by = c("district"="district"))
data$final_displacement_status_non_displ <- ifelse(
  data$final_displacement_status == 'non_displaced' | data$final_displacement_status == 'host',
  data$non_displ_class, data$final_displacement_status)
# prev_displacement
data <- data %>%
mutate(
# prev_displacement_num
prev_displacement_num_class = case_when(
prev_displacement_num == 2 ~ "2",
prev_displacement_num == 3 ~ "3",
prev_displacement_num >3 ~ "4+"
),
# refugee_displace_year
refugee_displace_year_class = case_when(
refugee_displace_year == 0 ~ "0",
refugee_displace_year == 1 ~ "1",
refugee_displace_year == 2 ~ "2",
refugee_displace_year == 3 ~ "3",
refugee_displace_year > 3 ~ "4+"
),
# cb_return_displace_year
cb_return_displace_year_class = case_when(
cb_return_displace_year == 0 ~ "0",
cb_return_displace_year == 1 ~ "1",
cb_return_displace_year == 2 ~ "2",
cb_return_displace_year == 3 ~ "3",
cb_return_displace_year > 3 ~ "4+"
),
# cb_return_return_year
cb_return_return_year_call = case_when(
cb_return_return_year == 0 ~ "0",
cb_return_return_year == 1 ~ "1",
cb_return_return_year == 2 ~ "2",
cb_return_return_year == 3 ~ "3",
cb_return_return_year > 3 ~ "4+"
),
# idp_displ_year
idp_displ_year_class = case_when(
idp_displ_year == 0 ~ "0",
idp_displ_year == 1 ~ "1",
idp_displ_year == 2 ~ "2",
idp_displ_year == 3 ~ "3",
idp_displ_year > 3 ~ "4+"
),
# head of household age_group
hoh_age_group = case_when(
hoh_age >= 65 ~ "65+",
hoh_age < 65 ~ "<65"
),
# head of household disabled
hoh_disabled = case_when(
wg_walking == "yes" | wg_selfcare == "yes" ~ "disabled",
wg_walking == "no" | wg_selfcare == "no" ~ "not_disabled",
TRUE ~ NA_character_
),
pregnant_member = case_when(
pregnant > 0 ~ "at_least_one_mem_pregnant",
pregnant == 0 ~ "no_mem_pregnent",
TRUE ~ NA_character_
),
lactating_member = case_when(
lactating > 0 ~ "at_least_one_mem_lactating",
lactating == 0 ~ "no_mem_lactating",
TRUE ~ NA_character_
),
pregnant_lactating_member = case_when(
pregnant > 0 | lactating > 0 ~ "at_least_one_mem_pregnant_lactating",
pregnant == 0 & lactating == 0 ~ "no_mem_pregnent_lactating",
TRUE ~ NA_character_
),
female_literacy_yes_no = case_when(
female_literacy == 0 ~ "0",
female_literacy >= 1 ~ "1 or more",
TRUE ~ NA_character_
),
male_literacy_yes_no = case_when(
male_literacy == 0 ~ "0",
male_literacy >= 1 ~ "1 or more",
TRUE ~ NA_character_
),
# How many adults 18+ years worked outside of the household in the last 30 days?
adults_working_yes_no = case_when(
adults_working == 0 ~ "0",
adults_working >= 1 ~ "1 or more",
TRUE ~ NA_character_
),
children_working_yes_no = case_when(
children_working == 0 ~ "0",
children_working >= 1 ~ "1 or more",
TRUE ~ NA_character_
),
ag_income_cal = case_when(
ag_income == 0 ~ 0,
ag_income > 0 ~ ag_income / hh_size,
TRUE ~ NA_real_
),
livestock_income_cal = case_when(
livestock_income == 0 ~ 0,
livestock_income > 0 ~ livestock_income / hh_size,
TRUE ~ NA_real_
),
rent_income_cal = case_when(
rent_income == 0 ~ 0,
rent_income > 0 ~ rent_income / hh_size,
TRUE ~ NA_real_
),
small_business_income_cal = case_when(
small_business_income == 0 ~ 0,
small_business_income > 0 ~ small_business_income / hh_size,
TRUE ~ NA_real_
),
unskill_labor_income_cal = case_when(
unskill_labor_income == 0 ~ 0,
unskill_labor_income > 0 ~ unskill_labor_income / hh_size,
TRUE ~ NA_real_
),
skill_labor_income_cal = case_when(
skill_labor_income == 0 ~ 0,
skill_labor_income > 0 ~ skill_labor_income / hh_size,
TRUE ~ NA_real_
),
formal_employment_income_cal = case_when(
formal_employment_income == 0 ~ 0,
formal_employment_income > 0 ~ formal_employment_income / hh_size,
TRUE ~ NA_real_
),
gov_benefits_income_cal = case_when(
gov_benefits_income == 0 ~ 0,
gov_benefits_income > 0 ~ gov_benefits_income / hh_size,
TRUE ~ NA_real_
),
hum_assistance_income_cal = case_when(
hum_assistance_income == 0 ~ 0,
hum_assistance_income > 0 ~ hum_assistance_income / hh_size,
TRUE ~ NA_real_
),
remittance_income_cal = case_when(
remittance_income == 0 ~ 0,
remittance_income > 0 ~ remittance_income / hh_size,
TRUE ~ NA_real_
),
loans_income_cal = case_when(
loans_income == 0 ~ 0,
loans_income > 0 ~ loans_income / hh_size,
TRUE ~ NA_real_
),
asset_selling_income_cal = case_when(
asset_selling_income == 0 ~ 0,
asset_selling_income > 0 ~ asset_selling_income / hh_size,
TRUE ~ NA_real_
),
total_income_cal = case_when(
total_income == 0 ~ 0,
total_income > 0 ~ total_income / hh_size,
TRUE ~ NA_real_
),
# Debt level
debt_amount_cal = case_when(
debt_amount == 0 ~ 0,
debt_amount > 0 ~ debt_amount / hh_size,
TRUE ~ NA_real_),
food_exp_cal = case_when(
total_income == 0 ~ 0,
total_income > 0 ~ food_exp / total_income,
TRUE ~ NA_real_
),
water_expt_cal = case_when(
total_income == 0 ~ 0,
total_income > 0 ~ water_expt / total_income,
TRUE ~ NA_real_
),
rent_exp_cal = case_when(
total_income == 0 ~ 0,
total_income > 0 ~ rent_exp / total_income,
TRUE ~ NA_real_
),
fuel_exp_cal = case_when(
total_income == 0 ~ 0,
total_income > 0 ~ fuel_exp / total_income,
TRUE ~ NA_real_
),
debt_exp_cal = case_when(
total_income == 0 ~ 0,
total_income > 0 ~ debt_exp / total_income,
TRUE ~ NA_real_
),
basic_needs_cal = case_when(
total_income == 0 ~ 0,
food_water_rent_num > 0 ~ food_water_rent_num / total_income,
TRUE ~ NA_real_
),
minimum_dietary_diversity_cal = case_when(
min_die_num >= 4 ~ "4 food groups",
min_die_num < 4 ~ "<4 food groups",
TRUE ~ NA_character_
),
rooms_hh_cal = case_when(
rooms > 0 ~ hh_size / rooms,
TRUE ~ 0
),
blankets_people_cal = case_when(
blankets_number == 0 ~ 0,
blankets_number > 0 ~ blankets_number / hh_size,
TRUE ~ NA_real_
),
blankets_suff_cal = case_when(
blankets_people_cal < 1 ~ "<1",
blankets_people_cal >= 1 ~ "1+",
TRUE ~ NA_character_
),
priority_nfi_cal = case_when(
priority_nfi_num <= 1 ~ "0_1",
priority_nfi_num <= 3 ~ "2_3",
priority_nfi_num <= 5 ~ "4_5",
priority_nfi_num <= 6 ~ "6",
TRUE ~ NA_character_
),
imp_energy_source1_cal = case_when(
energy_source %in% c("wood" , "animal_waste" , "paper_waste") ~ 1,
TRUE ~ 0
),
imp_energy_source2_cal = case_when(
energy_source %in% c("coal" , "charcoal" , "lpg" , "electricity") ~ 1,
TRUE ~ 0
),
diarrhea_cases_cal = case_when(
diarrhea_cases == 0 ~ 0,
diarrhea_cases > 0 ~ diarrhea_cases / diarrhea_total,
TRUE ~ NA_real_
),
perc_diarrhea_cases_cal = case_when(
diarrhea_cases == 0 ~ "0",
diarrhea_cases > 0 ~ ">=1",
TRUE ~ NA_character_
),
imp_water_source1_cal = case_when(
water_source %in% c("handpump_private", "handpump_public",
"piped_public", "spring_protected") ~ 1,
TRUE ~ 0
),
imp_water_source2_cal = case_when(
water_source %in% c("spring_unprotected","surface_water"
, "water_trucking", "other") ~ 1,
TRUE ~ 0
),
    # sanitation indicators are based on the latrine type (the categories
    # below belong to `latrine`, not `water_source`)
    imp_san_source1_cal = case_when(
      latrine %in% c("open", "pit_latrine_uncovered",
                     "other") ~ 1,
      TRUE ~ 0
    ),
    imp_san_source2_cal = case_when(
      latrine %in% c("public_latrine", "pit_latrine_covered",
                     "vip_latrine", "flush_toilet_open_drain",
                     "flush_toilet_septic") ~ 1,
      TRUE ~ 0
    ),
comp_ind_sev_2_call = case_when(
comp_ind_sev >= 2 ~ ">=2",
comp_ind_sev <2 ~ "<2",
TRUE ~ NA_character_
),
comp_ind_sev_2_nut_call = case_when(
comp_ind_sev_nut >= 2 ~ ">=2",
comp_ind_sev_nut <2 ~ "<2",
TRUE ~ NA_character_
),
comp_ind_sev_3_call = case_when(
comp_ind_sev >= 3 ~ ">=3",
comp_ind_sev < 3 ~ "<3",
TRUE ~ NA_character_
),
    # dependency ratio per household: dependants over working-age members;
    # sum() inside mutate() would aggregate the whole column, so element-wise
    # addition is used instead
    dep_ratio_call = case_when(
      age_0_14_merged == 0 & age_65_merged == 0 ~ 0,
      (age_0_14_merged > 0 | age_65_merged > 0) & age_15_64_merged > 0 ~
        (age_0_14_merged + age_65_merged) / age_15_64_merged,
      TRUE ~ NA_real_
    ),
    dep_ratio_call_2 = case_when(
      age_0_17_merged == 0 & age_60_and_more_merged == 0 ~ 0,
      (age_0_17_merged > 0 | age_60_and_more_merged > 0) & age_18_59_merged > 0 ~
        (age_0_17_merged + age_60_and_more_merged) / age_18_59_merged,
      TRUE ~ NA_real_
    ),
    # share of literate females/males among members aged 11+ (element-wise
    # addition; sum() would aggregate the whole column)
    female_lit_call = case_when(
      female_literacy == 0 ~ 0,
      female_literacy > 0 ~
        female_literacy / (females_11_17_total + females_18_plus_total),
      TRUE ~ NA_real_
    ),
    male_lit_call = case_when(
      male_literacy == 0 ~ 0,
      male_literacy > 0 ~
        male_literacy / (males_11_17_total + males_18_plus_total),
      TRUE ~ NA_real_
    ),
adult_behavior_change_call = case_when(
adult_behavior_change == "yes" ~ 1,
adult_behavior_change == "no" ~ 0,
TRUE ~ NA_real_
),
child_behavior_change_call = case_when(
child_behavior_change == "yes" ~ 1,
child_behavior_change == "no" ~ 0,
TRUE ~ NA_real_
),
atleast_one_behav_change_call = case_when(
child_behavior_change_call == 0 & adult_behavior_change_call == 0 ~ 0,
child_behavior_change_call > 0 | adult_behavior_change_call > 0 ~ 1,
TRUE ~ NA_real_
),
adults_working_call = case_when(
adults_working == 0 ~ 0,
adults_working > 0 & age_18_64 > 0 ~ adults_working/age_18_64,
TRUE ~ NA_real_
),
child_working_call = case_when(
is.na(children_working) ~ 0,
children_working == 0 ~ 0,
children_working > 0 & age_10_17 > 0 ~ children_working/age_10_17,
TRUE ~ NA_real_
),
adult_tazkira_cal = case_when(
adult_tazkira == 0 ~ "0",
adult_tazkira >= 1 ~ ">=1",
TRUE ~ NA_character_
),
child_tazkira_cal = case_when(
child_tazkira == 0 ~ "0",
child_tazkira >= 1 ~ ">=1",
TRUE ~ NA_character_
),
any_tazkira_cal = case_when(
adult_tazkira == 0 & child_tazkira == 0~ "0",
adult_tazkira >= 1 | child_tazkira >= 1~ ">=1",
TRUE ~ NA_character_
),
    # parentheses added: without them, any paper_waste/animal_waste household
    # was flagged "yes" regardless of blanket sufficiency (& binds before |)
    insuf_blank_energy = case_when(
      blankets_suff_cal == "<1" &
        energy_source %in% c("wood", "paper_waste", "animal_waste") ~ "yes",
      TRUE ~ "no"
    ),
current_presence_mines = case_when(
displ_explosive_presence == "both" | displ_explosive_presence == "current" |
nondispl_explosive_presence == "yes" ~ "current_explosive_presence",
displ_explosive_presence == "previous" | displ_explosive_presence == "no" |
nondispl_explosive_presence == "no" ~ "no_explosive_presence",
TRUE ~ NA_character_
),
# child_working_call = case_when(
# children_working == 0 ~ 0,
# children_working > 0 ~ children_working/age_10_17,
# TRUE ~ NA_real_
# ),
count_current_enrolled_avg = count_current_enrolled / edu_age_boys_girls_num,
count_current_attending_avg = count_current_attending / edu_age_boys_girls_num
)
# Major events
major_events_vars <- c(
"major_events.avalanche",
"major_events.conflict",
"major_events.drought",
"major_events.earthquake",
"major_events.floods",
"major_events.other"
)
major_events_score <- (rowSums(data[major_events_vars]))
data <- data %>%
mutate(
major_events_cal = case_when(
major_events_score == 0 ~ "none",
major_events_score == 1 ~ "1",
major_events_score == 2 ~ "2",
major_events_score >= 3 ~ ">= 3",
TRUE ~ NA_character_
)
)
# hno_intersectoral analysis
data <- data %>%
mutate(
# GBV incident OR threat
gbv_incidents_threats = case_when(
other_incidents == "sgbv" | other_concerns == "sgbv" ~ ">=1",
(other_incidents == "no" | other_incidents == "other") &
(other_concerns == "no" | other_concerns == "other") ~ "0",
TRUE ~ NA_character_
),
# At least one protection incident for adult OR child
prot_incident_adult_child = case_when(
adult_prot_incidents != "none" | child_prot_incidents != "none" ~ ">=1",
adult_prot_incidents == "none" & child_prot_incidents == "none" ~ "0",
TRUE ~ NA_character_
),
# Total income per day per household member in USD
daily_income_hh_members = case_when(
(((total_income / 30) / hh_size) / 78.36) > 1.90 ~ 1,
(((total_income / 30) / hh_size) / 78.36) <= 1.90 ~ 0,
TRUE ~ NA_real_
),
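      # Worked example (assumed exchange rate of 78.36 AFN per USD, as above):
      # a household earning 15000 AFN/month with hh_size = 6 has
      # ((15000 / 30) / 6) / 78.36 ~= 1.06 USD per person per day,
      # i.e. below the 1.90 USD line, so the indicator is 0.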
# health
health_service_access_class = case_when(
health_facility_access == "no" ~ 1,
health_facility_access == "yes" ~ 0,
TRUE ~ NA_real_
),
#ESNFI
shelter_type_access_class = case_when(
shelter == "tent" | shelter == "makeshift_shelter" |
shelter == "collective_centre" | shelter == "open_space" ~ 1,
shelter == "transitional" | shelter == "permanent" ~ 0,
TRUE ~ NA_real_
),
#EiE
hh_level_school_attendance_class = case_when(
count_current_attending > 0 ~ 1,
TRUE ~ 0
),
#FSA No data for Farah paper interviews
market_service_access_class = case_when(
market_access == "no" ~ 1,
market_access == "yes" ~ 0,
TRUE ~ NA_real_
),
#protection
identity_ownership_class = case_when(
child_tazkira == 0 & adult_tazkira == 0 ~ 1,
child_tazkira >=1 | adult_tazkira >=1 ~ 0,
TRUE ~ NA_real_
),
#WASH
access_to_water_class = case_when(
water_source == "handpump_private" | water_source == "handpump_public" |
water_source == "piped_public" | water_source == "spring_protected" ~ 1,
water_source == "spring_unprotected" | water_source == "surface_water" |
water_source == "water_trucking" | water_source == "other" ~ 0,
TRUE ~ NA_real_
)
)
access_services_vars <- c("health_service_access_class", "shelter_type_access_class",
"hh_level_school_attendance_class", "market_service_access_class",
"identity_ownership_class","access_to_water_class")
data$services_score <- comp_score(data, access_services_vars)
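# Note: comp_score() is defined outside this script (in the sourced helper
# files). A minimal sketch of its assumed behaviour -- a row-wise sum over the
# selected indicator columns, ignoring NAs -- would be:
# comp_score <- function(df, vars) {
#   rowSums(df[, vars, drop = FALSE], na.rm = TRUE)
# }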
data <- data %>%
mutate(
comp_ind_access_services = case_when(
services_score <= 2 ~ 0,
services_score >= 3 ~ 1
)
)
#Recoding new variables
data$hh_no_tazkira <- ifelse(data$tazkira_total < 1, "Tazkira_No", "Tazkira_Yes")
data$muac_yes_no <- ifelse(data$muac_total > 0 & !is.na(data$min_muac) ,"Yes","No")
data$recent_non_recent <- ifelse(data$final_displacement_status_non_displ == "recent_idps", "recent_idps",
ifelse(data$final_displacement_status_non_displ == "non_recent_idps", "non_recent_idps", NA ))
data$edu_removal_shock_cal <- ifelse(data$shock_class == 5, "Yes", "No")
data$enrolled_attending <- ifelse(data$count_enrolled_attending > 0, "Enrolled_and_Attending", "Not" )
data$schoo_age_boys_girls <- coerc(data$boys_ed) + coerc(data$girls_ed)
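# Note: coerc() is also defined in the sourced helpers; it is assumed to be a
# safe numeric coercion along the lines of:
# coerc <- function(x) as.numeric(as.character(x))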
## source disaggs
source("r/prepare_disagg.R")
################ MEB analysis ###########################################################
# sustainable income vars
sustainable_income_vars <- c(
'ag_income',
'livestock_income',
'rent_income',
'small_business_income',
'unskill_labor_income',
'skill_labor_income',
'formal_employment_income',
'gov_benefits_income'
)
# sustainable income per HH
data$sustainable_income <- comp_score(data, sustainable_income_vars)
# sustainable income per HH member
data$sustainable_income_per_mem <- data$sustainable_income / data$hh_size
# net inocome per hh
data$net_income <- data$sustainable_income - data$all_expenses
# total Exp per hh memeber
data$total_exp_per_mem <- data$all_expenses / data$hh_size
data <- data %>%
mutate(
food_exp_per_mem = food_exp / hh_size,
water_expt_per_mem = water_expt / hh_size,
rent_exp_per_mem = rent_exp / hh_size,
fuel_exp_per_mem = fuel_exp / hh_size,
debt_exp_per_mem = debt_exp / hh_size,
food_exp_spent = case_when(
food_exp > 0 ~ 1,
food_exp == 0 ~ 0,
TRUE ~ NA_real_
),
water_expt_spent = case_when(
water_expt > 0 ~ 1,
water_expt == 0 ~ 0,
TRUE ~ NA_real_
),
rent_exp_spent = case_when(
rent_exp > 0 ~ 1,
rent_exp == 0 ~ 0,
TRUE ~ NA_real_
),
fuel_exp_spent = case_when(
fuel_exp > 0 ~ 1,
fuel_exp == 0 ~ 0,
TRUE ~ NA_real_
),
debt_exp_spent = case_when(
debt_exp > 0 ~ 1,
debt_exp == 0 ~ 0,
TRUE ~ NA_real_
)
)
# sustainable income 2 vars
sustainable_income_2_vars <- c(
'ag_income',
'livestock_income',
'rent_income',
'small_business_income',
'unskill_labor_income',
'skill_labor_income',
'formal_employment_income'
)
# sustainable income per HH
data$sustainable_income_2 <- comp_score(data, sustainable_income_2_vars)
# sustainable income per HH member
data$sustainable_income_2_per_mem <- data$sustainable_income_2 / data$hh_size
# unskilled_labor_income per hh member
data$unskill_labor_income_per_mem <- data$unskill_labor_income / data$hh_size
# unskilled + agriculture + livestock income vars
unskill_ag_live_income_vars <- c(
'ag_income',
'livestock_income',
'unskill_labor_income'
)
# unskilled + agriculture + livestock income
data$unskill_ag_live_income_income <- comp_score(data, unskill_ag_live_income_vars)
# unskilled + agriculture + livestock income per HH member
data$unskill_ag_live_income_income_per_mem <- data$unskill_ag_live_income_income / data$hh_size
########################################################################################
############################# Vulnerablity composites ###################################
data <- data %>%
mutate(
hoh_disabled_vul_class = case_when(
data$hoh_disabled == "disabled" ~ 1,
data$hoh_disabled == "not_disabled" ~ 0,
TRUE ~ 0
),
hoh_debt_disagg_vul_class = case_when(
hoh_debt_disagg == "high_debt" ~ 1,
hoh_debt_disagg == "low_debt" ~ 0,
hoh_debt_disagg == "medium_debt" ~ 0,
hoh_debt_disagg == "no_debt" ~ 0,
TRUE ~ 0
),
tazkira_disagg_vul_class = case_when(
tazkira_disagg == "non_have_tazkira" ~ 1,
tazkira_disagg == "all_have_tazkira" ~ 0,
TRUE ~ 0
),
hoh_age_group_vul_class = case_when(
hoh_age_group == "65+" ~ 1,
hoh_age_group == "<65" ~ 0,
TRUE ~ 0
),
hoh_sex_disagg_vul_class = case_when(
hoh_sex_disagg == "female" ~ 1,
hoh_sex_disagg == "male" ~ 0,
TRUE ~ 0
),
pregnant_lactating_member_vul_class = case_when(
pregnant_lactating_member == "at_least_one_mem_pregnant_lactating" ~ 1,
pregnant_lactating_member == "no_mem_pregnent_lactating" ~ 0,
TRUE ~ 0
),
chronic_illness_vul_class = case_when(
chronic_illness == "yes" ~ 1,
chronic_illness == "no " ~ 0,
chronic_illness == "no_answer" ~ 0,
TRUE ~ 0
),
literacy_vul_class = case_when(
female_literacy_yes_no == "0" & male_literacy_yes_no == "0" ~ 1,
female_literacy_yes_no == "1 or more" ~ 0,
male_literacy_yes_no == "1 or more" ~ 0,
TRUE ~ 0
),
behav_change_disagg_vul_class = case_when(
behav_change_disagg == "yes" ~ 1,
behav_change_disagg == "no" ~ 0,
TRUE ~ 0
),
female_literacy_yes_no_class = case_when(
female_literacy_yes_no == "0" ~ 1,
female_literacy_yes_no == "1 or more" ~ 0,
TRUE ~ 0
),
behavior_change_cause_class2 = case_when(
behavior_change_cause == "yes" ~ 1,
TRUE ~ 0
),
child_behavior_change_class2 = case_when(
child_behavior_change == "yes" ~ 1,
TRUE ~ 0
),
adult_behavior_change_class2 = case_when(
adult_behavior_change == "yes" ~ 1,
TRUE ~ 0
),
adult_behavior_only_by_conflict = case_when(
adult_behavior_change == "yes" & behavior_change_cause == "yes" ~ 1,
TRUE ~ 0
)
)
## Vulnerable_group_1
Vulnerable_group_1_vars <- c(
"hoh_disabled_vul_class",
"hoh_debt_disagg_vul_class",
"tazkira_disagg_vul_class",
"hoh_age_group_vul_class",
"hoh_sex_disagg_vul_class",
"chronic_illness_vul_class",
"literacy_vul_class"
)
data$Vulnerable_group_1_vars_score <- comp_score(data, Vulnerable_group_1_vars)
data <- data %>%
mutate(
vulnerable_group_1 = case_when(
Vulnerable_group_1_vars_score >= 1 ~ "vulnerable",
Vulnerable_group_1_vars_score == 0 ~ "not_vulnerable",
TRUE ~ NA_character_
)
)
## vulnerable_group_4
Vulnerable_group_4_vars <- c(
"hoh_disabled_vul_class",
"hoh_debt_disagg_vul_class",
"tazkira_disagg_vul_class",
"hoh_age_group_vul_class",
"hoh_sex_disagg_vul_class",
"behav_change_disagg_vul_class"
)
data$Vulnerable_group_4_vars_score <- comp_score(data, Vulnerable_group_4_vars)
data <- data %>%
mutate(
vulnerable_group_4 = case_when(
Vulnerable_group_4_vars_score >= 1 ~ "vulnerable",
Vulnerable_group_4_vars_score == 0 ~ "not_vulnerable",
TRUE ~ NA_character_
)
)
## Vulnerable_group_5
Vulnerable_group_5_vars <- c(
"hoh_disabled_vul_class",
"hoh_debt_disagg_vul_class",
"tazkira_disagg_vul_class",
"hoh_age_group_vul_class",
"hoh_sex_disagg_vul_class",
"pregnant_lactating_member_vul_class",
"chronic_illness_vul_class",
"behav_change_disagg_vul_class",
"literacy_vul_class"
)
data$Vulnerable_group_5_vars_score <- comp_score(data, Vulnerable_group_5_vars)
data <- data %>%
mutate(
vulnerable_group_5 = case_when(
Vulnerable_group_5_vars_score >= 1 ~ "vulnerable",
Vulnerable_group_5_vars_score == 0 ~ "not_vulnerable",
TRUE ~ NA_character_
)
)
## Vulnerable_group_6
Vulnerable_group_6_vars <- c(
"hoh_disabled_vul_class",
"hoh_debt_disagg_vul_class",
"tazkira_disagg_vul_class",
"hoh_age_group_vul_class",
"hoh_sex_disagg_vul_class",
"behav_change_disagg_vul_class",
"female_literacy_yes_no_class"
)
data$Vulnerable_group_6_vars_score <- comp_score(data, Vulnerable_group_6_vars)
data <- data %>%
mutate(
vulnerable_group_6 = case_when(
Vulnerable_group_6_vars_score >= 1 ~ "vulnerable",
Vulnerable_group_6_vars_score == 0 ~ "not_vulnerable",
TRUE ~ NA_character_
)
)
## Vulnerable_group_7
## vulnerable_group_7
Vulnerable_group_7_vars <- c(
"hoh_disabled_vul_class",
"hoh_debt_disagg_vul_class",
"tazkira_disagg_vul_class",
"hoh_age_group_vul_class",
"hoh_sex_disagg_vul_class",
"behavior_change_cause_class2"
)
data$Vulnerable_group_7_vars_score <- comp_score(data, Vulnerable_group_7_vars)
data <- data %>%
mutate(
vulnerable_group_7 = case_when(
Vulnerable_group_7_vars_score >= 1 ~ "vulnerable",
Vulnerable_group_7_vars_score == 0 ~ "not_vulnerable",
TRUE ~ NA_character_
)
)
## vulnerable_group_8
Vulnerable_group_8_vars <- c(
"hoh_disabled_vul_class",
"hoh_debt_disagg_vul_class",
"tazkira_disagg_vul_class",
"hoh_age_group_vul_class",
"hoh_sex_disagg_vul_class",
"adult_behavior_only_by_conflict"
)
data$Vulnerable_group_8_vars_score <- comp_score(data, Vulnerable_group_8_vars)
data <- data %>%
mutate(
vulnerable_group_8 = case_when(
Vulnerable_group_8_vars_score >= 1 ~ "vulnerable",
Vulnerable_group_8_vars_score == 0 ~ "not_vulnerable",
TRUE ~ NA_character_
)
)
###############################################end
#########################
## esnfi_new_indicator_1
data <- data %>%
mutate(
shelter_class2 = case_when(
shelter == "tent" | shelter == "collective_centre" | shelter == "makeshift_shelter" |
shelter == "open_space" ~ 1,
shelter == "transitional" | shelter == "permanent" ~ 0,
TRUE ~ 0
),
shelter_damage_and_repair_class = case_when(
(shelter_damage.due_to_conflict == 1 | shelter_damage.due_to_natural_disaster == 1) &
shelter_damage_repair == "no" ~ 1,
shelter_damage.no == 1 | shelter_damage_repair == "yes" ~ 0,
TRUE ~ 0
),
priority_nfi_cal_class = case_when(
priority_nfi_cal == "0_1" | priority_nfi_cal == "2_3" ~ 1,
priority_nfi_cal == "4_5" | priority_nfi_cal == "6" ~ 0,
TRUE ~ 0
)
)
esnfi_new_indicator_1_vars <- c(
"shelter_class2",
"shelter_damage_and_repair_class",
"priority_nfi_cal_class"
)
esnfi_new_indicator_1_vars_score <- comp_score(data, esnfi_new_indicator_1_vars)
data <- data %>%
mutate(
esnfi_new_indicator_1 = case_when(
esnfi_new_indicator_1_vars_score >= 1 ~ 1,
esnfi_new_indicator_1_vars_score == 0 ~ 0,
TRUE ~ 0
)
)
###################################end
######## wash_new_indicator 1#######
data <- data %>%
mutate(
water_source_class2 = case_when(
water_source == "spring_unprotected" | water_source == "surface_water" | water_source == "water_trucking" |
water_source == "other" ~ 1,
water_source == "handpump_private" | water_source == "handpump_public" | water_source == "handpump_public" |
water_source == "spring_protected" ~ 0,
TRUE ~ 0
),
latrine_class2 = case_when(
latrine == "open" | latrine == "pit_latrine_uncovered" | latrine == "other" ~ 1,
latrine == "public_latrine" | latrine == "pit_latrine_covered" | latrine == "vip_latrine" |
latrine == "flush_toilet_open_drain" | latrine == "flush_toilet_septic" ~ 0,
TRUE ~ 0
),
soap_class2 = case_when(
soap == "no" ~ 1,
soap == "yes_didnt_see" | soap == "yes_saw" ~ 0,
TRUE ~ 0
),
diarrhea_cases_class = case_when(
diarrhea_cases >= 1 ~ 1,
diarrhea_cases == 0 ~ 0,
TRUE ~ 0
),
diarrhea_cases_class_children = case_when(
diarrhea_cases >= 1 ~ 1,
diarrhea_cases == 0 ~ 0,
TRUE ~ NA_real_
)
)
wash_new_indicator_1_vars <- c(
"water_source_class2",
"latrine_class2",
"soap_class2"
)
wash_new_indicator_1_vars_score <- comp_score(data, wash_new_indicator_1_vars)
data <- data %>%
mutate(
wash_new_indicator_1 = case_when(
wash_new_indicator_1_vars_score >= 2 ~ 1,
wash_new_indicator_1_vars_score == 0 ~ 0,
TRUE ~ 0
)
)
##############################################end
######## wash_new_indicator 2 #######
wash_new_indicator_2_vars <- c(
"water_source_class2",
"latrine_class2",
"soap_class2",
"diarrhea_cases_class"
)
wash_new_indicator_2_vars_score <- comp_score(data, wash_new_indicator_2_vars)
data <- data %>%
mutate(
wash_new_indicator_2 = case_when(
wash_new_indicator_2_vars_score >= 2 ~ 1,
wash_new_indicator_2_vars_score == 0 ~ 0,
TRUE ~ 0
)
)
############# winterization_indicator ##################
data <- data %>%
mutate(
shelter_class3 = case_when(
shelter == "tent" | shelter == "collective_centre" | shelter == "makeshift_shelter" |
shelter == "open_space" ~ 1,
shelter == "transitional" | shelter == "permanent" ~ 0,
TRUE ~ 0
),
blankets_suff_cal_class = case_when(
blankets_suff_cal == "<1" ~ 1,
blankets_suff_cal == "1+" ~ 0,
TRUE ~ 0
),
energy_source_class = case_when(
energy_source == "animal_waste" | energy_source == "charcoal" | energy_source == "paper_waste" |
energy_source == "wood" ~ 1,
energy_source == "coal" | energy_source == "lpg" | energy_source == "electricity" |
energy_source == "other" ~ 0,
TRUE ~ 0
)
)
winterization_indicator_vars <- c(
"shelter_class3",
"blankets_suff_cal_class",
"energy_source_class"
)
winterization_indicator_vars_score <- comp_score(data, winterization_indicator_vars)
data <- data %>%
mutate(
winterization_indicator = case_when(
winterization_indicator_vars_score >= 2 ~ 1,
winterization_indicator_vars_score == 0 ~ 0,
TRUE ~ 0
)
)
################################################end
#################### idp push factors ############
ipd_push_factors_vars <- c(
'idp_push_factors.active_conflict',
'idp_push_factors.anticipated_conflict',
'idp_push_factors.earthquake',
'idp_push_factors.floods',
'idp_push_factors.avalanche',
'idp_push_factors.drought',
'idp_push_factors.poverty',
'idp_push_factors.service_access',
'idp_push_factors.other'
)
ipd_push_factors_vars_short <- c(
'idp_push_factors.active_conflict',
'idp_push_factors.anticipated_conflict',
'idp_push_factors.earthquake',
'idp_push_factors.floods',
'idp_push_factors.avalanche',
'idp_push_factors.drought'
)
data$ipd_push_factors_vars_score <- comp_score(data, ipd_push_factors_vars)
data$ipd_push_factors_vars_score_short <- comp_score(data, ipd_push_factors_vars_short)
data <- data %>%
mutate(
    idp_push_factors_cat = case_when(
      ipd_push_factors_vars_score == 1 ~ "1_event",
      ipd_push_factors_vars_score == 2 ~ "2_events",
      ipd_push_factors_vars_score >= 3 ~ "3_or_more_events",
      TRUE ~ NA_character_
    ),
    idp_push_factors_cat_short = case_when(
      ipd_push_factors_vars_score_short == 1 ~ "1_event",
      ipd_push_factors_vars_score_short == 2 ~ "2_events",
      ipd_push_factors_vars_score_short >= 3 ~ "3_or_more_events",
      TRUE ~ NA_character_
    )
)
##################### MSNI #######################
#### IMPACT
data <- data %>%
mutate(
major_events_impc = case_when(
major_events_cal == ">= 3" | major_events_cal == "2" ~ 3,
major_events_cal == "1" ~ 1,
TRUE ~ 0
),
agricultural_impact_how_impc = case_when(
agricultural_impact_how == "51_75" ~ 1,
agricultural_impact_how == "76_100" ~ 2,
TRUE ~ 0
),
livestock_impact_how_impc = case_when(
livestock_impact_how.livestock_died == 1 ~ 1,
livestock_impact_how.left_unattended == 1 ~ 1,
TRUE ~ 0
),
explosive_impact_death_impc = case_when(
explosive_impact.injury_death == 1 ~ 3,
TRUE ~ 0
),
    explosive_impact_others_impc = case_when(
      (explosive_impact.psych_impact == 1 | explosive_impact.relocation == 1 |
         explosive_impact.access_services == 1 | explosive_impact.restrict_recreation == 1 |
         explosive_impact.livelihoods_impact == 1 | explosive_impact.other == 1) &
        explosive_impact.injury_death != 1 ~ 2,
      TRUE ~ 0
    ),
adult_injuries_cause_impc = case_when(
adult_injuries_cause == "conflict" | adult_injuries_cause == "natural_disaster" |
child_injuries_cause == "conflict" | child_injuries_cause == "natural_disaster" ~ 3,
TRUE ~ 0
),
shelter_damage_impc = case_when(
shelter_damage == "due_to_conflict" | shelter_damage == "due_to_natural_disaster" ~ 2,
TRUE ~ 0
),
edu_removal_shock_impc = case_when(
count_shock >= 1 ~ 1,
TRUE ~ 0
),
health_facility_reopened_impc = case_when(
health_facility_reopened == "remain_closed" ~ 1,
TRUE ~ 0
),
water_damaged_cause_impc = case_when(
water_damaged_cause == "conflict" | water_damaged_cause == "natural_disaster" |
water_damaged_cause == "drought" ~ 2,
TRUE ~ 0
),
    aid_access_issue_2_impc = case_when(
      aid_access_issue == "yes" &
        aid_access_issue_type %in% c("insecurity", "explosive_hazards") ~ 2,
      TRUE ~ 0
    ),
    aid_access_issue_1_impc = case_when(
      aid_access_issue == "yes" &
        aid_access_issue_type %in% c("distance", "social_restrictions") ~ 1,
      TRUE ~ 0
    )
)
# impact class vars
msni_impact_score_vars <- c(
"major_events_impc",
"agricultural_impact_how_impc",
"livestock_impact_how_impc",
"explosive_impact_death_impc",
"explosive_impact_others_impc",
"adult_injuries_cause_impc",
"shelter_damage_impc",
"edu_removal_shock_impc",
"health_facility_reopened_impc",
"water_damaged_cause_impc",
"aid_access_issue_2_impc",
"aid_access_issue_1_impc"
)
# impact score
data$msni_impact_score <- comp_score(data, msni_impact_score_vars)
# impact severity
data <- data %>%
mutate(
impact = case_when(
msni_impact_score < 3 ~ 1,
msni_impact_score > 2 & msni_impact_score < 6 ~ 2,
msni_impact_score > 5 & msni_impact_score < 9 ~ 3,
msni_impact_score >= 9 ~ 4,
TRUE ~ 0
)
)
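# The bands above translate the summed impact score into a 1-4 severity scale:
# scores 0-2 -> 1, 3-5 -> 2, 6-8 -> 3, and 9 or more -> 4.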
#### End IMPACT
#### Capacity gaps
data <- data %>%
mutate(
capacity_gaps = case_when(
lcsi_severity == "minimal" ~ 1,
lcsi_severity == "stress" ~ 2,
lcsi_severity == "severe" ~ 3,
lcsi_severity == "extreme" ~ 4,
TRUE ~ NA_real_
)
)
#### End Capacity gaps
# HC-LSG/ESNFI - shelter_lsg
data <- data %>%
mutate(
shelter_type_lsg = case_when(
shelter == "open_space" ~ 3,
shelter == "tent" | shelter == "makeshift_shelter" | shelter == "collective_centre" ~ 2,
# shelter == "transitional" ~ 1,
TRUE ~ 0
),
shelter_damage_lsg = case_when(
shelter_damage_extent == "fully_destroyed" & shelter_damage_repair == "no" ~ 3,
shelter_damage_extent == "significant_damage" & shelter_damage_repair == "no" ~ 2,
TRUE ~ 0
),
winterisation_lsg = case_when(
blankets_suff_cal == "<1" & (energy_source == "animal_waste" | energy_source == "paper_waste" |
energy_source == "wood") ~ 3,
TRUE ~ 0
),
access_nfi_lsg = case_when(
priority_nfi_num < 3 ~ 3,
priority_nfi_num > 2 & priority_nfi_num < 6 ~ 2,
TRUE ~ 0
)
)
# shelter_lsg class vars
msni_shelter_lsg_vars <- c(
"shelter_type_lsg",
"shelter_damage_lsg",
"winterisation_lsg",
"access_nfi_lsg"
)
# shelter_lsg score
data$msni_shelter_lsg_score <- comp_score(data, msni_shelter_lsg_vars)
# shelter_lsg severity
data <- data %>%
mutate(
shelter_lsg = case_when(
msni_shelter_lsg_score < 3 ~ 1,
msni_shelter_lsg_score > 2 & msni_shelter_lsg_score < 6 ~ 2,
msni_shelter_lsg_score > 5 & msni_shelter_lsg_score < 9 ~ 3,
msni_shelter_lsg_score >= 9 ~ 4
)
)
#### end shelter_lsg
# HC-LSG/FSA - fsl_lsg
data <- data %>%
mutate(
fcs_lsg = case_when(
fcs_category == "poor" ~ 3,
fcs_category == "borderline" ~ 2,
TRUE ~ 0
),
hhs_lsg = case_when(
hhs_category == "severe_hunger" ~ 3,
hhs_category == "moderate_hunger" ~ 2,
TRUE ~ 0
),
food_source_lsg = case_when(
food_source == "assistance" | food_source == "gift" | food_source == "borrowed" ~ 3,
# food_source == "borrowed" ~ 2,
TRUE ~ 0
),
market_access_lsg = case_when(
market_access == "no" ~ 3,
TRUE ~ 0
),
market_distance_lsg = case_when(
market_distance == "6_10km" ~ 2,
TRUE ~ 0
)
)
# fsl_lsg class vars
msni_fsl_lsg_vars <- c(
"fcs_lsg",
"hhs_lsg",
"food_source_lsg",
"market_access_lsg",
"market_distance_lsg"
)
# fsl_lsg score
data$msni_fsl_lsg_score <- comp_score(data, msni_fsl_lsg_vars)
# fsl_lsg severity
data <- data %>%
mutate(
fsl_lsg = case_when(
msni_fsl_lsg_score < 3 ~ 1,
msni_fsl_lsg_score > 2 & msni_fsl_lsg_score < 6 ~ 2,
msni_fsl_lsg_score > 5 & msni_fsl_lsg_score < 9 ~ 3,
msni_fsl_lsg_score >= 9 ~ 4
)
)
# fsl_lsg severity 2
data <- data %>%
mutate(
fsl_lsg_2 = case_when(
msni_fsl_lsg_score < 3 ~ 1,
msni_fsl_lsg_score > 2 & msni_fsl_lsg_score < 7 ~ 2,
msni_fsl_lsg_score > 6 & msni_fsl_lsg_score < 9 ~ 3,
msni_fsl_lsg_score >= 9 ~ 4
)
)
# fsl_lsg severity 3
data <- data %>%
mutate(
fsl_lsg_3 = case_when(
msni_fsl_lsg_score < 3 ~ 1,
msni_fsl_lsg_score > 2 & msni_fsl_lsg_score < 8 ~ 2,
msni_fsl_lsg_score > 7 & msni_fsl_lsg_score < 10 ~ 3,
msni_fsl_lsg_score >= 10 ~ 4
)
)
#### end fsl_lsg
# HC-LSG/Health - health_lsg
data <- data %>%
mutate(
access_health_center_lsg = case_when(
health_facility_access == "no" ~ 3,
TRUE ~ 0
),
health_facility_distance_lsg = case_when(
health_facility_distance == "none" | health_facility_distance == "more_10km" ~ 3,
health_facility_distance == "6_10km" ~ 2,
TRUE ~ 0
),
behav_change_lsg = case_when(
behav_change_disagg == "yes" ~ 3,
TRUE ~ 0
),
birth_location_lsg = case_when(
birth_location == "outside" | diarrhea_cases_class == 1 ~ 3,
birth_location == "home" | birth_location == "midwife_home" | birth_location == "other" ~ 2,
TRUE ~ 0
)
)
# health_lsg class vars
msni_health_lsg_vars <- c(
"access_health_center_lsg",
"health_facility_distance_lsg",
"behav_change_lsg",
"birth_location_lsg"
)
# health_lsg score
data$msni_health_lsg_score <- comp_score(data, msni_health_lsg_vars)
# health_lsg severity
data <- data %>%
mutate(
health_lsg = case_when(
msni_health_lsg_score < 3 ~ 1,
msni_health_lsg_score > 2 & msni_health_lsg_score < 6 ~ 2,
msni_health_lsg_score > 5 & msni_health_lsg_score < 9 ~ 3,
msni_health_lsg_score >= 9 ~ 4
)
)
#### end health_lsg
# HC-LSG/Protection - protection_lsg
data <- data %>%
mutate(
prot_incidents_4_lsg = case_when(
adult_prot_incidents.assaulted_with_weapon == 1 | adult_prot_incidents.hindered_leave_settlement == 1 |
adult_prot_incidents.forced_work == 1 | adult_prot_incidents.forcibly_detained == 1 |
child_prot_incidents.assaulted_with_weapon == 1 | child_prot_incidents.hindered_leave_settlement == 1 |
child_prot_incidents.forced_work == 1 | child_prot_incidents.forcibly_detained == 1 ~ 4,
TRUE ~ 0
),
    # moderate incidents score 3 only when no severe (level-4) incident applies;
    # the disjunction is parenthesised and the "none severe" check uses &
    prot_incidents_3_lsg = case_when(
      (adult_prot_incidents.verbally_threatened == 1 | adult_prot_incidents.assaulted_without_weapon == 1 |
         adult_prot_incidents.hindered_leave_district == 1 | child_prot_incidents.verbally_threatened == 1 |
         child_prot_incidents.assaulted_without_weapon == 1 | child_prot_incidents.hindered_leave_district == 1) &
        (adult_prot_incidents.assaulted_with_weapon == 0 & adult_prot_incidents.hindered_leave_settlement == 0 &
           adult_prot_incidents.forced_work == 0 & adult_prot_incidents.forcibly_detained == 0 &
           child_prot_incidents.assaulted_with_weapon == 0 & child_prot_incidents.hindered_leave_settlement == 0 &
           child_prot_incidents.forced_work == 0 & child_prot_incidents.forcibly_detained == 0) ~ 3,
      TRUE ~ 0
    ),
other_incidents_lsg = case_when(
other_incidents == "sgbv" | other_concerns == "sgbv" | boy_marriage == "yes" | girl_marriage == "yes" ~ 3,
TRUE ~ 0
),
prot_concerns_lsg = case_when(
prot_concerns.violence_maiming == 1 | prot_concerns.violence_injuries == 1 | prot_concerns.psych_wellbeing == 1 |
prot_concerns.abduction == 1 | prot_concerns.theft == 1 | prot_concerns.explosive_hazards == 1 |
prot_concerns.destruction_property == 1 | prot_concerns.early_marriage == 1 | prot_concerns.other == 1 ~ 3,
TRUE ~ 0
),
safety_lsg = case_when(
safety == "poor" | safety == "very_poor" ~ 2,
TRUE ~ 0
)
)
# protection_lsg class vars
msni_protection_lsg_vars <- c(
"prot_incidents_4_lsg",
"prot_incidents_3_lsg",
"other_incidents_lsg",
"prot_concerns_lsg",
"safety_lsg"
)
# protection_lsg score
data$msni_protection_lsg_score <- comp_score(data, msni_protection_lsg_vars)
# protection_lsg severity
data <- data %>%
mutate(
protection_lsg = case_when(
msni_protection_lsg_score < 3 ~ 1,
msni_protection_lsg_score > 2 & msni_protection_lsg_score < 6 ~ 2,
msni_protection_lsg_score > 5 & msni_protection_lsg_score < 9 ~ 3,
msni_protection_lsg_score >= 9 ~ 4
)
)
#### end protection_lsg
# HC-LSG/WASH wash_lsg
data <- data %>%
mutate(
water_source_lsg = case_when(
water_source == "surface_water" ~ 3,
water_source == "water_trucking" | water_source == "spring_unprotected" ~ 2,
TRUE ~ 0
),
soap_lsg = case_when(
soap == "no" ~ 3,
TRUE ~ 0
),
latrine_lsg = case_when(
latrine == "open" | latrine == "public_latrine" | waste_disposal == "open_space" ~ 3,
latrine == "pit_latrine_uncovered" | waste_disposal == "burning" ~ 2,
TRUE ~ 0
),
water_distance_lsg = case_when(
water_distance == "over_1km" ~ 3,
water_distance == "500m_to_1km" ~ 2,
TRUE ~ 0
)
)
# wash_lsg class vars
msni_wash_lsg_vars <- c(
"water_source_lsg",
"soap_lsg",
"latrine_lsg",
"water_distance_lsg"
)
# wash_lsg score
data$msni_wash_lsg_score <- comp_score(data, msni_wash_lsg_vars)
# wash_lsg severity
data <- data %>%
mutate(
wash_lsg = case_when(
msni_wash_lsg_score < 3 ~ 1,
msni_wash_lsg_score > 2 & msni_wash_lsg_score < 6 ~ 2,
msni_wash_lsg_score > 5 & msni_wash_lsg_score < 9 ~ 3,
msni_wash_lsg_score >= 9 ~ 4
)
)
# wash_lsg severity 2
data <- data %>%
mutate(
wash_lsg_2 = case_when(
msni_wash_lsg_score < 3 ~ 1,
msni_wash_lsg_score > 2 & msni_wash_lsg_score < 7 ~ 2,
msni_wash_lsg_score > 6 & msni_wash_lsg_score < 9 ~ 3,
msni_wash_lsg_score >= 9 ~ 4
)
)
# wash_lsg severity 3
data <- data %>%
mutate(
wash_lsg_3 = case_when(
msni_wash_lsg_score < 3 ~ 1,
msni_wash_lsg_score > 2 & msni_wash_lsg_score < 8 ~ 2,
msni_wash_lsg_score > 7 & msni_wash_lsg_score < 10 ~ 3,
msni_wash_lsg_score >= 10 ~ 4
)
)
#### end wash_lsg
# HC-LSG/EiE - education_lsg
data <- data %>%
mutate(
    not_attending_lsg = case_when(
      percent_enrolled >= 0.75 & percent_enrolled <= 1 ~ 4,
      percent_enrolled >= 0.5 & percent_enrolled <= 0.749 ~ 3,
      percent_enrolled >= 0.25 & percent_enrolled <= 0.499 ~ 2,
      percent_enrolled >= 0 & percent_enrolled <= 0.249 ~ 1,
      TRUE ~ 0
    ),
),
education_level_lsg = case_when(
highest_edu == "none" ~ 2,
highest_edu == "primary" ~ 1,
TRUE ~ 0
),
unattending_security_lsg = case_when(
boy_unattendance_reason.insecurity == 1 | boy_unattendance_reason.child_works_instead == 1 |
girl_unattendance_reason.insecurity == 1 | girl_unattendance_reason.child_works_instead == 1 ~ 3,
TRUE ~ 0
),
    unattending_cultural_lsg = case_when(
      boy_unattendance_reason.cultural_reasons == 1 | girl_unattendance_reason.cultural_reasons == 1 |
        boy_unattendance_reason.lack_facilities == 1 | girl_unattendance_reason.lack_facilities == 1 ~ 2,
      TRUE ~ 0
    ),
unattending_finance_doc_lsg = case_when(
boy_unattendance_reason.lack_documentation == 1 | boy_unattendance_reason.too_expensive == 1 |
boy_unattendance_reason.lack_teachers == 1 | girl_unattendance_reason.lack_documentation ==1 |
girl_unattendance_reason.too_expensive == 1 | girl_unattendance_reason.lack_teachers == 1 ~ 1,
TRUE ~ 0
)
)
# education_lsg class vars
msni_education_lsg_vars <- c(
"not_attending_lsg",
"unattending_security_lsg",
"unattending_cultural_lsg",
"unattending_finance_doc_lsg",
"education_level_lsg"
)
# education_lsg score
data$msni_education_lsg_score <- comp_score(data, msni_education_lsg_vars)
# education_lsg severity
data <- data %>%
mutate(
education_lsg = case_when(
msni_education_lsg_score < 3 ~ 1,
msni_education_lsg_score > 2 & msni_education_lsg_score < 6 ~ 2,
msni_education_lsg_score > 5 & msni_education_lsg_score < 9 ~ 3,
msni_education_lsg_score >= 9 ~ 4
)
)
#### end education_lsg
#################################################
data <- data %>% filter(!is.na(province))
# filter problematic records by uuid
uuid_filter <- c("ac3e8430-ba88-497b-9895-c1bd8da7f79e",
"8ac61e9b-8ff8-4e4a-9619-1dc0ab31f396",
"7171e0a8-3a40-4c57-b84d-a65f08115994",
"596c244b-ea20-48ef-8218-023ac3f2831f")
`%notin%` <- Negate(`%in%`)
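# e.g. c("a", "b") %notin% c("b", "c") returns TRUE FALSE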
data <- data %>% filter(uuid %notin% uuid_filter )
# MSNI Indicator
data$msni <- msni(education_lsg = data$education_lsg,
fsl_lsg = data$fsl_lsg,
health_lsg = data$health_lsg,
protection_lsg = data$protection_lsg,
shelter_lsg = data$shelter_lsg,
wash_lsg = data$wash_lsg,
capacity_gaps = data$capacity_gaps,
impact = data$impact)
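# Note: msni() is not defined in this script; it is assumed to come from an
# attached MSNI helper package (e.g. REACH's msni19), which combines the
# sectoral living-standard gap scores, capacity gaps and impact into a single
# household-level severity index.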
data$msni2 <- msni(education_lsg = data$education_lsg,
fsl_lsg = data$fsl_lsg_2,
health_lsg = data$health_lsg,
protection_lsg = data$protection_lsg,
shelter_lsg = data$shelter_lsg,
wash_lsg = data$wash_lsg_2,
capacity_gaps = data$capacity_gaps,
impact = data$impact)
data$msni3 <- msni(education_lsg = data$education_lsg,
fsl_lsg = data$fsl_lsg_3,
health_lsg = data$health_lsg,
protection_lsg = data$protection_lsg,
shelter_lsg = data$shelter_lsg,
wash_lsg = data$wash_lsg_3,
capacity_gaps = data$capacity_gaps,
impact = data$impact)
data$msni_sev_high <- ifelse(data$msni==3|data$msni==4,1,0)
# HHs found to have severe or extreme sectoral needs in one or more sectors
# lsg_needs_2_cal
data <- data %>%
mutate(
shelter_lsg_class = case_when(
shelter_lsg == 3 | shelter_lsg == 4 ~ 1,
shelter_lsg == 1 | shelter_lsg == 2 ~ 0,
TRUE ~ NA_real_
),
fsl_lsg_class = case_when(
fsl_lsg == 3 | fsl_lsg == 4 ~ 1,
fsl_lsg == 1 | fsl_lsg == 2 ~ 0,
TRUE ~ NA_real_
),
health_lsg_class = case_when(
health_lsg == 3 | health_lsg == 4 ~ 1,
health_lsg == 1 | health_lsg == 2 ~ 0,
TRUE ~ NA_real_
),
protection_lsg_class = case_when(
protection_lsg == 3 | protection_lsg == 4 ~ 1,
protection_lsg == 1 | protection_lsg == 2 ~ 0,
TRUE ~ NA_real_
),
wash_lsg_class = case_when(
wash_lsg == 3 | wash_lsg == 4 ~ 1,
wash_lsg == 1 | wash_lsg == 2 ~ 0,
TRUE ~ NA_real_
),
    # renamed to *_class so the original 1-4 education_lsg severity is kept
    # (it is still used by the MSNI test section further below)
    education_lsg_class = case_when(
      education_lsg == 3 | education_lsg == 4 ~ 1,
      education_lsg == 1 | education_lsg == 2 ~ 0,
      TRUE ~ NA_real_
    )
)
)
lsg_needs_2_cal_vars <- c(
"shelter_lsg_class",
"fsl_lsg_class",
"health_lsg_class",
"protection_lsg_class",
"wash_lsg_class",
"education_lsg"
)
# lsg_needs_2_cal score
data$lsg_needs_2_cal_score <- comp_score(data, lsg_needs_2_cal_vars)
# lsg_needs_2_cal
data <- data %>%
mutate(
lsg_needs_2_cal = case_when(
lsg_needs_2_cal_score == 0 ~ "no_need",
lsg_needs_2_cal_score == 1 ~ "one_need",
lsg_needs_2_cal_score > 1 ~ "two_or_more_need"
)
)
# msni drivers
data <- data %>%
mutate(
fsl_wash_driver = case_when(
fsl_lsg == 3 | fsl_lsg == 4 | wash_lsg == 3 | wash_lsg == 4 ~ "sectoral_need",
fsl_lsg == 1 | fsl_lsg == 2 | wash_lsg == 1 | wash_lsg == 2 ~ "no_need",
TRUE ~ NA_character_
),
impact_driver = case_when(
((impact == 3 | impact == 4) & (health_lsg == 3 | health_lsg == 4)) |
((impact == 3 | impact == 4) & (shelter_lsg == 3 | shelter_lsg == 4)) |
((impact == 3 | impact == 4) & (protection_lsg == 3 | protection_lsg == 4)) ~ "sectoral_need",
TRUE ~ "no_need"
),
    shelter_driver_class = case_when(
      shelter_lsg == 3 | shelter_lsg == 4 ~ 1,
      TRUE ~ 0
    ),
protection_driver_class = case_when(
protection_lsg == 3 | protection_lsg == 4 ~ 1,
TRUE ~ 0
),
health_driver_class = case_when(
health_lsg == 3 | health_lsg == 4 ~ 1,
TRUE ~ 0
),
capacity_gaps_sev = case_when(
capacity_gaps >=3 ~ "high",
capacity_gaps <=2 ~ "low",
TRUE ~ NA_character_
)
)
# esnfi_prot_health_driver
esnfi_prot_health_driver_vars <- c(
"shelter_driver_class",
"protection_driver_class",
"health_driver_class"
)
# esnfi_prot_health_driver score
data$esnfi_prot_health_driver_score <- comp_score(data, esnfi_prot_health_driver_vars)
# lsg_needs_2_cal
data <- data %>%
mutate(
    esnfi_prot_health_driver = case_when(
      esnfi_prot_health_driver_score <= 1 ~ "no_need",
      esnfi_prot_health_driver_score >= 2 ~ "sectoral_need"
    )
)
############### MSNI TEST ###############
data <- data %>%
mutate(
hh_msni_one = case_when(
education_lsg == 1 & fsl_lsg == 1 & health_lsg == 1 & protection_lsg == 1 & shelter_lsg == 1 & wash_lsg == 1 &
capacity_gaps == 1 & impact == 1 ~ "1",
TRUE ~ "1+"
),
hh_msni_one_only_sectors = case_when(
education_lsg == 1 & fsl_lsg == 1 & health_lsg == 1 & protection_lsg == 1 & shelter_lsg == 1 & wash_lsg == 1 ~ "1",
TRUE ~ "1+"
)
)
#########################################
#join main dataset var to hh roster
data_sub <- data %>% select(final_displacement_status_non_displ, region_disagg, urban_disagg,
hoh_sex_disagg, hoh_disabled_disagg, hoh_chronic_illness_disagg, hoh_elderly_disagg,
displacements_disagg, literate_adult_disagg, lcsi_disagg, host_disagg, disp_length_disagg, hoh_sex2_disagg,
behav_change_disagg, child_behav_change_disagg,
tazkira_disagg3, hoh_debt_disagg , vulnerable_group_4, vulnerable_group_7, registered_dissagg, informal_settlement,
child_tazkira_disagg, uuid)
overall_hh_roster <- overall_hh_roster %>%
mutate(
school_age = case_when(
hh_member_age >=6 & hh_member_age <= 18 ~ "school_age",
TRUE ~ "not_school_age"
),
    current_year_attending_na_no = case_when(
      current_year_attending == "no" ~ "no",
      current_year_attending == "yes" ~ "yes",
      school_age == "school_age" ~ "no"
    ),
    edu_removal_shock.no_sch_age = case_when(
      edu_removal_shock.no == 1 ~ 1,
      school_age == "school_age" ~ 0
    ),
    edu_removal_shock.conflict_sch_age = case_when(
      edu_removal_shock.conflict == 1 ~ 1,
      school_age == "school_age" ~ 0
    ),
    edu_removal_shock.displacement_sch_age = case_when(
      edu_removal_shock.displacement == 1 ~ 1,
      school_age == "school_age" ~ 0
    ),
    edu_removal_shock.natural_disaster_sch_age = case_when(
      edu_removal_shock.natural_disaster == 1 ~ 1,
      school_age == "school_age" ~ 0
    ),
    edu_removal_shock_sch_age = case_when(
      edu_removal_shock.no == 1 ~ "yes",
      school_age == "school_age" ~ "no"
    ),
hh_member_age_cat = case_when(
hh_member_age >= 0 & hh_member_age < 6 ~ "0_5",
hh_member_age > 5 & hh_member_age < 19 ~ "6_18",
hh_member_age > 18 & hh_member_age < 60 ~ "19_59",
hh_member_age > 59 ~ "60+"
),
# demographic hh roster data
hh_member_age_cat_gender = case_when(
hh_member_age >= 0 & hh_member_age < 6 & hh_member_sex == "female" ~ "female_0_5",
hh_member_age >= 0 & hh_member_age < 6 & hh_member_sex == "male" ~ "male_0_5",
hh_member_age > 5 & hh_member_age < 19 & hh_member_sex == "female" ~ "female_6_18",
hh_member_age > 5 & hh_member_age < 19 & hh_member_sex == "male" ~ "male_6_18",
hh_member_age > 18 & hh_member_age < 60 & hh_member_sex == "female" ~ "female_19_59",
hh_member_age > 18 & hh_member_age < 60 & hh_member_sex == "male" ~ "male_19_59",
hh_member_age > 59 & hh_member_sex == "female" ~ "female_60+",
hh_member_age > 59 & hh_member_sex == "male" ~ "male_60+"
),
male_female_perc = case_when(
hh_member_sex == "female" ~ "female",
hh_member_sex == "male" ~ "male"
),
## request # 30
school_age_cat_gender = case_when(
hh_member_age > 5 & hh_member_age < 13 & hh_member_sex == "female" ~ "female_6_12",
hh_member_age > 12 & hh_member_age < 19 & hh_member_sex == "female" ~ "female_13_18",
hh_member_age > 5 & hh_member_age < 13 & hh_member_sex == "male" ~ "male_6_12",
hh_member_age > 12 & hh_member_age < 19 & hh_member_sex == "male" ~ "male_13_18",
TRUE ~ NA_character_
)
)
############## demographic hoh data #####################
hoh_data <- data %>%
select(
hh_member_sex = hoh_sex,
hh_member_age = hoh_age,
`_submission__uuid` = uuid,
province,
survey_village) %>%
mutate(
hh_member_age_cat_gender = case_when(
hh_member_age >= 0 & hh_member_age < 6 & hh_member_sex == "female" ~ "female_0_5",
hh_member_age >= 0 & hh_member_age < 6 & hh_member_sex == "male" ~ "male_0_5",
hh_member_age > 5 & hh_member_age < 19 & hh_member_sex == "female" ~ "female_6_18",
hh_member_age > 5 & hh_member_age < 19 & hh_member_sex == "male" ~ "male_6_18",
hh_member_age > 18 & hh_member_age < 60 & hh_member_sex == "female" ~ "female_19_59",
hh_member_age > 18 & hh_member_age < 60 & hh_member_sex == "male" ~ "male_19_59",
hh_member_age > 59 & hh_member_sex == "female" ~ "female_60+",
hh_member_age > 59 & hh_member_sex == "male" ~ "male_60+"
),
male_female_perc = case_when(
hh_member_sex == "female" ~ "female",
hh_member_sex == "male" ~ "male"
),
school_age_cat_gender = case_when(
hh_member_age > 5 & hh_member_age < 13 & hh_member_sex == "female" ~ "female_6_12",
hh_member_age > 12 & hh_member_age < 19 & hh_member_sex == "female" ~ "female_13_18",
hh_member_age > 5 & hh_member_age < 13 & hh_member_sex == "male" ~ "male_6_12",
hh_member_age > 12 & hh_member_age < 19 & hh_member_sex == "male" ~ "male_13_18",
TRUE ~ NA_character_
)
)
hoh_data <- hoh_data %>%
select(
hh_member_sex,
hh_member_age,
hh_member_age_cat_gender,
male_female_perc,
school_age_cat_gender,
province,
survey_village,
`_submission__uuid`
)
roster_data <- overall_hh_roster %>%
select(
hh_member_sex,
hh_member_age,
hh_member_age_cat_gender,
male_female_perc,
school_age_cat_gender,
province,
survey_village,
`_submission__uuid`
)
combined_hoh_and_roster <- rbind(roster_data, hoh_data)
# for demographic
combined_hoh_and_roster_joined <- koboloops::add_parent_to_loop(combined_hoh_and_roster, data_sub, uuid.name.loop = "_submission__uuid", uuid.name.parent = "uuid")
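# koboloops::add_parent_to_loop() is assumed to behave like a left join of the
# parent (household-level) columns onto the loop (member-level) rows, roughly:
# dplyr::left_join(combined_hoh_and_roster, data_sub,
#                  by = c("_submission__uuid" = "uuid"))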
combined_hoh_and_roster_joined <- combined_hoh_and_roster_joined %>%
mutate(
hh_member_under_over_15 = case_when(
hh_member_age <= 15 ~ "15_and_under",
hh_member_age > 15 ~ "over_15",
TRUE ~ NA_character_
)
)
write.csv(combined_hoh_and_roster_joined, "./input/data/recoded/hh_roster_hoh_demographic.csv", row.names = F)
# for education questions
hh_roster_joined <- koboloops::add_parent_to_loop(overall_hh_roster, data_sub, uuid.name.loop = "_submission__uuid", uuid.name.parent = "uuid")
write.csv(hh_roster_joined, "./input/data/recoded/hh_roster.csv", row.names = F)
write.csv(data, "./input/data/recoded/data_with_strata2.csv", row.names = F)
## Test
# File: P2C2M.SNAPP/R/draw.samples2.R
##### Randomly sample from posterior #####
draw.samples <- function(num_sims, gens_run, sample_unif){ # num_sims = user-input number of simulations to perform; gens_run = number of Markov steps saved; sample_unif = if TRUE, sample the posterior uniformly, otherwise sample randomly
burnin <- ceiling(gens_run * 0.10)
non_burnin <- seq(burnin + 1, gens_run, 1) # get sequence of step numbers in non burnin posterior
if (num_sims > length(non_burnin)){ # if # of simulations input is greater than the number of steps in the posterior
post_samples <- non_burnin # use all non_burnin steps
} else{
if (sample_unif == TRUE){
interval <- length(non_burnin) / num_sims # get interval to sample
post_samples <- non_burnin[1]
while (post_samples[length(post_samples)] + interval <= gens_run){
post_samples = c(post_samples, post_samples[length(post_samples)] + interval)
}
post_samples <- sapply(post_samples, floor) # round down
} else{
post_samples <- sort(sample(non_burnin, num_sims)) # randomly sample steps
}
}
return(post_samples)
}
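# Example (hypothetical values): draw 100 posterior samples, spaced uniformly,
# from a run with 10000 saved MCMC steps (the first 10% are treated as burn-in):
# idx <- draw.samples(num_sims = 100, gens_run = 10000, sample_unif = TRUE)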
# File: code/TableS3.R
#Matrix analysis
#Author: Mary Lofton
#Date: 06JUL22
#clear environment
rm(list = ls())
#set-up
pacman::p_load(tidyverse, lubridate, cowplot,ggbeeswarm, viridis)
#read in data
dat5 <- read_csv("./data/cleaned_matrix.csv")
##Table 3 ####
dat10 <- dat5 %>%
mutate(ecosystem_type = ifelse(ecosystem == "river" | grepl("basin",other_ecosystem),"Lotic","Lentic"))
colnames(dat10)
tab3 <- dat10[,c(2,4,3,27,11,12,13,14,18,19,20,21,16,17,23)] %>%
arrange(Year)
tab3[16,"Year"] <- 2022
write.csv(tab3,"Table3.csv",row.names = FALSE)
# File: R/edgepoints.R
edgepoints = function(data, h1n, h2n, asteps = 4, estimator = "kernel", kernel = "mean",
score = "gauss", sigma = 1, kernelfunc = NULL, margin = FALSE) {
epDelta = function(x) {
if (x < 0)
-1
else
1
}
epAt = function(x, y) {
if (x == 0) {
      # note: both branches originally returned pi/2 (dead code); under the
      # usual atan convention the vertical direction for y < 0 is -pi/2
      if (y >= 0)
        pi/2
      else
        -pi/2
} else {
atan(y/x)
}
}
epR1 = function(theta, x, y) {
sqrt(x^2 + y^2) * epDelta(x) * cos(epAt(x, y) - theta)
}
epR2 = function(theta, x, y) {
sqrt(x^2 + y^2) * epDelta(x) * sin(epAt(x, y) - theta)
}
angle = matrix(double(length(data)), nrow=nrow(data))
value = matrix(double(length(data)), nrow=nrow(data))
es = NULL
sc = NULL
ms = sigma * max(data)
if (estimator == "kernel")
es = 0
else if (estimator == "median")
es = 1
else if (estimator == "M_mean")
es = 2
else if (estimator == "M_median")
es = 3
else if (estimator == "test_mean")
es = 5
else if (estimator == "test_median")
es = 6
else
stop("estimator \"", estimator, "\" unknown.")
if (es==2 || es ==3) {
if (score == "gauss") {
sc = 0
}
if (score == "huber") {
sc = 1
#ms = sigma/2
}
if (score == "mean") {
sc = 9
}
}
env = ceiling(sqrt((h1n * nrow(data))^2 + (h2n * ncol(data))^2))
kernmat = NULL
if (kernel == "mean" || es >= 5) {
kern = 0
} else if (kernel == "linear") {
kern = 1
} else if (kernel == "linear2") {
kern = 2
} else if (kernel == "gauss") {
kern = 3
} else if (kernel == "func") {
kern = 4
kernmat = double(asteps * (2 * env + 1)^2)
for (i in ((-env):env)) {
for(j in ((-env):env)) {
for (k in (0:(asteps - 1))) {
theta = -pi/2 + (k * pi/asteps)
x = epR1(theta, i/nrow(data), j/ncol(data))/h1n
y = epR2(theta, i/nrow(data), j/ncol(data))/h2n
kernmat[k * (2 * env + 1)^2 + (i + env) * (2 * env + 1) + (j + env) + 1] =
kernelfunc(2 * x, y)
}
}
}
} else {
stop("kernel \"",kernel,"\" unknown.")
}
if (es == 1)
kern = 0
if (!is.null(es)) {
result = .C("c_edgepoints",
as.double(data),
nrow(data),
ncol(data),
as.integer(kern), # kernel
as.double(h1n),
as.double(h2n),
as.integer(es),
      as.integer(sc), # type of the score function
      as.double(sigma), # sigma
      as.double(kernmat), # weight matrix
      as.double(ms), # maximum step size
as.integer(asteps),
angle = angle,
value = value,
PACKAGE = "edci")
}
value = result$value
angle = result$angle
if (es == 5 || es == 6)
value = -value
if (margin == FALSE) {
if (es == 5 || es == 6)
v = 1
else
v = 0
value[c(1:env,(nrow(value) - env + 1):nrow(value)), ] = v
value[, c(1:env, (ncol(value) - env+1):ncol(value))] = v
} else if (margin == "cut") {
value = value[(env + 1):(nrow(value) - env), (env + 1):(ncol(value) - env)]
angle = angle[(env + 1):(nrow(angle) - env), (env + 1):(ncol(angle) - env)]
}
list(value = value, angle = angle)
}
eplist = function(data, maxval, test = FALSE, xc = NULL, yc = NULL) {
if (test == TRUE) {
data[[1]] = -data[[1]]
maxval = -maxval
}
n = sum(data[[1]] > maxval)
if (is.null(xc))
xc = seq(1/nrow(data[[1]]), 1, 1/nrow(data[[1]]))
if (is.null(yc))
yc = seq(1/ncol(data[[1]]), 1, 1/ncol(data[[1]]))
o = order(data[[1]], decreasing = TRUE)[1:n]
result = cbind(xc[(o - 1) %% nrow(data[[1]]) + 1], yc[(o - 1) %/% nrow(data[[1]]) +1 ],
data[[2]][o])
colnames(result) = c("x", "y", "angle")
result
}
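# Example usage (hypothetical inputs): detect edge points in a grey-value
# matrix 'img' with bandwidths of 5% of the image in each direction, then list
# the coordinates and angles of points whose estimated jump exceeds 0.1:
# ep <- edgepoints(img, h1n = 0.05, h2n = 0.05)
# edges <- eplist(ep, maxval = 0.1)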
# File: Modulo 3- Resampling-Bayesianos-Markov/Ejercicio 3.3/Ejercicio3_3_MarianaSilvera.R
library(FSAdata)
library(MASS)
library(dplyr)
#library(help="FSAdata")
# load the data
#data <- WalleyeErie2
summary(WalleyeErie2)
data <- subset(x = WalleyeErie2, subset = !is.na(w)) # drop incomplete records (missing w)
summary(data)
set.seed(1) # seed for the random number generator
data <- data %>% mutate_at(vars("age"), factor) # convert the age column to a factor
# take 80% of the data for training
intrain <- sample(1:nrow(data), size = round(0.8*nrow(data)))
# fit a model for age from the remaining variables
lda.fit <- lda(age ~ ., data = data, subset = intrain)
lda.fit
# check how well the fitted linear discriminant performs
lda.pred <- predict(lda.fit, data)
names(lda.pred)
# get the predicted class
lda.class <- lda.pred$class
# build the confusion matrix
table(lda.class, data$age)
# see how well it fits, using the mean accuracy
mean(lda.class == data$age)
# result: accuracy of 0.6571231
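# Note: the accuracy above is computed on the full data set, including the 80%
# used for training. A held-out assessment (hypothetical follow-up) would
# predict only on the remaining 20%:
# test <- data[-intrain, ]
# mean(predict(lda.fit, test)$class == test$age)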
eb3d9c97b02f6f8d4ca16e857d987432473f6d4c | 89d2d6b83bb0fcad3db66b139a617b0cc40bf34a | /R3-Aliona.R | dfc622532aca8311dc0e2430e94dcfdf29a65c9b | [] | no_license | alionahst/R3 | 5e6760cab681ab10149267ed31884ccb16cc6eb5 | d980eddc32efd762b3178bc3933b8ba486929944 | refs/heads/master | 2023-01-03T05:25:17.275377 | 2020-10-20T22:13:42 | 2020-10-20T22:13:42 | 305,684,425 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,799 | r | R3-Aliona.R | #Chapter 3: Basic graphics and data - Aliona Hoste
demo(graphics)
plot(iris)
#1. Plot a cheat-sheet with values of color and point type (col = , and pch = ) from 1 to 25, and export it as a jpeg of 15 cm wide, 6 cm high and resolution 100 points per cm.
plot(0, 0, xlim = c(0, 26), ylim = c(0.5, 1.5)
, ylab = "color", xlab = "color number", yaxt = "n")
for (i in 1:25) {
points(i, 1, pch = i, col = i, cex = 1.5)
}
jpeg(filename = "firstplot.jpeg", width = 15, height = 6, units = "cm", res = 100)
plot(0, 0, xlim = c(0, 26), ylim = c(0.5, 1.5)
, ylab = "colors & sign", xlab = "color number", yaxt = "n")
for (i in 1:25) {
points(i, 1, pch = i, col = i, cex = 1.5)
}
dev.off()
#2. Plot into a graph ten Poisson distributions with lambda ranging from 1 to 10. Put legend and title. Export it as a .tiff file with size of 15x15 cm.
x <- seq(-1, 20, 1) # Sequence
y <- dpois(x, lambda = 1) # densities for x
plot(x, y, type = "n") # Empty plot (type = "n")
for(i in 1:10){
y <- dpois(x, lambda = i)
lines(x, y, col = i)
}
title(main="Poisson distribution, lambda = 1:10")
legend("topright", legend = paste("lambda =", 1:10),lty = 1, col = 1:10)
#export into tiff plot
tiff("Plot1_poisson_1to10.tiff", width = 15, height = 15, units = "cm",
bg = "transparent", res = 150) # Open the device "Plot1.tiff"
x <- seq(-1, 20, 1) # Sequence
y <- dpois(x, lambda = 1) # densities for x
plot(x, y, type = "n") # Empty plot (type = "n")
for(i in 1:10){
y <- dpois(x, lambda = i)
lines(x, y, col = i)
}
title(main="Poisson distribution, lambda = 1:10")
legend("topright", legend = paste("lambda =", 1:10),lty = 1, col = 1:10)
dev.off()
#3. Import data from this article: https://peerj.com/articles/328/
Webcsv <- "https://dfzljdn9uc3pi.cloudfront.net/2014/328/1/Appendix1.csv"
Data <- read.table(Webcsv, header = T, sep = ",", skip = 2)
str(Data)
#Be careful importing the data. Notice that you have to skip two first lines using “skip = 2”13.
#With these data, using for(), plot graphs to represent the effect of all the numerical variables, from “richness” to “mean_quality” on “yield”. Choose the type of graph that you think better represents this effect for the different species. Create only one pdf with all the graphs inside.
#To find the best graph for each type of data, a very helpful web is from Data to Viz https://www.data-to-viz.com/.
plot(Data[-1])
plot(Data$mean_yield ~ Data$richness)
for(i in names(Data[6:12]))
{
plot(Data$mean_yield ~ Data[[i]], ylab = "Mean yields", xlab = as.character(names(Data[i])))
title(main= paste("Mean yield in function of", as.character(names(Data[i]))))
}
|
% File: man/sf_execute_report.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics-report.R
\name{sf_execute_report}
\alias{sf_execute_report}
\title{Execute a report}
\usage{
sf_execute_report(
report_id,
async = FALSE,
include_details = TRUE,
labels = TRUE,
guess_types = TRUE,
bind_using_character_cols = deprecated(),
as_tbl = TRUE,
report_metadata = NULL,
verbose = FALSE
)
}
\arguments{
\item{report_id}{\code{character}; the Salesforce Id assigned to a created
analytics report. It will start with \code{"00O"}.}
\item{async}{\code{logical}; an indicator, by default set to \code{FALSE}, of whether
to execute the report asynchronously. If executed asynchronously, this function
will return a list of attributes of the created report instance. The results
can be pulled down by providing the report id and instance id to
the function \code{\link{sf_get_report_instance_results}}. Refer to the details
of the documentation on why executing a report asynchronously is preferred.}
\item{include_details}{\code{logical}; an indicator applying to a synchronous
run, indicating whether it should return summary data with details.}
\item{labels}{\code{logical}; an indicator of whether the returned data should
be the label (i.e. formatted value) or the actual value. By default, the labels
are returned because these are what appear in the Salesforce dashboard and
more closely align with the column names. For example, "Account.Name" label
may be \code{"Account B"} and the value \code{0016A0000035mJEQAY}. The former
(label) more accurately reflects the "Account.Name".}
\item{guess_types}{\code{logical}; indicating whether or not to use \code{col_guess()}
to try and cast the data returned in the recordset. If \code{TRUE} then
\code{col_guess()} is used, if \code{FALSE} then all fields will be returned
as character. This is helpful when \code{col_guess()} will mangle field values
in Salesforce that you'd like to preserve during translation into a \code{tbl_df},
like numeric looking values that must be preserved as strings ("48.0").}
\item{bind_using_character_cols}{\code{logical}; an indicator of whether to
cast the data to all character columns to ensure that \code{\link[dplyr:bind]{bind_rows}}
does not fail because two paginated recordsets have differing datatypes for the
same column. Set this to \code{TRUE} rarely, typically only when having this
set to \code{FALSE} returns an error or you want all columns in the data to be
character.}
\item{as_tbl}{\code{logical}; an indicator of whether to convert the parsed
JSON into a \code{tbl_df}.}
\item{report_metadata}{\code{list}; a \code{list} with one element named
\code{"reportMetadata"} having additional list elements underneath. All possible
elements of \code{reportMetadata} are documented
\href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_getbasic_reportmetadata.htm#analyticsapi_basicmetadata}{HERE},
but you will most commonly only need to specify the following 3 elements to
filter or query the results of an existing report:
\describe{
\item{reportFormat}{A \code{character} specifying the format of the report
with possible values: \code{"TABULAR"}, \code{"SUMMARY"}, \code{"MATRIX"},
or \code{"MULTI_BLOCK"}.}
\item{reportBooleanFilter}{A \code{character} denoting how the individuals
filters specified in \code{reportFilters} should be combined. For example,
\code{"(1OR4)AND2AND3"}}.
\item{reportFilters}{A \code{list} of reportFilter specifications. Each must
be a list with 3 elements: 1) \code{column}, 2) \code{operator}, and 3) \code{value}.
You can find out how certain field types can be filtered by reviewing the results
of \code{\link{sf_list_report_filter_operators}}.}
}}
\item{verbose}{\code{logical}; an indicator of whether to print additional
detail for each API call, which is useful for debugging. More specifically, when
set to \code{TRUE} the URL, header, and body will be printed for each request,
along with additional diagnostic information where available.}
}
\value{
\code{tbl_df} by default, but a \code{list} when \code{as_tbl=FALSE},
which means that the content from the API is converted from JSON to a list
with no other post-processing.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
Get summary data with or without details by running a report synchronously or
asynchronously through the API. When you run a report, the API returns data
for the same number of records that are available when the report is run in
the Salesforce user interface. Include the \code{filters} argument in your
request to get specific results on the fly by passing dynamic filters,
groupings, and aggregates in the report metadata. Finally, you may want to
use \code{\link{sf_run_report}}.
}
\details{
Run a report synchronously if you expect it to finish running quickly.
Otherwise, we recommend that you run reports through the API asynchronously
for these reasons:
\itemize{
\item Long running reports have a lower risk of reaching the timeout limit
when run asynchronously.
\item The 2-minute overall Salesforce API timeout limit doesn’t apply to
asynchronous runs.
\item The Salesforce Reports and Dashboards REST API can handle a higher
number of asynchronous run requests at a time.
\item Since the results of an asynchronously run report are stored for a
24-hr rolling period, they’re available for recurring access.
}
Before you filter a report, it is helpful to check the following properties in the metadata
that tell you if a field can be filtered, the values and criteria you can filter
by, and filters that already exist in the report:
\itemize{
\item filterable
\item filterValues
\item dataTypeFilterOperatorMap
\item reportFilters
}
}
\section{Salesforce Documentation}{
\itemize{
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_getreportrundata.htm}{Sync Documentation}
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_get_reportdata.htm#example_sync_reportexecute}{Sync Example}
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_instances_summaryasync.htm}{Async Documentation}
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_get_reportdata.htm#example_report_async_instances}{Async Example}
\item \href{https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_filter_reportdata.htm#example_requestbody_execute_resource}{Filtering Results}
}
}
\examples{
\dontrun{
# first, get the Id of a report in your Org
all_reports <- sf_query("SELECT Id, Name FROM Report")
this_report_id <- all_reports$Id[1]
# then execute a synchronous report that will wait for the results
results <- sf_execute_report(this_report_id)
# alternatively, you can execute an async report and then grab its results when done
# - The benefit of an async report is that the results will be stored for up to
# 24 hours for faster recall, if needed
results <- sf_execute_report(this_report_id, async=TRUE)
# check if completed and proceed if the status is "Success"
instance_list <- sf_list_report_instances(report_id)
instance_status <- instance_list[[which(instance_list$id == results$id), "status"]]
if(instance_status == "Success"){
results <- sf_get_report_instance_results(report_id, results$id)
}
# Note: For more complex execution use the report_metadata argument.
# This can be done by building the list from scratch based on Salesforce
# documentation (not recommended) or pulling down the existing reportMetadata
# property of the report and modifying the list slightly (recommended).
# In addition, for relatively simple changes, you can leverage the convenience
# function sf_report_wrapper() which makes it easier to retrieve report results
report_details <- sf_describe_report(this_report_id)
report_metadata <- list(reportMetadata = report_details$reportMetadata)
report_metadata$reportMetadata$showGrandTotal <- FALSE
report_metadata$reportMetadata$showSubtotals <- FALSE
fields <- sf_execute_report(this_report_id,
report_metadata = report_metadata)
}
}
\seealso{
Other Report functions:
\code{\link{sf_copy_report}()},
\code{\link{sf_create_report}()},
\code{\link{sf_delete_report}()},
\code{\link{sf_describe_report_type}()},
\code{\link{sf_describe_report}()},
\code{\link{sf_list_report_fields}()},
\code{\link{sf_list_report_filter_operators}()},
\code{\link{sf_list_report_types}()},
\code{\link{sf_list_reports}()},
\code{\link{sf_query_report}()},
\code{\link{sf_run_report}()},
\code{\link{sf_update_report}()}
}
\concept{Report functions}
|
7015d5870ad5056141a600ab0b532cfd67a48a59 | e56da52eb0eaccad038b8027c0a753d9eb2ff19e | /man-roxygen/tipsForTreeGeneration.R | b3874469bd8aa861c1cbae942f72fce3a7ff9898 | [] | no_license | ms609/TreeTools | fb1b656968aba57ab975ba1b88a3ddf465155235 | 3a2dfdef2e01d98bf1b58c8ee057350238a02b06 | refs/heads/master | 2023-08-31T10:02:01.031912 | 2023-08-18T12:21:10 | 2023-08-18T12:21:10 | 215,972,277 | 16 | 5 | null | 2023-08-16T16:04:19 | 2019-10-18T08:02:40 | R | UTF-8 | R | false | false | 174 | r | tipsForTreeGeneration.R | #' @param tips An integer specifying the number of tips, or a character vector
#' naming the tips, or any other object from which [`TipLabels()`] can
#' extract leaf labels.
|
15f17c33f851b0ab97d37c7507f338f9cc08551e | d30fa10aa7b3837145a1d1f0bcff6a55372ea4eb | /plot_kmer_dist.R | a39c14daba7632aabe23b7da8c1d0a54f095915a | [] | no_license | mborche2/Matts_Satellite_Size_Code | 541bfdada9a61238ecb6c59594dbfd5e60766e97 | 824fcf6e8f4ab555df774baa9cd8caf6dd8200ae | refs/heads/master | 2023-03-28T07:29:31.677930 | 2021-03-23T18:57:52 | 2021-03-23T18:57:52 | 348,837,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 689 | r | plot_kmer_dist.R | library(ggplot2)
setwd("/n/core/bigDataAI/Genomics/Gerton/jennifer_gerton/jeg10/")
for (i in 1:21){
filenam <- paste("plots/kmer_frequency_asp/kmer_frequency_",toString(i),"_array.tsv",sep = "")
array_specifics <- read.table(filenam,header = FALSE)
freq_table <- table(array_specifics[,2])
freq_df <- as.data.frame(freq_table)
freq_df[,2] <- log(freq_df[,2])
filenam2 <- paste("kmer_frequency_",toString(i),"_array_standard_axes.png",sep="")
png(filenam2)
plot(freq_df,xlab="Number of Occurences of Kmer in Array",ylab="Log (ln) Frequency of Kmer Occurence Number",axes=FALSE)
axis(side=1, at=seq(0,4000,by=25))
axis(side=2, at=seq(0, 10, by=1))
box()
dev.off()
}
|
393e68c42ae3b36432c1265386c913a44b8e6d7e | c97fa9aadc45c44fad6433ae10c772060bde355c | /MyNotes/03 - Geting and Cleaning Data/01 Class_Data.Table_Package.R | 41cd46ae55c1ae3679c91b186b783fad89090d5a | [] | no_license | vitorefigenio/datasciencecoursera | 9866816242d39fa9fc9520bc4d543efc815afeb5 | 03722d0c7c6d219ec84f48e02065493f6657cc0a | refs/heads/master | 2021-01-17T11:17:58.099767 | 2016-02-28T03:06:37 | 2016-02-28T03:06:37 | 29,034,385 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 943 | r | 01 Class_Data.Table_Package.R | #data.table package
# Create Data.Table
install.packages("data.table")
library(data.table)
DF = data.frame(x=rnorm(9), y=rep(c("a","b","c"), each=3), z=rnorm(9))
head(DF,3)
DT = data.table(x=rnorm(9), y=rep(c("a","b","c"), each=3), z=rnorm(9))
head(DT,3)
# comando ara ver tdas as abelas criadas na memória
tables()
# Subsetting rows
DT[2,]
DT[DT$y=="a"]
DT[c(2,3)]
# Subseting columns
DT[,c(2,3)]
# É comum o uso de expressões
{
x=1
y=2
}
k = {print(10);5}
print(k)
# Calculating values for variables with expressions
DT[,list(mean(x),sum(z))]
DT[,table(y)]
# Adding new column
DT[,w:=z^2]
DT
# Multiple Operations
DT[,m:={tmp <- (x+z); log2(tmp+5)}]
plot(DT[,m])
# plyr like operations
DT[, a:=x>0]
DT[,b:= mean(x+w), by=a]
DT
# Special Variables
set.seed(123)
DT <- data.table(x=sample(letters[1:3], 1E5, TRUE))
DT[, .N, by=x]
# keys
DT = data.table(x=rep(c("a","b","c"), each=100), z=rnorm(300))
setkey(DT,x)
DT['a'] |
12d5a52eb7e5fb10a0b5d87bdc8740c29b7c2a5a | 39315660a0226ae527ec8e0c7e6ae866df675b5f | /exercise1/computeCost.R | 5057e5c02d42dd31073fb2393dff4c7ded690bc3 | [] | no_license | Lemmawool/R-Practice | 28a7ce208f7d012eb4bc886fdb27b72754a171e9 | 0c3bed53e27953e9f19f92fd6e7b595a7e379262 | refs/heads/master | 2021-05-14T13:44:03.051151 | 2018-01-22T02:02:51 | 2018-01-22T02:02:51 | 115,955,944 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 105 | r | computeCost.R | computeCost <- function(X, y, theta){
m = length(y)
return ((1/(2*m)) * sum((X %*% theta - y) ^ 2))
} |
e662f9c90536aa7a7802ef2046cda55ac460d02e | 63227ea5a4085bb789824448502c95a98d8f375f | /cachematrix.R | 4e87ebeb3d25a82fe63b07127301dae99ce920d6 | [] | no_license | lfdelama/ProgrammingAssignment2 | f81f6ae4cf9246cc21a2fce019bc59a04949303d | 417909969f9fdd8c4d23700e1fcf535237a2c2ec | refs/heads/master | 2020-12-24T14:18:50.589091 | 2014-05-22T21:32:07 | 2014-05-22T21:32:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,314 | r | cachematrix.R | ## These two functions below are used to cache the inverse of a square matrix,
## so every time the same inverse is required, it doesn't need to be recomputed.
## This function creates a special matrix which
## contains a list of the following functions:
## - set, to set the value of the matrix
## - get, to get the value of the matrix
## - setinverse, to set the value of the inverse of the matrix
## - getinverse, to get the value of the inverse of the matrix
makeCacheMatrix<- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function calculates and returns the inverse of the special matrix that
## it was created with the above function.
## If the inverse of the matrix has previously calculated, this function will return
## directly the value stored in the cache of the makeCacheMatrix function.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
48f4c3afd8bf9957f151bbbad760e9b7f9c317fe | 64e7ac1d0437b1d874b4ed070e6bda152decddee | /plot2.R | e2892d66195304ed5b560890e85b152215d7920e | [] | no_license | mooctus/ExData_Plotting1 | 072db8facebd27a8a8aab985be057b9b2c2b8122 | 005cba7dd9d88e94113a57eb6f8d77b9a3618811 | refs/heads/master | 2021-01-12T20:07:17.994147 | 2014-05-09T15:54:17 | 2014-05-09T15:54:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 569 | r | plot2.R | Sys.setlocale(category = "LC_ALL", locale = "C")
df <- read.table(file="household_power_consumption.txt", sep=";", na.strings="?", header=TRUE)
df$Time <- strptime(
paste0(df$Date, " ", df$Time),
format=paste0("%d/%m/%Y %H:%M:%S")
)
df$Date <- as.Date(df$Date,format="%d/%m/%Y")
df1 <- df[df$Date %in% as.Date(c('2007-02-01', '2007-02-02')),]
png(filename="plot2.png",width=480, height=480)
with (df1,
plot(Time, Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)")
)
with (df1,
lines(Time, Global_active_power)
)
dev.off()
|
ec9f4ad17398e6d6778438d88eaed81be1b890ff | e5a584e854ce025a135511f692dfc8e7ec178d49 | /grid.R | 07406eefe438ffe694bf2a7367e2ff8229779ff3 | [] | no_license | statspheny/sta242hw2 | 0fdb99c18f21c91dc990fd345c63daaf51067daa | 20aab766d40e91c16f3742f80bc3a953dc0c8846 | refs/heads/master | 2021-01-01T18:11:27.640548 | 2013-02-12T06:40:39 | 2013-02-12T06:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,593 | r | grid.R | newTrafficGrid = function(nrow,ncol) {
## This is a function that will
x = matrix(0,nrow=nrow,ncol=ncol)
class(x) = "trafficGrid"
return(x)
}
generateBMLgrid = function(nrow,ncol,nred,nblue) {
## the dimension of the grid
bmldim = c(nrow,ncol)
## randomly sample from nrow*ncol to get the correct number of red and blue cars
n = nrow*ncol
ncars = nred+nblue
## 1. randomly get ncars 2. of ncars, select the red ones 3. set
## the rest to be blue
cars = sample(1:n,ncars)
redcars = sample(cars,nred)
bluecars = cars[!(cars %in% redcars)]
obj = list(dim=bmldim,redcars=redcars,bluecar=bluecars)
class(obj) = "BMLgrid"
return(obj)
}
## generateBMLgrid = function(nrow,ncol,prob) {
## n = nrow*ncol
## data = sample(c(0,1,-1),size=n,prob=c(1-prob,prob/2,prob/2),replace=TRUE)
## x = matrix(data,nrow=nrow,ncol=ncol)
## redval = which(x==-1)
## blueval = which(x==1)
## obj = list()
## obj$matrix = x
## obj$red = redval
## obj$blue = blueval
## class(obj) = "BMLgrid"
## return(obj)
## }
changeCarProb = function(bgrid,prob) {
## This is a function that changes the probability of cars on the
## grid
return(bgrid)
}
changeCarNumber = function(bgrid,nred=NULL,nblue=NULL) {
## This is a function that changes the number of red or blue cars
## on the grid. If NULL, then the number of cars stays the same
return(bgrid)
}
## Methods
summary.BMLgrid = function(BMLgrid) {
print(BMLgrid)
}
plot.BMLgrid = function(obj) {
red = "#FF0000FF"
white = "#FFFFFFFF"
blue = "#0000FFFF"
shiftedmat = t(apply(obj$matrix,2,rev))
image(shiftedmat,col=c(red,white,blue))
}
checkIfCarStuck = function(toMove,inPlace) {
## This function checks if the blue car is stopped behind a red car
return(toMove %in% inPlace)
}
updateBlueCar = function(obj) {
## Each blue car moves up
mat = obj$matrix
nrow = nrow(mat) # number of rows
blueOld = obj$blue
## blueOld-1 moves the each car back one space
## !(blueOld%%ncol-1) is a logical for the cars at the end that reset
## add ncol to the cars that are reset
blueMove = (blueOld-1) + as.numeric(blueOld%%nrow==1)*nrow
## keep the old indices for the cars that are stuck behind old cars
stuckCars = checkIfCarStuck(blueMove,obj$red)
blueMove[stuckCars] = blueOld[stuckCars]
mat[blueOld]=0 #set old blue to zero
mat[blueMove]=1 #set new blue to 1
obj$matrix = mat
obj$blue = blueMove
return(obj)
}
|
7c95eaaba2e639e23869e5b4d852212db33c02c7 | 1ea27108545233075e57b2cc5c3b0ceeeb0c76d9 | /R_model_garch.R | e47c8a221f9921a9953a323155a4a2c5882370dc | [] | no_license | zhen-yang8/Stats451_group | a2af5cd72a2bede65e98d14df8eb41d5b07ac2fb | d8f3a2b44352498433e99fc72e2af53b3b322fed | refs/heads/main | 2023-01-22T13:16:31.244087 | 2020-12-05T01:05:49 | 2020-12-05T01:05:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 918 | r | R_model_garch.R | library(loo)
library(rstan)
garch11_setup <- rstan::stan_model(file = './Stan/model_garch.stan')
dat = read.csv("./data/bitcoin_train.csv")
test = read.csv("./data/bitcoin_test.csv")
y = dat$log_return
N = length(y)
y_test = test$log_return
J = length(y_test)
stan_data <- list(y = y,N = N,sigma1 = 0.01, J = J, y_test = y_test)
garch11 <- rstan::sampling(garch11_setup, data = stan_data)
p1 = stan_plot(garch11, pars = c("mu", "alpha0", "alpha1", "beta1")) +
ggtitle("GARCH(1,1)")
p1
p2 = stan_trace(garch11,
pars = c("mu", "alpha0", "alpha1", "beta1")) +
ggtitle("GARCH(1,1)")
p2
garch11_fit <- rstan::extract(garch11, permuted = TRUE)
mean(garch11_fit$mu)
# Compute the loglikelihood
log_lik <- extract_log_lik(garch11, merge_chains = FALSE)
r_eff <- relative_eff(exp(log_lik), cores = 2)
loo <- loo(log_lik, r_eff = r_eff, cores = 2)
print(loo)
plot(loo, main = "GARCH PSIS diagnostic plot")
|
e9b0112a388da3956102d6a142e3159e47c7b12f | 4c9b4fc35664cf660189b490ffb11ff562d06439 | /man/computeOverallImbalance.Rd | 63d73f98eecfbf4bb1770928a3396fa98a914531 | [] | no_license | atrihub/SRS | 41494f7cef824d0424828a1e0d835b432e7695ac | a0cf093d1565a937d0bc0821d6aaa781e649a951 | refs/heads/master | 2023-05-08T07:28:31.477902 | 2021-05-24T19:30:27 | 2021-05-24T19:30:27 | 104,920,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,861 | rd | computeOverallImbalance.Rd | \name{computeOverallImbalance}
\alias{computeOverallImbalance}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ A function to compute overall imbalances for each treatment }
\description{
This function computes the overall treatement imbalances for each treatment
}
\usage{
computeOverallImbalance(object, imbalances)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ An object of class \code{PocockSimonRandomizer}}
\item{imbalances}{ \code{imbalances} a matrix of imbalance measures,
usually the result of the function call to
\code{\link{computeImbalances}}}
}
\details{
}
\value{
A vector of imbalances resulting from assigning each of the
treatments, in turn.
}
\references{ Sequential Treatment Assigment with Balancing for Prognostic
Factors in the Controlled Clinical Trial, by S.~J.~Pocock and
R.~Simon, Biometrics, 31, 103-115}
\author{ Balasubramanian Narasimhan }
\note{ }
\seealso{ }
\examples{
expt <- ClinicalExperiment(number.of.factors = 3,
number.of.factor.levels = c(2, 2, 3),
number.of.treatments = 3)
r.obj <- new("PocockSimonRandomizer", expt, as.integer(12345))
computeOverallImbalance(r.obj, computeImbalances(r.obj, c("1","2","2")))
##
## Another example
##
ex.matrix <- matrix (c(9,8,8,9,8,4,5,
10,7,6,11,8,5,4,
9,7,7,9,8,3,5), nrow=3, byrow=TRUE)
rownames(ex.matrix) <- c("Tr1", "Tr2", "Tr3")
colnames(ex.matrix) <- c("F1:1", "F1:2", "F2:1", "F2:2", "F3:1", "F3:2", "F3:3")
stateTable(r.obj) <- ex.matrix
computeOverallImbalance(r.obj, computeImbalances(r.obj, c("1","2","2")))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ distribution }% __ONLY ONE__ keyword per line
|
cce30da1726ce79e12a8071b14d1336ed9eafb45 | 7c32bd1a1ea4b9a9bab53dcd206e9154206e7bab | /samples/R_Models/LogisticReg_Rmodel/training.R | e09007a64eaa9d9b28bb13e47e141645067d10df | [
"Apache-2.0"
] | permissive | paataugrekhelidze/model-management-resources | 55d92159fb5ffd97460c44f0d495cdb8308d96da | e3cc8719f349f9755690a4cf87f7e75574966e9c | refs/heads/main | 2023-08-21T11:56:30.560413 | 2021-09-23T18:02:59 | 2021-09-23T18:02:59 | 424,690,327 | 0 | 0 | Apache-2.0 | 2021-11-04T17:57:08 | 2021-11-04T17:57:08 | null | UTF-8 | R | false | false | 622 | r | training.R | # Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
inputdata <- read.csv(file="hmeq_train.csv", header=TRUE, sep=",")
attach(inputdata)
# -----------------------------------------------
# FIT THE LOGISTIC MODEL
# -----------------------------------------------
reg<- glm(BAD ~ VALUE + factor(REASON) + factor(JOB) + DEROG + CLAGE + NINQ + CLNO , family=binomial)
# -----------------------------------------------
# SAVE THE OUTPUT PARAMETER ESTIMATE TO LOCAL FILE OUTMODEL.RDA
# -----------------------------------------------
save(reg, file="reg.rda")
|
473c2e4ddd8a5eda52aa13b7c5dd97e6401b60c7 | 4487f71ef15b6712e60cc28a6e6e4918abf612fa | /Popgen_HW1.R | 2994020562d6e41de964c2af4352d54ccc0e6aaa | [] | no_license | maccwinter/Genanalyse | 8cf3d5e4c370da0489b33f39d8920f7016a5e014 | 757073491f7e062ecda1c3b009dc2924ef4e163a | refs/heads/master | 2020-07-31T03:54:31.650358 | 2019-09-25T18:03:13 | 2019-09-25T18:03:13 | 210,476,455 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,498 | r | Popgen_HW1.R | #Popgen HW 1
#1B
#The genotype frequences can be represented as by the following, where the allele frequencies of S, I and G are represented by fs, fi, and fg, repectively.
#GSS = fs^2
#GFF =ff^2
#GII = fi^2
#GSF = 2fs*ff
#GSI = 2fs*fi
#GFI = 2ff*fi
#1B
#tot represents the total population number
tot <- 141 + 111 + 15 + 28 + 32 + 5
tot
#The population total is 332 individuals
#allelefreq is a function to calculate the allele frequencies where x represents homozygotes where y and z represent the two different heterozygotes.
allelefreq <- function(x,y,z) {(x + (y + z)/2)/tot}
#fs represents the allele frequency of S
fs <- allelefreq(141,111,15)
fs
#fs is 0.6144578
#ff is the allele frequency for the F allele
ff <- allelefreq(28,32,111)
ff
#ff is 0.2996988
#fi is the allele frequency of I
fi <- allelefreq(5,32,15)
fi
#fi is 0.08584337
#fs, fi, and ff need to add to 1
fs + fi + ff
#and they do :)
#And now to calculate genotype frequencies.
GSS = fs^2
GSS
#GSS is 0.3775584
GFF =ff^2
GFF
#GFF is 0.08981937
GII = fi^2
GII
#GII is 0.007369085
GSF = 2*fs*ff
GSF
#GSF is 0.3683045
GSI = 2*fs*fi
GSI
#GSI is 0.1054943
GFI = 2*ff*fi
GFI
#GFI is 0.05145431
#These genotype frequences should add to 1
GSS + GFF + GII + GFI +GSF +GSI
#AND THEY DO!!
#ge is the expected genotype totals of all individuals in the population
ge <- tot*c(GSS,GFF,GII,GSF,GSI,GFI)
ge
#egenotypenames is an object I'm going to use just to symbolically represent each expected genotype.
egenotypenames <- c("SS","FF","II","SF","SI","FI")
egenotypenames
egnames <- as.list(egenotypenames)
#Now I am going to represent the total amount of individuals per genotype with the following array: expectedgenotypes
expectedgenotypes <- data.frame(ge, row.names = egnames)
expectedgenotypes
# Expected Genotypes
#SS 125.349398
#FF 29.820030
#II 2.446536
#SF 122.277108
#SI 35.024096
#FI 17.082831
#go is the observed genotypes
go <- c(141,28,5,111,15,32)
go
#dif is the difference between and expected genotypes
dif <- go-ge
dif
#dif 2 is the square values of the differences between observed and expected genotypes
dif2 <- dif^2
#prechi is the division for genotypes of (observed - expected) by the corresponding expected values
prechi <- dif2/ge
prechi
#chi2 is the sum of all the values in prechi (aka the chi squared value)
chi2 <- sum(prechi)
chi2
#The chi squared value is 30.24456
#There are 2 non-independent variables and 6 possible genotypes. So k, the degrees of freedome is:
k <- 6-1-2
k
#There are 3 degrees of freedom.
#This chi squared value at a k of 3 statistically deviates from Hardy Weinberg.
#Question 2
#homo represents the frequency of homozygotes (based on the Hardy-Weinberg model) and hete represents the heterozygote frequency
homo<- function(p,q){(p^2)+(q^2)}
hete <- function(p,q){2*p*q}
homop0 <- homo(0,1)
homop0
homop0.25 <- homo(0.25,0.75)
homop0.25
homop0.5 <- homo(0.5,0.5)
homop0.5
homop0.75 <- homo(0.75,0.25)
homop0.75
homop1 <- homo(1,0)
homozygotes <-c(homop0,homop0.25,homop0.5,homop0.75,homop1)
homozygotes
heteq1 <- hete(0,1)
heteq0.75 <- hete(0.25,0.75)
heteq0.5 <- hete(0.5,0.5)
heteq0.25 <- hete(0.75,0.25)
heteq0 <- hete(1,0)
heterozygotes <- c(heteq0,heteq0.25,heteq0.5,heteq0.75,heteq1)
heterozygotes
pfreq <- list("0","0.25","0.5","0.75","1")
pfreq
genotype_frequencies <- data.frame(homozygotes, heterozygotes, row.names = pfreq)
genotype_frequencies
#p homozygotes heterozygotes
#0 1.000 0.000
#0.25 0.625 0.375
#0.5 0.500 0.500
#0.75 0.625 0.375
#1 1.000 0.000
#Heterozygote frequencies are maximized when p (and also q) is 0.5.
#Question 3
#D = gOD-pOpD where D is the non-d allele.
#gOD = 0.1 +(0.67)(0.4)
gOD <- 0.1 +0.67*0.4
gOD
#gOD = 0.368. O- individuals are homozygotes for O and the non-d allele (D). And gOD is 0.368 so:
Omindividuals <- gOD^2
Omindividuals
#There is a frequency of 0.135424 O- individuals in the population.
#Question 4A
# RF = (sum of recombinant progeny/total progeny)*100
RF <- ((7+12)/(63+7+12+58))*100
RF
#RF = 13.57143
#r = 0.13
r<-0.13
#4B
#Dt = Do((1-r)^t)
#Algebra --- t = ln(Dt/Do)/ln(1-r)
#t = ln(0.05/0.23)/ln(1-r)
t = log(0.05/0.23)/log(1-r)
t
#After 10.95816 generations of random mating, so 11 generations, D degrades to 0.05.
#Problem 5A
# pi = (#differences/#combinations)
pi <- (3+2+2+1+3+3+2+3+3)/10
pi
#pi = 2.2
#5B is written out on the paper. I expect pi to increase in subsequent generations after inbreeding.
#pi should increase faster for an inbreeding population, while it decreases for an outbreeding population.
#Problem 6
# f = 1 - (observed het/expected het)
# expected heterozygosity = 2 pq
q <- function(p){1-p}
expectedhet<- function(p){2*p*q(p)}
expectedhet(0.1)
#observed heterozygosity (GAa): observedhet = expectedhet(1-f).
GAa <- function(p,f){expectedhet(p)*(1-f)}
GAa(0.1,0.5)
#There are two types of homozygotes GAA and Gaa.
#p = GAA + 0.5*GAa
#q = GAa + 0.5*GAa
#GAA = p - 0.5*GAa
#Gaa = q - 0.5*GAa
GAA <- function(p,f){p -0.5*GAa(p,f)}
Gaa <- function(p,f){q(p)-0.5*GAa(p,f)}
gametestot <- function(p,f){GAA(p,f)+GAa(p,f)+Gaa(p,f)}
#6a
GAa(0.1,0.5)
#GAa = 0.09
GAA(0.1,0.5)
#GAA = 0.055
Gaa(0.1,0.5)
#Gaa = 0.855
gametestot(0.1,0.5)
#They add to 1!
#6b
GAa(0.3,0.02)
#GAa = 0.4116
GAA(0.3,0.02)
#GAA = 0.0942
Gaa(0.3,0.02)
#Gaa = 0.4942
gametestot(0.3,0.02)
#Adds to 1
#6c
GAa(0.5,-0.3)
#GAa = 0.65
GAA(0.5,-0.3)
#GAA = 0.175
Gaa(0.5,-0.3)
#Gaa = 0.175
gametestot(0.5,-0.3)
#Adds to 1
|
9093cbd3ee1f51d6141b1c6c53647dbc68a901a3 | db65898c2edba5bca72e85b7d0382ae03e19d244 | /man/CASCrefmicrodata.Rd | 532129ad14d7c79f53e0c70c76cc549fc29a851e | [] | no_license | sdcTools/sdcMicro | 9193dd10c9cec6a16d90b32d4f8a78c84283bbd3 | 74ba57c4b579d2953d6e7bfa36a6cd648e7fff10 | refs/heads/master | 2023-09-05T05:20:31.170506 | 2023-08-30T10:54:09 | 2023-08-30T10:54:09 | 12,051,341 | 61 | 32 | null | 2023-08-30T09:52:47 | 2013-08-12T08:30:12 | R | UTF-8 | R | false | true | 1,442 | rd | CASCrefmicrodata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataSets.R
\docType{data}
\name{CASCrefmicrodata}
\alias{CASCrefmicrodata}
\title{Census data set}
\format{
A data frame sampled from year 1995 with 1080 observations on the
following 13 variables. \describe{
\item{AFNLWGT}{Final weight (2 implied decimal places)}
\item{AGI}{Adjusted gross income}
\item{EMCONTRB}{Employer contribution for hlth insurance}
\item{FEDTAX}{Federal income tax liability}
\item{PTOTVAL}{Total person income}
\item{STATETAX}{State income tax liability}
\item{TAXINC}{Taxable income amount}
\item{POTHVAL}{Total other persons income}
\item{INTVAL}{Amt of interest income}
\item{PEARNVAL}{Total person earnings}
\item{FICA}{Soc. sec. retirement payroll deduction}
\item{WSALVAL}{Amount: Total Wage and salary}
\item{ERNVAL}{Business or Farm net earnings}}
}
\source{
Public use file from the CASC project. More information on this
test data can be found in the paper listed below.
}
\description{
This test data set was obtained on July 27, 2000 using the public use Data
Extraction System of the U.S. Bureau of the Census.
}
\examples{
data(CASCrefmicrodata)
str(CASCrefmicrodata)
}
\references{
Brand, R. and Domingo-Ferrer, J. and Mateo-Sanz, J.M., Reference
data sets to test and compare SDC methods for protection of numerical
microdata. Unpublished.
\url{https://research.cbs.nl/casc/CASCrefmicrodata.pdf}
}
\keyword{datasets}
|
029876e4c43604c12ff1493065a5d9dac214c441 | 38e6bf92a54267ad564bcfc2550f49d807b11686 | /src/QC/2_proteinGroups_QC.R | 4e5743b081b061f95b613b6fab46638f538e920a | [] | no_license | JoWatson2011/APEX2_Analysis_Watson_Ferguson_2022 | 997972a98a1ff4b4d35f7bc0a6cd4bad1566bc89 | 1ffb39f59a7f57a14ed08164742fc2bccb5a3399 | refs/heads/master | 2023-04-10T19:58:05.692462 | 2022-10-14T09:52:03 | 2022-10-14T09:52:03 | 490,328,742 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,314 | r | 2_proteinGroups_QC.R | library(dplyr)
library(tidyr)
library(gplots)
library(data.table)
library(patchwork)
# library(readr)
library(RColorBrewer)
library(ggplot2)
# Read proteinGroups
proteinGroups <- readRDS("data/proteinGroups.RDS")
experiment_cols <- grep("LFQ intensity .*_P_",
colnames(proteinGroups),
value = T)
# Convert Intensity columns to 'numeric'
proteinGroups[,experiment_cols] <- apply(proteinGroups[,experiment_cols],
2,
as.numeric)
# Summary of variables proteinGroups will be filtered on.
# After running the script this information will be stored in the
# variable named report.
report <- vector()
report["Total Proteins Identified"] <- nrow(proteinGroups)
report["Potential Contaminants"] <- sum(proteinGroups$`Potential contaminant` == "+")
report["Reverse"] <- sum(proteinGroups$Reverse == "+")
report["Only Identified by Site"] <- sum(proteinGroups$`Only identified by site` == "+")
report["No Quantitative Data (incomplete cases)"] <- proteinGroups %>%
select(grep("LFQ intensity" , experiment_cols, value = T)) %>%
filter(rowSums(. == 0) == ncol(.)) %>% nrow()
# Filter proteinGroups.txt to remove contaminants, reverse.
proteinGroups_flt <- proteinGroups %>%
filter(`Potential contaminant` != "+",
`Reverse` != "+",
`Only identified by site` != "+",
`Razor + unique peptides` > 1,
`Unique + razor sequence coverage [%]` >= 5
)
# Add info to report
report["Proteins remaining following filtering"] <- nrow(proteinGroups_flt)
report["Incomplete cases in filtered dataset"] <- proteinGroups_flt %>%
select(all_of(experiment_cols)) %>%
filter(rowSums(. == 0) == ncol(.)) %>%
nrow()
# Filter rows with no quantitative information
proteinGroups_flt_cc <- proteinGroups_flt[rowSums(proteinGroups_flt[experiment_cols] > 0) >= 1,]
#Collapse gene/protein names
proteinGroups_flt_cc <- proteinGroups_flt_cc %>%
mutate(`Gene names` = gsub(";.*", "", `Gene names`),
`Majority protein IDs` = gsub(";.*", "", `Majority protein IDs`),
`Protein names` = gsub(";.*", "", `Protein names`))
# Transform
proteinGroups_log <- apply(proteinGroups_flt_cc[,experiment_cols], 2, function(i){
tmp <- ifelse(i == 0, NA, i)
tmp2 <- log10(tmp)
# tmp3 <- limma::normalizeBetweenArrays(tmp2, "cyclicloess")
return(tmp2)
})
# Normalise
proteinGroups_norm <- cbind(
proteinGroups_flt_cc[,!(names(proteinGroups_flt_cc) %in% c(experiment_cols))],
limma::normalizeBetweenArrays(proteinGroups_log, "quantile")
)
# Visualise normalilsation
proIntUnnorm <- proteinGroups_flt_cc %>%
select(`id`, all_of(experiment_cols)) %>%
pivot_longer(cols = -c(`id`),
names_to = "experiment",
values_to = "intensity") %>%
mutate(
intensity = ifelse(intensity == 0, NA, intensity),
intensity = log10(intensity),
rep = substr(experiment,
nchar(experiment)-1,
nchar(experiment))) %>%
ggplot(aes(x = intensity, color = rep, group = experiment)) +
geom_density() +
theme(legend.position = 'none')
proIntNorm <- proteinGroups_norm %>%
select(`id`, all_of(experiment_cols)) %>%
pivot_longer(cols = -c(`id`),
names_to = "experiment",
values_to = "intensity") %>%
mutate(rep = substr(experiment,
nchar(experiment)-1,
nchar(experiment))) %>%
filter(intensity < 10) %>%
ggplot(aes(x = intensity, color = rep, group = experiment)) +
geom_density() +
theme(legend.position = 'none') +
ggtitle("", subtitle = "Normalised Intensities")
proIntUnnorm / proIntNorm
ggsave("results/figs/QC/proNormCurve.tiff",
proIntUnnorm / proIntNorm,
width = 210, height = 297, units = "mm")
saveRDS(proteinGroups_norm, "data/proteinGroups_Flt.rds")
write.csv(proteinGroups_norm, "data/proteinGroups_Flt.csv")
#### FIGURES
# Pie chart to visualise propoprtion of missing values across all experiments.
pie(table(proteinGroups_flt_cc[,experiment_cols] == 0 |
is.na(proteinGroups_flt_cc[,experiment_cols])),
labels = c("Not NA", "NA"),
main = "Missing Ratios in Filtered Data")
#Heatmap correlation
cors <- proteinGroups %>%
select(c(grep("_A_", colnames(.)),
grep("_T_", colnames(.)))
) %>%
cor(use = "pairwise.complete.obs")
rownames(cors) <- colnames(cors) <- gsub("LFQ intensity ",
"",
colnames(cors))
cor_hm <- cors %>%
as.data.frame() %>%
tibble::rownames_to_column("Exp1") %>%
pivot_longer(-Exp1, names_to = "Exp2") %>%
# mutate(Exp1 = factor(Exp1, levels = rownames(cors)),
# Exp2, factor(Exp2, levels = colnames(cors))
# ) %>%
ggplot(aes(x = Exp1, y = Exp2, fill = value)) +
geom_tile() +
geom_text(aes(label =round(value,2)),size = 2) +
theme(axis.text.x = element_text(size = 5, angle = 45, hjust = 1),
axis.text.y = element_text(size = 5, angle = 45),
axis.title = element_blank(),
legend.key.size = unit(.25, "cm"),
legend.margin = margin(0,0,0,0, "cm"),
plot.margin = margin(0,0,0,0, "cm"),
legend.title = element_text(size = 5),
legend.text = element_text(size = 5)) +
scale_x_discrete(limits=rownames(cors)) +
scale_y_discrete(limits=colnames(cors)) +
scale_fill_gradientn(colours = c("#2C7BB6",
"#ABD9E9",
"#FFFFBF",
"#FDAE61",
"#D7191C"),
limits = c(0,1),
breaks = seq(0,1, 0.2)
)
ggsave("results/figs/forPaper/gg_PRO_CorHM.pdf", cor_hm)
### PCA
pca <- prcomp(t(na.omit(proteinGroups_norm[ , experiment_cols])
)
)
pca_df <- as.data.frame(pca$x)
pca_df$run <- gsub("LFQ intensity ", "",
gsub("_P", "", rownames(pca_df)
)
) %>%
substr(0, nchar(.) - 3)
# pca_df$rep <- gsub("LFQ intensity ", "", rownames(pca_df)) %>%
# substr(nchar(.) - 1, nchar(.))
pca_df$bait <- ifelse(grepl("R2A", pca_df$run), "FGFR2",
ifelse(grepl("R11A", pca_df$run), "RAB11", "GFP")
)
eigs <- pca$sdev^2
pc1<-signif(100*(eigs[1] / sum(eigs)), 4)
pc2<-signif(100*(eigs[2] / sum(eigs)), 4)
pca_g <- ggplot(pca_df, aes(PC1, PC2, color= run, shape = bait)) +
#ggforce::geom_mark_ellipse(aes(group = run, fill = bait), linetype = 0, alpha = 0.2) +
geom_point(size = 3, alpha = 0.7)+
scale_x_continuous(paste("PC1 (", pc1, "%)", sep=""))+
scale_y_continuous(paste("PC2 (", pc2, "%)", sep=""))+
scale_color_brewer("Run", palette = "Paired") +
guides(color = "none", shape = "none") +
#scale_fill_discrete(type = c("#D73027", "#4575B4", "#838B8B")) +
theme(
plot.title = element_text(size=20, face="bold",hjust = 0.5),
panel.background = element_blank(),
panel.grid = element_line("grey", linetype="dashed"),
legend.key = element_blank(),
axis.line = element_line("black"),
axis.text = element_text(size=6),
axis.title = element_text(size=6, face="bold")
)
ggsave("results/figs/forPaper/proPCA.pdf", pca_g,
width = 60, height = 60, units = "mm", dpi = 300)
###
# Number identified in
# each experiment
###
proNo <- proteinGroups_flt_cc %>%
select(`id`, all_of(experiment_cols)) %>%
pivot_longer(cols = -`id`,
names_to = "experiment",
values_to = "intensity") %>%
filter(intensity != 0) %>%
mutate(experiment = gsub("LFQ intensity ", "", experiment)) %>%
mutate(rep = substr(experiment, nchar(experiment) - 2, nchar(experiment)),
experiment = substr(experiment, 0, nchar(experiment) - 3)
) %>%
group_by(experiment,rep) %>%
summarise(n = n(), .groups = "keep") %>%
group_by(experiment) %>%
summarise(sd = sd(n, na.rm = T),
n = mean(n), .groups = "keep") %>%
ungroup() %>%
unique() %>%
mutate(apex = ifelse(grepl("_A_", experiment),
"APEX", "Total"),
experiment = gsub("_[A|T]", "", experiment)) %>%
ggplot(aes(x = experiment, y = n, fill = experiment,
ymax = n+sd, ymin = n-sd
)) +
geom_col() +
facet_wrap(~ apex, ncol = 1) +
scale_fill_discrete(type = c("#838B8B",
"#D73027",
"#838B8B",
"#ABD9E9",
"#4575B4")
) +
geom_errorbar(width = 0.5) +
theme(
panel.background = element_blank(),
panel.grid = element_line("grey", linetype="dashed"),
legend.key = element_blank(),
axis.line = element_line("black"),
# Text sizes may need modifying based on fig. sizes
axis.text = element_text(size=12),
axis.title = element_text(size=14, face="bold"),
legend.position = "none",
axis.text.x = element_text(angle = 45, hjust = 1)
) +
ggtitle("Quantified Protein Groups")
ggsave("results/figs/forPaper/proNumQuant.pdf", proNo,
width = 7, height = 7)
|
2becc35d251a4f5a52e9bafcff89b1210c8ecd27 | 6e32987e92e9074939fea0d76f103b6a29df7f1f | /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1ListSpecialistPoolsResponse.Rd | fc915960a3fddb469c1f7beb4f1ece82fc7d08c2 | [] | no_license | justinjm/autoGoogleAPI | a8158acd9d5fa33eeafd9150079f66e7ae5f0668 | 6a26a543271916329606e5dbd42d11d8a1602aca | refs/heads/master | 2023-09-03T02:00:51.433755 | 2023-08-09T21:29:35 | 2023-08-09T21:29:35 | 183,957,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 922 | rd | GoogleCloudAiplatformV1ListSpecialistPoolsResponse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ListSpecialistPoolsResponse}
\alias{GoogleCloudAiplatformV1ListSpecialistPoolsResponse}
\title{GoogleCloudAiplatformV1ListSpecialistPoolsResponse Object}
\usage{
GoogleCloudAiplatformV1ListSpecialistPoolsResponse(
specialistPools = NULL,
nextPageToken = NULL
)
}
\arguments{
\item{specialistPools}{A list of SpecialistPools that matches the specified filter in the request}
\item{nextPageToken}{The standard List next-page token}
}
\value{
GoogleCloudAiplatformV1ListSpecialistPoolsResponse object
}
\description{
GoogleCloudAiplatformV1ListSpecialistPoolsResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Response message for SpecialistPoolService.ListSpecialistPools.
}
\concept{GoogleCloudAiplatformV1ListSpecialistPoolsResponse functions}
|
b7e1117a806ad701ebde8e552a73573769a5ea2b | 8c2253bd47fd3d76f28950d1ef24450b24c4a0d7 | /R/extract_timeseries_annual_landings.R | 3cf07b590b7a7c0115215fcba4003ec7175a9a33 | [] | no_license | cran/StrathE2E2 | bc63d4f0dffdde94da1c7ea41133c09033c0cd4e | 629dc5e7f2e323752349352bb2d651a56c6f4447 | refs/heads/master | 2023-02-25T13:18:59.217896 | 2021-01-22T21:40:05 | 2021-01-22T21:40:05 | 278,343,976 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,487 | r | extract_timeseries_annual_landings.R | #
# extract_timeseries_annual_landings.R
#
#' read designated model
#'
#' returns a model object with run and data slots
#'
#' @param model current model
#' @param build model build object
#' @param out model output
#'
#' @return inshore/offshore annual landings
#'
#' @noRd
#
# ------------------------------------------------------------------------------
extract_timeseries_annual_landings <- function(model, build, out) {
setup <- elt(model, "setup")
identifier <- elt(setup, "model.ident")
resultsdir <- elt(setup, "resultsdir")
run <- elt(build, "run")
nyears <- elt(run, "nyears")
#Print some of the full time series data to a csv file
#-----------------------------------------------------------------
offshore_annual_group_land_disc<-data.frame(year=seq(1,nyears))
offshore_annual_group_land_disc$PFland<-rep(0,nyears)
offshore_annual_group_land_disc$DFQland<-rep(0,nyears)
offshore_annual_group_land_disc$DFNQland<-rep(0,nyears)
offshore_annual_group_land_disc$MFland<-rep(0,nyears)
offshore_annual_group_land_disc$SBland<-rep(0,nyears)
offshore_annual_group_land_disc$CBland<-rep(0,nyears)
offshore_annual_group_land_disc$CZland<-rep(0,nyears)
offshore_annual_group_land_disc$BDland<-rep(0,nyears)
offshore_annual_group_land_disc$SLland<-rep(0,nyears)
offshore_annual_group_land_disc$CTland<-rep(0,nyears)
offshore_annual_group_land_disc$KPland<-rep(0,nyears)
offshore_annual_group_land_disc$PFdisc<-rep(0,nyears)
offshore_annual_group_land_disc$DFQdisc<-rep(0,nyears)
offshore_annual_group_land_disc$DFNQdisc<-rep(0,nyears)
offshore_annual_group_land_disc$MFdisc<-rep(0,nyears)
offshore_annual_group_land_disc$SBdisc<-rep(0,nyears)
offshore_annual_group_land_disc$CBdisc<-rep(0,nyears)
offshore_annual_group_land_disc$CZdisc<-rep(0,nyears)
offshore_annual_group_land_disc$BDdisc<-rep(0,nyears)
offshore_annual_group_land_disc$SLdisc<-rep(0,nyears)
offshore_annual_group_land_disc$CTdisc<-rep(0,nyears)
offshore_annual_group_land_disc$KPdisc<-rep(0,nyears)
inshore_annual_group_land_disc <- offshore_annual_group_land_disc
for(ik in 1:nyears){
offshore_annual_group_land_disc$PFland[ik] <- out$landp_o[ (1+(ik*360)) ] - out$landp_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$DFQland[ik] <- out$landd_quota_o[ (1+(ik*360)) ] - out$landd_quota_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$DFNQland[ik] <- out$landd_nonquota_o[ (1+(ik*360)) ] - out$landd_nonquota_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$MFland[ik] <- out$landm_o[ (1+(ik*360)) ] - out$landm_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$SBland[ik] <- out$landsb_o[ (1+(ik*360)) ] - out$landsb_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$CBland[ik] <- out$landcb_o[ (1+(ik*360)) ] - out$landcb_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$CZland[ik] <- out$landcz_o[ (1+(ik*360)) ] - out$landcz_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$BDland[ik] <- out$landbd_o[ (1+(ik*360)) ] - out$landbd_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$SLland[ik] <- out$landsl_o[ (1+(ik*360)) ] - out$landsl_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$CTland[ik] <- out$landct_o[ (1+(ik*360)) ] - out$landct_o[ (1+(ik-1)*360) ]
#No offshore landings of kelp
offshore_annual_group_land_disc$PFdisc[ik] <- out$discpel_o[ (1+(ik*360)) ] - out$discpel_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$DFQdisc[ik] <- out$discdem_quota_o[ (1+(ik*360)) ] - out$discdem_quota_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$DFNQdisc[ik] <- out$discdem_nonquota_o[ (1+(ik*360)) ] - out$discdem_nonquota_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$MFdisc[ik] <- out$discmig_o[ (1+(ik*360)) ] - out$discmig_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$SBdisc[ik] <- out$discsb_o[ (1+(ik*360)) ] - out$discsb_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$CBdisc[ik] <- out$disccb_o[ (1+(ik*360)) ] - out$disccb_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$CZdisc[ik] <- out$disccz_o[ (1+(ik*360)) ] - out$disccz_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$BDdisc[ik] <- out$discbd_o[ (1+(ik*360)) ] - out$discbd_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$SLdisc[ik] <- out$discsl_o[ (1+(ik*360)) ] - out$discsl_o[ (1+(ik-1)*360) ]
offshore_annual_group_land_disc$CTdisc[ik] <- out$discct_o[ (1+(ik*360)) ] - out$discct_o[ (1+(ik-1)*360) ]
#No offshore discards of kelp
inshore_annual_group_land_disc$PFland[ik] <- out$landp_i[ (1+(ik*360)) ] - out$landp_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$DFQland[ik] <- out$landd_quota_i[ (1+(ik*360)) ] - out$landd_quota_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$DFNQland[ik] <- out$landd_nonquota_i[ (1+(ik*360)) ] - out$landd_nonquota_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$MFland[ik] <- out$landm_i[ (1+(ik*360)) ] - out$landm_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$SBland[ik] <- out$landsb_i[ (1+(ik*360)) ] - out$landsb_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$CBland[ik] <- out$landcb_i[ (1+(ik*360)) ] - out$landcb_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$CZland[ik] <- out$landcz_i[ (1+(ik*360)) ] - out$landcz_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$BDland[ik] <- out$landbd_i[ (1+(ik*360)) ] - out$landbd_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$SLland[ik] <- out$landsl_i[ (1+(ik*360)) ] - out$landsl_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$CTland[ik] <- out$landct_i[ (1+(ik*360)) ] - out$landct_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$KPland[ik] <- out$landkp_i[ (1+(ik*360)) ] - out$landkp_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$PFdisc[ik] <- out$discpel_i[ (1+(ik*360)) ] - out$discpel_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$DFQdisc[ik] <- out$discdem_quota_i[ (1+(ik*360)) ] - out$discdem_quota_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$DFNQdisc[ik] <- out$discdem_nonquota_i[ (1+(ik*360)) ] - out$discdem_nonquota_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$MFdisc[ik] <- out$discmig_i[ (1+(ik*360)) ] - out$discmig_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$SBdisc[ik] <- out$discsb_i[ (1+(ik*360)) ] - out$discsb_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$CBdisc[ik] <- out$disccb_i[ (1+(ik*360)) ] - out$disccb_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$CZdisc[ik] <- out$disccz_i[ (1+(ik*360)) ] - out$disccz_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$BDdisc[ik] <- out$discbd_i[ (1+(ik*360)) ] - out$discbd_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$SLdisc[ik] <- out$discsl_i[ (1+(ik*360)) ] - out$discsl_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$CTdisc[ik] <- out$discct_i[ (1+(ik*360)) ] - out$discct_i[ (1+(ik-1)*360) ]
inshore_annual_group_land_disc$KPdisc[ik] <- out$disckp_i[ (1+(ik*360)) ] - out$disckp_i[ (1+(ik-1)*360) ]
}
filename = csvname(resultsdir, "model_inshore_annual_landings_discards", identifier)
writecsv(inshore_annual_group_land_disc, filename, row.names=FALSE)
filename = csvname(resultsdir, "model_offshore_annual_landings_discards", identifier)
writecsv(offshore_annual_group_land_disc, filename, row.names=FALSE)
list(
offshore_annual_group_land_disc = offshore_annual_group_land_disc,
inshore_annual_group_land_disc = inshore_annual_group_land_disc
)
}
|
c484eb8d4c0cfa39aa4c9cd14eed0f904cb5df74 | 4ce2d115fc47d9ae734d2bbb54382cdcc820a658 | /BuildComponentLambMortRateCovs/BuildComponentLambMortRateCovs.R | 0f9cc3c85dcad391ab1de5aaba048c232519c962 | [] | no_license | kmanlove/ClusterAssocDataPrep | 051765049c1975b51ea3c7ed2021249f0bcb3a60 | 236d0c7512a8b924e8986a20e5e690af72c0fd0c | refs/heads/master | 2016-09-06T16:17:42.915787 | 2013-10-08T14:05:33 | 2013-10-08T14:05:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,050 | r | BuildComponentLambMortRateCovs.R | #-- this script reads in the same data as the variance decomposition --#
#-- coxme models --#
filepath <-
"~/work/Kezia/Research/EcologyPapers/ClustersAssociations/Data/RevisedData_11Sept2013/"
relocdata <- read.csv(paste(filepath,
"RelocsWithNetworkMeasures/FullEweDataAllSummerRelocs_MinEdge.1_18Sept2013.csv", sep = ""), header = T)
relocdata$component.ind <- paste(relocdata$Pop, "_", relocdata$Year, "_",
relocdata$res.component, sep = "")
relocdata$popear.ind <- paste(relocdata$Pop, "_", relocdata$Year, sep = "")
lambdata <- read.csv(paste(filepath,
"CleanLambSurvData/FullEweRelocsLambSurvDat_MinEdge.1_allewerelocs_18Sept2013.csv",
sep = ""), header = T)
lambdata$LastYearKnownCompMort <- lambdata$LastYearLambStatus <-
lambdata$LastYearCompLambDiedOrNoLamb <- lambdata$ThisYearCompKnownMort <-
lambdata$ThisYearCompLambDiedOrNoLamb <-
lambdata$ThisPopyrLambDiedOrNoLamb <- lambdata$ThisPopyrKnownMort <- rep(NA, dim(lambdata)[1])
for(i in 1:dim(lambdata)[1]){
ewedat <- subset(relocdata, as.character(EWEID) ==
as.character(lambdata$EWEID)[i] &
as.numeric(as.character(Year)) ==
as.numeric(as.character(lambdata$Year[i])) -
1)[1, ]
EwesLastYearComponent <- as.character(ewedat$component.ind)
if(length(EwesLastYearComponent) == 0){
lambdata$LastYearKnownCompMort[i] <- lambdata$LastYearLambStatus[i] <-
lambdata$LastYearCompLambDiedOrNoLamb[i] <- NA
} else {
LastYearCompEwes <- levels(factor(subset(relocdata,
as.character(component.ind) ==
as.character(EwesLastYearComponent))$EWEID))
LastYearCompLambs <- subset(lambdata, as.numeric(as.character(Year)) == as.numeric(as.character(lambdata$Year[i])) - 1 & as.character(EWEID) %in% LastYearCompEwes)
lambdata$LastYearKnownCompMort[i] <- sum(LastYearCompLambs$CENSOR2) /
dim(LastYearCompLambs)[1]
lambdata$LastYearCompLambDiedOrNoLamb[i] <- sum(LastYearCompLambs$CENSOR2) /
length(LastYearCompEwes)
lambdata$LastYearLambStatus[i] <- ifelse(as.character(ewedat$HasLamb) ==
"NoLamb", "NoLamb",
ifelse(ewedat$CENSOR2 == 0,
"LambDied",
ifelse(ewedat$CENSOR2 == 1,
"LambSurvived", NA)))
}
ewedatnow <- subset(relocdata, as.character(EWEID) ==
as.character(lambdata$EWEID)[i] &
as.numeric(as.character(Year)) ==
as.numeric(as.character(lambdata$Year[i])))[1, ]
EwesThisYearComponent <- as.character(ewedatnow$component.ind)
ThisYearCompEwes <- levels(factor(subset(relocdata,
as.character(component.ind) ==
as.character(EwesThisYearComponent))$EWEID))
ThisYearCompLambs <- subset(lambdata, as.numeric(as.character(Year)) ==
as.numeric(as.character(lambdata$Year[i])) &
as.character(EWEID) %in% ThisYearCompEwes)
lambdata$ThisYearKnownCompMort[i] <- sum(ThisYearCompLambs$CENSOR2) /
dim(ThisYearCompLambs)[1]
lambdata$ThisYearCompLambDiedOrNoLamb[i] <- sum(ThisYearCompLambs$CENSOR2) /
length(ThisYearCompEwes)
#-- extract mort levels for this popyear --#
EwesThisPopyr <- as.character(ewedatnow$popear.ind)
ThisPopyrEwes <- levels(factor(subset(relocdata,
as.character(popear.ind) ==
as.character(EwesThisPopyr))$EWEID))
ThisPopyrLambs <- subset(lambdata, as.numeric(as.character(Year)) ==
as.numeric(as.character(lambdata$Year[i])) &
as.character(EWEID) %in% ThisPopyrEwes)
lambdata$ThisPopyrKnownMort[i] <- sum(ThisPopyrLambs$CENSOR2) /
dim(ThisPopyrLambs)[1]
lambdata$ThisPopyrLambDiedOrNoLamb[i] <- sum(ThisPopyrLambs$CENSOR2) /
length(ThisPopyrEwes)
}
write.path <- "~/work/Kezia/Research/EcologyPapers/ClustersAssociations/Data/RevisedData_11Sept2013/LambSurvDatWithLastYearCompCovs/"
write.csv(lambdata, paste(write.path,
"LambDataWithLastYearCompCovs_19Sept2013.csv", sep =
""))
|
dda371063b542d0077a11342b18cb2ba5bfdbe82 | bfaa0d42780ea870d3f0a5e6e529ba0507dd328f | /man/export_header.Rd | 4d859e4958f1b8f66c5ceedc5ed2a7a9ab2d8da6 | [
"MIT"
] | permissive | jpshanno/ingestr | 20d9b709aa40737adaf99f94d779b89d308345de | ef2d692f552fb1ff6b91f6f2053887eae3db5e20 | refs/heads/master | 2021-06-26T16:34:02.405816 | 2020-09-23T14:33:25 | 2020-09-23T14:33:25 | 137,138,501 | 20 | 4 | MIT | 2018-12-10T17:13:10 | 2018-06-12T23:32:09 | R | UTF-8 | R | false | true | 511 | rd | export_header.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ingest_header.R
\name{export_header}
\alias{export_header}
\title{Export header dataframe to a temporary file}
\usage{
export_header(header.info, input.source)
}
\arguments{
\item{header.info}{A dataframe containing file header information}
\item{input.source}{A character string of the input file}
}
\value{
Returns nothing, saves header dataframe to a temporary file.
}
\description{
Export header dataframe to a temporary file
}
|
309d012b61e85077c644b851e354d2fca0852908 | be00bde77c9d86a3da0d38c0eaf2c424ec9b7369 | /man/Gmatrices.Rd | 31e4632d5ccac8d7633d8e955adbb145f5f5a178 | [] | no_license | martinbaumgaertner/varexternal | e719e6b9d68daa66eed7e86d9e40fef0dd4ad08c | 8547e2dca370963da6fd808eea62a1557154f5ac | refs/heads/master | 2022-05-29T17:35:42.614015 | 2022-04-22T20:09:18 | 2022-04-22T20:09:18 | 222,429,967 | 3 | 2 | null | null | null | null | UTF-8 | R | false | true | 634 | rd | Gmatrices.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gmatrices.R
\name{Gmatrices}
\alias{Gmatrices}
\title{Gmatrices}
\usage{
Gmatrices(AL, C, p, hori, n)
}
\arguments{
\item{AL}{VAR model coefficients}
\item{C}{MA representation coefficients}
\item{p}{lag order}
\item{hori}{forecast horizon}
\item{n}{number of variables}
}
\value{
G: derivatives of C wrt A
}
\description{
Computes the derivatives of vec(C) wrt vec(A) based on Lütkepohl H. New introduction to multiple time series analysis. Springer, 2007.
}
\seealso{
https://pdfs.semanticscholar.org/3e18/3a5ec97ff636363e4deedff7eaeee9d894c9.pdf
}
|
66cc4ee237a4799b69dfff1561e8a7b171621da0 | b033ba5c86bbccca8f33a17a91d7d8ba1fc41976 | /man/perm_kCCA.Rd | 11f9486a6e7f1662cbfc5306fc860e0c15f06107 | [] | no_license | neuroconductor/brainKCCA | 889419ba83967592cc5f70cddaf8a23d4abbe27f | e8e08788b4ec395cfe5ba670d13332e03a35814f | refs/heads/master | 2021-07-19T05:44:31.800018 | 2021-05-17T13:38:42 | 2021-05-17T13:38:44 | 126,418,981 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,833 | rd | perm_kCCA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/perm_kCCA.R
\name{perm_kCCA}
\alias{perm_kCCA}
\alias{perm_kCCA_par}
\title{Calculation of Strength of the Connectivity among Multiple Brain Regions}
\usage{
perm_kCCA(x, y, sig = 0.1, gama = 0.1, ncomps = 1, permNum = 50,
kernel = "rbfdot")
perm_kCCA_par(x, y, sig = 0.1, gama = 0.1, ncomps = 1, permNum = 500,
kernel = "rbfdot")
}
\arguments{
\item{x}{region 1, a matrix containing data index by row.}
\item{y}{region 2, a matrix containing data index by row.}
\item{sig}{inverse kernel width for the Radial Basis kernel function
"rbfdot" and the Laplacian kernel "laplacedot".}
\item{gama}{regularization parameter (default: 0.1).}
\item{ncomps}{number of canonical components (default: 1).}
\item{permNum}{number of permutation (default 50).}
\item{kernel}{type of kernel.}
}
\value{
(lists of) list of region index, p-value, region type ("two" or "multiple"),
and region name.
}
\description{
This function is the core part for kernel canonical correlation analysis.
Generally you do not need to use this function unless you are famaliar with kcca
algorithm.
}
\details{
Kernel canonical correlation analysis (KCCA) can explore the
nonlinear relationship between two variables.
It transformed sample vectors into the Hilbert space and maximize
correlation coefficient by solving quadratically regularized Lagrangean function.
Refer to Kang's paper for more details: Kang J, Bowman FD, Mayberg H, Liu H (2016).
"A depression network of functionallyconnected regions discovered via multi-attribute
canonical correlation graphs."NeuroImage,141, 431-441.
}
\references{
\url{https://www.ncbi.nlm.nih.gov/pubmed/27474522}
}
\author{
Xubo Yue, Chia-Wei Hsu (tester), Jian Kang (maintainer)
}
|
92c314d901aa115f62b732ba0a5207c4460f1d33 | acf25199f5311f05b2d3a5119fe2a2e06fb82901 | /analysis.R | 6dca4128a89724d6a24d772d9f48c69aec95f017 | [] | no_license | AndrMenezes/mm2017 | 5dc6e9a6ac1c983b1e570a11fdb4144a446b8beb | 2756e0a2171ca943cc78ab771c63832727e2892c | refs/heads/master | 2021-04-15T09:28:56.306028 | 2019-06-25T22:11:27 | 2019-06-25T22:11:27 | 126,765,309 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 6,409 | r | analysis.R | # Definições gerais -------------------------------------------------------
setwd('C:/Users/User/Dropbox/4° Série/Modelos Mistos/Trabalho')
# setwd('C:/Users/André Felipe/Dropbox/4° Série/Modelos Mistos/Trabalho')
rm(list = ls(all.names = TRUE))
bib <- c('lme4', 'lmerTest', 'lsmeans', 'hnp', 'dplyr', 'ggplot2', 'RLRsim', 'nlme', 'xtable', 'influence.ME')
sapply(bib, require, character.only = T)
dados <- read.table(file = 'planta-final.txt', sep = ',', header = T)
dados$tempo_f <- factor(dados$tempo_f)
dados$arvore <- factor(dados$arvore)
head(dados)
str(dados)
# setwd('C:/Users/André Felipe/Dropbox/4° Série/Modelos Mistos/Trabalho/Relatório')
setwd('C:/Users/User/Dropbox/4° Série/Modelos Mistos/Trabalho/Relatório')
# Descritiva --------------------------------------------------------------
pdf(file = "boxplot-trat.pdf", width = 10.5, height = 6.5)
par(mar = c(3.2, 3.2, 1.5, 1.5), cex = 1.6)
boxplot(diametro ~ trat, data = dados, xlab = '', ylab = '', cex = 0.6, col = 'gray')
mtext("Dose", side = 1, line = 2.0, cex = 1.8)
mtext("Diâmetro (mm)", side = 2, line = 2, cex = 1.8)
graphics.off()
pdf(file = "boxplot-tempo.pdf", width = 10.5, height = 6.5)
par(mar = c(3.2, 3.2, 1.5, 1.5), cex = 1.6)
boxplot(diametro ~ tempo_f, data = dados, xlab = '', ylab = '', cex = 0.6, col = 'gray')
points(x = unique(dados$tempo_f), y = tapply(dados$diametro, dados$tempo_f, mean), pch = 16, col = 'red', cex = 0.8)
mtext("Dias após a avaliação", side = 1, line = 2.0, cex = 1.8)
mtext("Diâmetro (mm)", side = 2, line = 2, cex = 1.8)
graphics.off()
x11()
dados %>% ggplot(aes(x = tempo_f, y = diametro, group = interaction(tempo_f, trat))) +
geom_boxplot(aes(fill = factor(trat)), color = 'black') +
stat_summary(aes(group = 1), fun.y = mean, geom="line", color = 'black') +
stat_summary(aes(group = 1), fun.y = mean, geom="point", color = 'gold') +
labs(y = 'Diâmetro (mm)', x = 'Dias após a avaliação', fill = 'Dose: ') +
theme_bw() +
theme(text = element_text(size=20), panel.grid.minor = element_blank(), legend.position="top",
panel.grid.major = element_line(size = 0.4, linetype = 'dotted', colour = 'gray'))
ggsave(filename = 'boxplot-tempo-trat.pdf', width = 9, height = 6)
# Ajuste do modelo --------------------------------------------------------
mod1 <- lme(fixed = diametro ~ trat + tempo_n + trat*tempo_n, data = dados, random = ~ 1 | arvore)
mod2 <- lme(fixed = diametro ~ trat + tempo_n, data = dados, random = ~ 1 | arvore)
mod3 <- lme(fixed = diametro ~ tempo_n, data = dados, random = ~ 1 | arvore)
anova(mod1, mod2, mod3)
2 * (logLik(mod2) - logLik(mod1))
print(xtable(anova(mod2), digits = 4))
# Comparações das estrutura de correlação ---------------------------------
m.CS <- lme(fixed = diametro ~ trat + tempo_n + trat * tempo_n, data = dados, random = ~ 1 | arvore,
correlation = corCompSymm(form = ~tempo_n|arvore))
m.Exp <- lme(fixed = diametro ~ trat + tempo_n + trat * tempo_n, data = dados, random = ~ 1 | arvore,
correlation = corExp(form = ~tempo_n|arvore, nugget = T))
m.Gaus <- lme(fixed = diametro ~ trat + tempo_n + trat * tempo_n, data = dados, random = ~ 1 | arvore,
correlation = corGaus(form = ~tempo_n|arvore, nugget = T))
anova(m.CS, m.Exp)
anova(m.CS, m.Gaus)
anova(m.Exp, m.Gaus)
# Modelo escolhido --------------------------------------------------------
mod3 <- lme(fixed = diametro ~ tempo_n, data = dados, random = ~ 1 | arvore)
summary(mod3)
intervals(mod3)
mod3 <- lmer(diametro ~ tempo_n + (1 | arvore), data = dados)
res <- summary(mod3)
round(as.data.frame(res$coefficients)[, -3], 4)
round(confint(mod3), 4)
rand(mod3)
# Resíduos ----------------------------------------------------------------
## Resíduos marginais (erro aleatório)
my.hnp <- hnp(mod3, halfnormal = T, how.many.out = T, paint.out = T, plot = T)
pdf(file = "hnp.pdf", width = 11, height = 7)
par(mar = c(3.5, 3.5, 1.2, 0.6), cex = 1.8)
plot(my.hnp, xaxt = 'n', yaxt = 'n', xlab = '', ylab = '', cex = 0.6, ylim = c(0, 10))
mtext("Percentil da N(0, 1)", side = 1, line = 2.0, cex = 1.8)
mtext("Resíduos marginais", side = 2, line =2, cex = 1.8)
abline(h = seq(0, 10, l = 5), v=seq(0, 3, l = 5), col = "gray", lty = "dotted")
axis(1, seq(0, 3, l = 5))
axis(2, seq(0, 10, l = 5), FF(seq(0, 10, l = 5), 1))
graphics.off()
## resíduos de efeitos aleatórios
r2 <- random.effects(mod3)$arvore
pdf(file = "qq-ranef.pdf", width = 11, height = 7)
par(mar = c(3.5, 3.5, 1.2, 0.6), cex = 1.8)
qqnorm(r2[, 1], xaxt = 'n', yaxt = 'n', xlab = '', ylab = '', cex = 0.6, main = ""); qqline(r2[, 1])
mtext("Percentil da N(0, 1)", side = 1, line = 2.0, cex = 1.8)
mtext("Resíduos de efeitos aleatórios", side = 2, line =2, cex = 1.8)
abline(h = seq(-1.5, 1.5, l = 5), v=seq(-2, 2, l = 5), col = "gray", lty = "dotted")
axis(2, seq(-1.5, 1.5, l = 5))
axis(1, seq(-2, 2, l = 5), FF(seq(-2, 2, l = 5), 1))
graphics.off()
## ajustado versus residuo
x = fitted(mod3); y = residuals(mod3); Rx = range(x); Ry = range(y)
pdf(file = "pred.pdf", width = 11, height = 7)
par(mar = c(3.5, 3.5, 1.2, 0.6), cex = 1.8)
plot(y ~ x, xlab = '', ylab = '', cex = 0.8, xaxt = 'n', yaxt = 'n')
mtext("Valores ajustados", side = 1, line = 2.0, cex = 1.8)
mtext("Resíduos marginais", side = 2, line =2, cex = 1.8)
abline(h = seq(Ry[1], Ry[2], l = 5), v=seq(Rx[1], Rx[2], l = 5), col = "gray", lty = "dotted")
axis(2, seq(Ry[1], Ry[2], l = 5), FF(seq(Ry[1], Ry[2], l = 5), 1))
axis(1, seq(Rx[1], Rx[2], l = 5), FF(seq(Rx[1], Rx[2], l = 5), 1))
graphics.off()
## Influence diagnostics
lmer3.infl <- influence(mod3, obs=TRUE)
cook <- cooks.distance(lmer3.infl)
x = 1:nrow(dados); y = cooks.distance(lmer3.infl); Rx = range(x); Ry = range(y)
pdf(file = "cook.pdf", width = 11, height = 7)
par(mar = c(3.5, 3.5, 1.2, 0.6), cex = 1.8)
plot(y, xlab = '', ylab = '', cex = 0.8, xaxt = 'n', yaxt = 'n')
mtext("Índice das observações", side = 1, line = 2.0, cex = 1.8)
mtext("Distância de Cook", side = 2, line = 2, cex = 1.8)
abline(h = seq(Ry[1], Ry[2], l = 5), v=seq(Rx[1], Rx[2], l = 5), col = "gray", lty = "dotted")
axis(2, seq(Ry[1], Ry[2], l = 5), FF(seq(Ry[1], Ry[2], l = 5), 3))
axis(1, seq(Rx[1], Rx[2], l = 5), FF(seq(Rx[1], Rx[2], l = 5), 0))
graphics.off()
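# A possible follow-up (sketch): flag observations above the common 4/n
# rule-of-thumb cutoff for Cook's distance; the threshold is a heuristic.
flagged <- which(cook > 4/nrow(dados))
dados[flagged, ]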
## ---- file: /R/clsd.R (JeffreyRacine/R-Package-crs) ----
## These functions are for (currently univariate) logspline density
## estimation written by [email protected] (Jeffrey S. Racine). They
## make use of spline routines in the crs package (available on
## CRAN). The approach involves joint selection of the degree and
## knots in contrast to the typical approach (e.g. Kooperberg and
## Stone) that sets the degree to 3 and optimizes knots only. Though
## more computationally demanding, the estimators are more efficient
## on average.
par.init <- function(degree,segments,monotone,monotone.lb) {
## This function initializes parameters for search along with upper
## and lower bounds if appropriate.
dim.p <- degree+segments
## The weights for the linear tails must be non-positive. The lower
## bound places a maximum bound on how quickly the tails are allowed
## to die off. Trial and error suggests the values below seem to be
## appropriate for a wide range of (univariate)
## distributions. Kooperberg suggests that in order to get the
## constraint theta < 0 use theta <= -epsilon for some small epsilon
## > 0. We therefore use sqrt machine epsilon.
par.ub <- - sqrt(.Machine$double.eps)
par.lb <- monotone.lb
par.init <- c(runif(1,-10,par.ub),rnorm(dim.p-2),runif(1,-10,par.ub))
par.upper <- c(par.ub,rep(Inf,dim.p-2),par.ub)
par.lower <- if(monotone){rep(-Inf,dim.p)}else{c(par.lb,rep(-Inf,dim.p-2),par.lb)}
return(list(par.init=par.init,
par.upper=par.upper,
par.lower=par.lower))
}
gen.xnorm <- function(x=NULL,
xeval=NULL,
lbound=NULL,
ubound=NULL,
er=NULL,
n.integrate=NULL) {
er <- extendrange(x,f=er)
if(!is.null(lbound)) er[1] <- lbound
if(!is.null(ubound)) er[2] <- ubound
if(min(x) < er[1] | max(x) > er[2]) warning(" data extends beyond the range of `er'")
xint <- sort(as.numeric(c(seq(er[1],er[2],length=round(n.integrate/2)),
quantile(x,seq(sqrt(.Machine$double.eps),1-sqrt(.Machine$double.eps),length=round(n.integrate/2))))))
if(is.null(xeval)) {
xnorm <- c(x,xint)
} else {
xnorm <- c(xeval,x,xint)
if(min(xeval) < er[1] | max(xeval) > er[2]) warning(" evaluation data extends beyond the range of `er'")
}
## Either x will be the first 1:length(x) elements in
## object[rank.xnorm] or xeval will be the first 1:length(xeval)
## elements in object[rank.xnorm]
return(list(xnorm=xnorm[order(xnorm)],rank.xnorm=rank(xnorm)))
}
density.basis <- function(x=NULL,
xeval=NULL,
xnorm=xnorm,
degree=NULL,
segments=NULL,
basis="tensor",
knots="quantiles",
monotone=TRUE) {
## To obtain the constant of integration for B-spline bases, we need
## to compute log(integral exp(P%*%beta)) so we take an equally
## spaced extended range grid of length n plus the sample
## realizations (min and max of sample therefore present for what
## follows), and evaluation points xeval if they exist.
## Charles Kooperberg has a manuscript "Statistical Modeling with
## Spline Functions', Jan 5 2006 on his web page. Chapter 6, page
## 286, figure 6.7 reveals a hybrid spline basis that in essence
## appears to drop two columns from my B-spline with 2 segments
## added artificially. This has the effect of removing the two bases
## that were delivering weight in tails leading to `kinks'. Hat-tip
## to Charles for his clear descriptions. Note we require the same
## for the derivatives below. Note that this logspline basis does
## not have the B-spline property that the pointwise sum of the
## bases is 1 everywhere.
suppressWarnings(Pnorm <- prod.spline(x=x,
xeval=xnorm,
K=cbind(degree,segments+if(monotone){2}else{0}),
knots=knots,
basis=basis))
if(monotone) Pnorm <- Pnorm[,-c(2,degree+segments+1)]
## Compute the normalizing constant so that the estimate integrates
## to one. We append linear splines to the B-spline basis to
## generate exponentially declining tails (K=cbind(1,1) creates the
## linear basis).
suppressWarnings(P.lin <- prod.spline(x=x,
xeval=xnorm,
K=cbind(1,1),
knots=knots,
basis=basis))
## We append the linear basis to the left and rightmost polynomial
## bases. We match the slope of the linear basis to that of the
## polynomial basis at xmin/xmax (note that
## Pnorm[xnorm==max(x),-ncol(Pnorm)] <- 0 is there because the
## gsl.bspline values at the right endpoint are very small but not
## exactly zero but want to rule out any potential issues hence set
## them correctly to zero)
Pnorm[xnorm<min(x),] <- 0
Pnorm[xnorm>max(x),] <- 0
Pnorm[xnorm==max(x),-ncol(Pnorm)] <- 0
P.left <- as.matrix(P.lin[,1])
P.right <- as.matrix(P.lin[,2])
## We want the linear segment to have the same slope as the
## polynomial segment it connects with and to match at the joint
## hence conduct some carpentry at the left boundary.
index <- which(xnorm==min(x))
index.l <- index+1
index.u <- index+5
x.l <- xnorm[index.l]
x.u <- xnorm[index.u]
slope.poly.left <- as.numeric((Pnorm[index.u,1]-Pnorm[index.l,1])/(x.u-x.l))
index.l <- index+1
index.u <- index+5
x.l <- xnorm[index.l]
x.u <- xnorm[index.u]
slope.linear.left <- as.numeric((P.left[index.u]-P.left[index.l])/(x.u-x.l))
## Complete carpentry at the right boundary.
index <- which(xnorm==max(x))
index.l <- index-1
index.u <- index-5
x.l <- xnorm[index.l]
x.u <- xnorm[index.u]
slope.poly.right <- as.numeric((Pnorm[index.u,ncol(Pnorm)]-Pnorm[index.l,ncol(Pnorm)])/(x.u-x.l))
index.l <- index-1
index.u <- index-5
x.l <- xnorm[index.l]
x.u <- xnorm[index.u]
slope.linear.right <- as.numeric((P.right[index.u]-P.right[index.l])/(x.u-x.l))
P.left <- as.matrix(P.left-1)*slope.poly.left/slope.linear.left+1
P.right <- as.matrix(P.right-1)*slope.poly.right/slope.linear.right+1
P.left[xnorm>=min(x),1] <- 0
P.right[xnorm<=max(x),1] <- 0
Pnorm[,1] <- Pnorm[,1]+P.left
Pnorm[,ncol(Pnorm)] <- Pnorm[,ncol(Pnorm)]+P.right
return(Pnorm)
}
density.deriv.basis <- function(x=NULL,
xeval=NULL,
xnorm=xnorm,
degree=NULL,
segments=NULL,
basis="tensor",
knots="quantiles",
monotone=TRUE,
deriv.index=1,
deriv=1) {
suppressWarnings(Pnorm.deriv <- prod.spline(x=x,
xeval=xnorm,
K=cbind(degree,segments+if(monotone){2}else{0}),
knots=knots,
basis=basis,
deriv.index=deriv.index,
deriv=deriv))
if(monotone) Pnorm.deriv <- Pnorm.deriv[,-c(2,degree+segments+1)]
suppressWarnings(P.lin <- prod.spline(x=x,
xeval=xnorm,
K=cbind(1,1),
knots=knots,
basis=basis,
deriv.index=deriv.index,
deriv=deriv))
## For the derivative bases on the extended range `xnorm', above
## and below max(x)/min(x) we assign the bases to constants
## (zero). We append the linear basis to the left and right of the
## bases. The left basis takes on linear values to the left of
## min(x), zero elsewhere, the right zero to the left of max(x),
## linear elsewhere.
Pnorm.deriv[xnorm<min(x),] <- 0
Pnorm.deriv[xnorm>max(x),] <- 0
P.left <- as.matrix(P.lin[,1])
P.left[xnorm>=min(x),1] <- 0
P.right <- as.matrix(P.lin[,2])
P.right[xnorm<=max(x),1] <- 0
Pnorm.deriv[,1] <- Pnorm.deriv[,1]+P.left
Pnorm.deriv[,ncol(Pnorm.deriv)] <- Pnorm.deriv[,ncol(Pnorm.deriv)]+P.right
return(Pnorm.deriv)
}
clsd <- function(x=NULL,
beta=NULL,
xeval=NULL,
degree=NULL,
segments=NULL,
degree.min=2,
degree.max=25,
segments.min=1,
segments.max=100,
lbound=NULL,
ubound=NULL,
basis="tensor",
knots="quantiles",
penalty=NULL,
deriv.index=1,
deriv=1,
elastic.max=TRUE,
elastic.diff=3,
do.gradient=TRUE,
er=NULL,
monotone=TRUE,
monotone.lb=-250,
n.integrate=500,
nmulti=1,
method = c("L-BFGS-B", "Nelder-Mead", "BFGS", "CG", "SANN"),
verbose=FALSE,
quantile.seq=seq(.01,.99,by=.01),
random.seed=42,
maxit=10^5,
max.attempts=25,
NOMAD=FALSE) {
if(elastic.max && !NOMAD) {
degree.max <- 3
segments.max <- 3
}
ptm <- system.time("")
if(is.null(x)) stop(" You must provide data")
## If no er is provided use the following ad-hoc rule which attempts
## to ensure we cover the support of the variable for distributions
## with moments. This gets the chi-square, t, and Gaussian for n >=
## 100 with all degrees of freedom and df=1 is perhaps the worst
## case scenario. This rule delivers er = 0.43429448, 0.21714724,
## 0.14476483, 0.10857362, 0.08685890, and 0.0723824110, for n = 10,
## 10^2, 10^3, 10^4, 10^5, and 10^6. It is probably too aggressive
## for the larger samples but one can override - the code traps for
## non-finite integration and issues a message when this occurs
## along with a suggestion.
if(!is.null(er) && er < 0) stop(" er must be non-negative")
if(is.null(er)) er <- 1/log(length(x))
if(is.null(penalty)) penalty <- log(length(x))/2
method <- match.arg(method)
fv <- NULL
gen.xnorm.out <- gen.xnorm(x=x,
lbound=lbound,
ubound=ubound,
er=er,
n.integrate=n.integrate)
xnorm <- gen.xnorm.out$xnorm
rank.xnorm <- gen.xnorm.out$rank.xnorm
if(is.null(beta)) {
## If no parameters are provided presume intention is to run
## maximum likelihood estimation to obtain the parameter
## estimates.
ptm <- ptm + system.time(ls.ml.out <- ls.ml(x=x,
xnorm=xnorm,
rank.xnorm=rank.xnorm,
degree.min=degree.min,
segments.min=segments.min,
degree.max=degree.max,
segments.max=segments.max,
lbound=lbound,
ubound=ubound,
elastic.max=elastic.max,
elastic.diff=elastic.diff,
do.gradient=do.gradient,
maxit=maxit,
nmulti=nmulti,
er=er,
method=method,
n.integrate=n.integrate,
basis=basis,
knots=knots,
penalty=penalty,
monotone=monotone,
monotone.lb=monotone.lb,
verbose=verbose,
max.attempts=max.attempts,
random.seed=random.seed,
NOMAD=NOMAD))
beta <- ls.ml.out$beta
degree <- ls.ml.out$degree
segments <- ls.ml.out$segments
fv <- ls.ml.out$fv
}
if(!is.null(xeval)) {
gen.xnorm.out <- gen.xnorm(x=x,
xeval=xeval,
lbound=lbound,
ubound=ubound,
er=er,
n.integrate=n.integrate)
xnorm <- gen.xnorm.out$xnorm
rank.xnorm <- gen.xnorm.out$rank.xnorm
}
if(is.null(degree)) stop(" You must provide spline degree")
if(is.null(segments)) stop(" You must provide number of segments")
ptm <- ptm + system.time(Pnorm <- density.basis(x=x,
xeval=xeval,
xnorm=xnorm,
degree=degree,
segments=segments,
basis=basis,
knots=knots,
monotone=monotone))
if(ncol(Pnorm)!=length(beta)) stop(paste(" Incompatible arguments: beta must be of dimension ",ncol(Pnorm),sep=""))
Pnorm.beta <- as.numeric(Pnorm%*%as.matrix(beta))
## Compute the constant of integration to normalize the density
## estimate so that it integrates to one.
norm.constant <- integrate.trapezoidal.sum(xnorm,exp(Pnorm.beta))
log.norm.constant <- log(norm.constant)
if(!is.finite(log.norm.constant))
stop(paste(" integration not finite - perhaps try reducing `er' (current value = ",round(er,3),")",sep=""))
## For the distribution, compute the density over the extended
## range, then return values corresponding to either the sample x or
## evaluation x (xeval) based on integration over the extended range
## for the xnorm points (xnorm contains x and xeval - this ought to
## ensure integration to one).
## f.norm is the density evaluated on the extended range (including
## sample observations and evaluation points if the latter exist),
## F.norm the distribution evaluated on the extended range.
f.norm <- exp(Pnorm.beta-log.norm.constant)
F.norm <- integrate.trapezoidal(xnorm,f.norm)
if(deriv > 0) {
ptm <- ptm + system.time(Pnorm.deriv <- density.deriv.basis(x=x,
xeval=xeval,
xnorm=xnorm,
degree=degree,
segments=segments,
basis=basis,
knots=knots,
monotone=monotone,
deriv.index=deriv.index,
deriv=deriv))
f.norm.deriv <- as.numeric(f.norm*Pnorm.deriv%*%beta)
} else {
f.deriv <- NULL
f.norm.deriv <- NULL
}
## Compute quantiles using the quasi-inverse (Definition 2.3.6,
## Nelsen (2006))
quantile.vec <- numeric(length(quantile.seq))
for(i in 1:length(quantile.seq)) {
if(quantile.seq[i]>=0.5) {
quantile.vec[i] <- max(xnorm[F.norm<=quantile.seq[i]])
} else {
quantile.vec[i] <- min(xnorm[F.norm>=quantile.seq[i]])
}
}
## Next, strip off the values of the distribution corresponding to
## either sample x or evaluation xeval
if(is.null(xeval)) {
f <- f.norm[rank.xnorm][1:length(x)]
F <- F.norm[rank.xnorm][1:length(x)]
f.norm <- f.norm[rank.xnorm][(length(x)+1):length(f.norm)]
F.norm <- F.norm[rank.xnorm][(length(x)+1):length(F.norm)]
xnorm <- xnorm[rank.xnorm][(length(x)+1):length(xnorm)]
if(deriv>0) {
f.deriv <- f.norm.deriv[rank.xnorm][1:length(x)]
f.norm.deriv <- f.norm.deriv[rank.xnorm][(length(x)+1):length(f.norm.deriv)]
}
P <- Pnorm[rank.xnorm,][1:length(x),]
P.beta <- Pnorm.beta[rank.xnorm][1:length(x)]
} else {
f <- f.norm[rank.xnorm][1:length(xeval)]
F <- F.norm[rank.xnorm][1:length(xeval)]
f.norm <- f.norm[rank.xnorm][(length(x)+length(xeval)+1):length(f.norm)]
F.norm <- F.norm[rank.xnorm][(length(x)+length(xeval)+1):length(F.norm)]
xnorm <- xnorm[rank.xnorm][(length(x)+length(xeval)+1):length(xnorm)]
if(deriv>0) {
f.deriv <- f.norm.deriv[rank.xnorm][1:length(xeval)]
f.norm.deriv <- f.norm.deriv[rank.xnorm][(length(x)+length(xeval)+1):length(f.norm.deriv)]
}
P <- Pnorm[rank.xnorm,][1:length(xeval),]
P.beta <- Pnorm.beta[rank.xnorm][1:length(xeval)]
}
clsd.return <- list(density=f,
density.deriv=f.deriv,
distribution=F,
density.er=f.norm,
density.deriv.er=f.norm.deriv,
distribution.er=F.norm,
xer=xnorm,
Basis.beta=P.beta,
Basis.beta.er=Pnorm.beta,
P=P,
Per=Pnorm,
logl=sum(P.beta-log.norm.constant),
constant=norm.constant,
degree=degree,
segments=segments,
knots=knots,
basis=basis,
nobs=length(x),
beta=beta,
fv=fv,
er=er,
penalty=penalty,
nmulti=nmulti,
x=x,
xq=quantile.vec,
tau=quantile.seq,
ptm=ptm)
class(clsd.return) <- "clsd"
return(clsd.return)
}
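## Example usage (an illustrative sketch, kept as comments so that sourcing
## this file has no side effects):
## set.seed(42)
## x <- rchisq(1000, df = 5)
## fit <- clsd(x, nmulti = 5)
## summary(fit)
## plot(fit, distribution = TRUE)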
sum.log.density <- function(beta=NULL,
P=NULL,
Pint=NULL,
xint=NULL,
length.x=NULL,
penalty=NULL,
complexity=NULL,
...) {
return(2*sum(P%*%beta)-2*length.x*log(integrate.trapezoidal.sum(xint,exp(Pint%*%beta)))-penalty*complexity)
}
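## sum.log.density above is the penalized log-likelihood
## 2*sum(log f(x_i)) - penalty*(degree+segments); the function below is its
## analytic gradient with respect to beta (the penalty term drops out because
## it does not depend on beta).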
sum.log.density.gradient <- function(beta=NULL,
colSumsP=NULL,
Pint=NULL,
xint=NULL,
length.x=NULL,
penalty=NULL,
complexity=NULL,
...) {
exp.Pint.beta <- as.numeric(exp(Pint%*%beta))
exp.Pint.beta.Pint <- exp.Pint.beta*Pint
int.exp.Pint.beta.Pint <- numeric()
for(i in 1:complexity) int.exp.Pint.beta.Pint[i] <- integrate.trapezoidal.sum(xint,exp.Pint.beta.Pint[,i])
return(2*(colSumsP-length.x*int.exp.Pint.beta.Pint/integrate.trapezoidal.sum(xint,exp.Pint.beta)))
}
ls.ml <- function(x=NULL,
xnorm=NULL,
rank.xnorm=NULL,
degree.min=NULL,
segments.min=NULL,
degree.max=NULL,
segments.max=NULL,
lbound=NULL,
ubound=NULL,
elastic.max=FALSE,
elastic.diff=NULL,
do.gradient=TRUE,
maxit=NULL,
nmulti=NULL,
er=NULL,
method=NULL,
n.integrate=NULL,
basis=NULL,
knots=NULL,
penalty=NULL,
monotone=TRUE,
monotone.lb=NULL,
verbose=NULL,
max.attempts=NULL,
random.seed=NULL,
NOMAD=FALSE) {
## This function conducts log spline maximum
## likelihood. Multistarting is supported as is breaking out to
## potentially avoid wasted computation (be careful when using this,
## however, as it is prone to stopping early).
## Save seed prior to setting
if(exists(".Random.seed", .GlobalEnv)) {
save.seed <- get(".Random.seed", .GlobalEnv)
exists.seed = TRUE
} else {
exists.seed = FALSE
}
set.seed(random.seed)
if(missing(x)) stop(" You must provide data")
if(!NOMAD) {
## We set some initial parameters that are placeholders to get
## things rolling.
d.opt <- Inf
s.opt <- Inf
par.opt <- Inf
value.opt <- -Inf
length.x <- length(x)
length.xnorm <- length(xnorm)
## Loop through all degrees for every segment starting at
## segments.min.
d <- degree.min
while(d <= degree.max) {
## For smooth densities one can simply restrict degree to at least
## 2 (or 3 to be consistent with cubic splines)
s <- segments.min
while(s <= segments.max) {
if(options('crs.messages')$crs.messages) {
if(verbose) cat("\n")
cat("\r ")
cat("\rOptimizing, degree = ",d,", segments = ",s,", degree.opt = ",d.opt, ", segments.opt = ",s.opt," ",sep="")
}
## Generate objects that need not be recomputed for a given d
## and s
Pnorm <- density.basis(x=x,
xnorm=xnorm,
degree=d,
segments=s,
basis=basis,
knots=knots,
monotone=monotone)
P <- Pnorm[rank.xnorm,][1:length.x,]
colSumsP <- colSums(P)
Pint <- Pnorm[rank.xnorm,][(length.x+1):nrow(Pnorm),]
xint <- xnorm[rank.xnorm][(length.x+1):nrow(Pnorm)]
complexity <- d+s
## Multistart if desired.
for(n in 1:nmulti) {
## Can restart to see if we can improve on min... note initial
## values totally ad-hoc...
par.init.out <- par.init(d,s,monotone,monotone.lb)
par.init <- par.init.out$par.init
par.upper <- par.init.out$par.upper
par.lower <- par.init.out$par.lower
## Trap non-convergence, restart from different initial
## points, display message if needed (trace>0 up to 6 provides
## ever more detailed information for L-BFGS-B)
optim.out <- list()
optim.out[[4]] <- 9999
optim.out$value <- -Inf
m.attempts <- 0
while(tryCatch(suppressWarnings(optim.out <- optim(par=par.init,
fn=sum.log.density,
gr=if(do.gradient){sum.log.density.gradient}else{NULL},
upper=par.upper,
lower=par.lower,
method=method,
penalty=penalty,
P=P,
colSumsP=colSumsP,
Pint=Pint,
length.x=length.x,
xint=xint,
complexity=complexity,
control=list(fnscale=-1,maxit=maxit,if(verbose){trace=1}else{trace=0}))),
error = function(e){return(optim.out)})[[4]]!=0 && m.attempts < max.attempts){
## If optim fails to converge, reset initial parameters and
## try again.
if(options('crs.messages')$crs.messages) {
if(verbose && optim.out[[4]]!=0) {
if(!is.null(optim.out$message)) cat("\n optim message = ",optim.out$message,sep="")
cat("\n optim failed (degree = ",d,", segments = ",s,", convergence = ", optim.out[[4]],") re-running with new initial values",sep="")
}
}
par.init.out <- par.init(d,s,monotone,monotone.lb)
par.init <- par.init.out$par.init
par.upper <- par.init.out$par.upper
par.lower <- par.init.out$par.lower
m.attempts <- m.attempts+1
}
## Check for a new optimum, overwrite existing values with
## new values.
if(optim.out$value > value.opt) {
if(options('crs.messages')$crs.messages) {
if(verbose && n==1) cat("\n optim improved: d = ",d,", s = ",s,", old = ",formatC(value.opt,format="g",digits=6),", new = ",formatC(optim.out$value,format="g",digits=6),", diff = ",formatC(optim.out$value-value.opt,format="g",digits=6),sep="")
if(verbose && n>1) cat("\n optim improved (ms ",n,"/",nmulti,"): d = ",d,", s = ",s,", old = ",formatC(value.opt,format="g",digits=6),", new = ",formatC(optim.out$value,format="g",digits=6),", diff = ",formatC(optim.out$value-value.opt,format="g",digits=6),sep="")
}
par.opt <- optim.out$par
d.opt <- d
s.opt <- s
value.opt <- optim.out$value
}
}
if(!(segments.min==segments.max) && elastic.max && s.opt == segments.max) segments.max <- segments.max+elastic.diff
if(!(segments.min==segments.max) && elastic.max && s.opt < segments.max+elastic.diff) segments.max <- s.opt+elastic.diff
s <- s+1
}
d <- d+1
if(!(degree.min==degree.max) && elastic.max && d.opt == degree.max) degree.max <- degree.max+elastic.diff
if(!(degree.min==degree.max) && elastic.max && d.opt < degree.max+elastic.diff) degree.max <- d.opt+elastic.diff
}
} else {
eval.f <- function(input, params) {
sum.log.density <- params$sum.log.density
sum.log.density.gradient <- params$sum.log.density.gradient
method <- params$method
penalty <- params$penalty
x <- params$x
xnorm <- params$xnorm
knots <- params$knots
basis <- params$basis
monotone <- params$monotone
monotone.lb <- params$monotone.lb
rank.xnorm <- params$rank.xnorm
do.gradient <- params$do.gradient
maxit <- params$maxit
max.attempts <- params$max.attempts
verbose <- params$verbose
length.x <- length(x)
length.xnorm <- length(xnorm)
d <- input[1]
s <- input[2]
complexity <- d+s
Pnorm <- density.basis(x=x,
xnorm=xnorm,
degree=d,
segments=s,
basis=basis,
knots=knots,
monotone=monotone)
P <- Pnorm[rank.xnorm,][1:length.x,]
colSumsP <- colSums(P)
Pint <- Pnorm[rank.xnorm,][(length.x+1):nrow(Pnorm),]
xint <- xnorm[rank.xnorm][(length.x+1):nrow(Pnorm)]
## NOMAD minimizes only
optim.out <- list()
optim.out[[4]] <- 9999
optim.out$value <- -Inf
m.attempts <- 0
while(tryCatch(suppressWarnings(optim.out <- optim(par=par.init,
fn=sum.log.density,
gr=if(do.gradient){sum.log.density.gradient}else{NULL},
upper=par.upper,
lower=par.lower,
method=method,
penalty=penalty,
P=P,
colSumsP=colSumsP,
Pint=Pint,
length.x=length.x,
xint=xint,
complexity=complexity,
control=list(fnscale=-1,maxit=maxit,if(verbose){trace=1}else{trace=0}))),
error = function(e){return(optim.out)})[[4]]!=0 && m.attempts < max.attempts){
## If optim fails to converge, reset initial parameters and
## try again.
if(verbose && optim.out[[4]]!=0) {
if(options('crs.messages')$crs.messages) {
if(!is.null(optim.out$message)) cat("\n optim message = ",optim.out$message,sep="")
cat("\n optim failed (degree = ",d,", segments = ",s,", convergence = ", optim.out[[4]],") re-running with new initial values",sep="")
}
}
par.init.out <- par.init(d,s,monotone,monotone.lb)
par.init <- par.init.out$par.init
par.upper <- par.init.out$par.upper
par.lower <- par.init.out$par.lower
m.attempts <- m.attempts+1
}
if(options('crs.messages')$crs.messages) {
if(verbose) cat("\n")
cat("\r ")
cat("\rOptimizing, degree = ",d,", segments = ",s,", log likelihood = ",optim.out$value,sep="")
}
fv <- -optim.out$value
}
## Initial values
x0 <- c(degree.min,segments.min)
## Types of variables
bbin <-c(1, 1)
## Bounds
lb <- c(degree.min,segments.min)
ub <- c(degree.max,segments.max)
## Type of output
bbout <- c(0, 2, 1)
## Options
opts <-list("MAX_BB_EVAL"=10000)
## Generate params
params <- list()
params$sum.log.density <- sum.log.density
params$sum.log.density.gradient <- sum.log.density.gradient
params$method <- method
params$penalty <- penalty
params$x <- x
params$xnorm <- xnorm
params$knots <- knots
params$basis <- basis
params$monotone <- monotone
params$monotone.lb <- monotone.lb
params$rank.xnorm <- rank.xnorm
params$do.gradient <- do.gradient
params$maxit <- maxit
params$max.attempts <- max.attempts
params$verbose <- verbose
solution <- snomadr(eval.f=eval.f,
n=2,## number of variables
x0=x0,
bbin=bbin,
bbout=bbout,
lb=lb,
ub=ub,
nmulti=nmulti,
print.output=FALSE,
opts=opts,
params=params)
value.opt <- solution$objective
d <- solution$solution[1]
s <- solution$solution[2]
## Final call to optim to retrieve beta
length.x <- length(x)
length.xnorm <- length(xnorm)
complexity <- d+s
par.init.out <- par.init(d,s,monotone,monotone.lb)
par.init <- par.init.out$par.init
par.upper <- par.init.out$par.upper
par.lower <- par.init.out$par.lower
Pnorm <- density.basis(x=x,
xnorm=xnorm,
degree=d,
segments=s,
basis=basis,
knots=knots,
monotone=monotone)
P <- Pnorm[rank.xnorm,][1:length.x,]
colSumsP <- colSums(P)
Pint <- Pnorm[rank.xnorm,][(length.x+1):nrow(Pnorm),]
xint <- xnorm[rank.xnorm][(length.x+1):nrow(Pnorm)]
optim.out <- list()
optim.out[[4]] <- 9999
optim.out$value <- -Inf
m.attempts <- 0
while(tryCatch(suppressWarnings(optim.out <- optim(par=par.init,
fn=sum.log.density,
gr=if(do.gradient){sum.log.density.gradient}else{NULL},
upper=par.upper,
lower=par.lower,
method=method,
penalty=penalty,
P=P,
colSumsP=colSumsP,
Pint=Pint,
length.x=length.x,
xint=xint,
complexity=complexity,
control=list(fnscale=-1,maxit=maxit,if(verbose){trace=1}else{trace=0}))),
error = function(e){return(optim.out)})[[4]]!=0 && m.attempts < max.attempts){
## If optim fails to converge, reset initial parameters and
## try again.
if(verbose && optim.out[[4]]!=0) {
if(options('crs.messages')$crs.messages) {
if(!is.null(optim.out$message)) cat("\n optim message = ",optim.out$message,sep="")
cat("\n optim failed (degree = ",d,", segments = ",s,", convergence = ", optim.out[[4]],") re-running with new initial values",sep="")
}
}
par.init.out <- par.init(d,s,monotone,monotone.lb)
par.init <- par.init.out$par.init
par.upper <- par.init.out$par.upper
par.lower <- par.init.out$par.lower
m.attempts <- m.attempts+1
}
d.opt <- d
s.opt <- s
par.opt <- optim.out$par
value.opt <- optim.out$value
}
if(options('crs.messages')$crs.messages) {
cat("\r ")
if(!(degree.min==degree.max) && (d.opt==degree.max)) warning(paste(" optimal degree equals search maximum (", d.opt,"): rerun with larger degree.max",sep=""))
if(!(segments.min==segments.max) && (s.opt==segments.max)) warning(paste(" optimal segment equals search maximum (", s.opt,"): rerun with larger segments.max",sep=""))
if(par.opt[1]>0|par.opt[length(par.opt)]>0) warning(" optim() delivered a positive weight for linear segment (supposed to be negative)")
if(!monotone&&par.opt[1]<=monotone.lb) warning(paste(" optimal weight for left nonmonotone basis equals search minimum (",par.opt[1],"): rerun with smaller monotone.lb",sep=""))
if(!monotone&&par.opt[length(par.opt)]<=monotone.lb) warning(paste(" optimal weight for right nonmonotone basis equals search minimum (",par.opt[length(par.opt)],"): rerun with smaller monotone.lb",sep=""))
}
## Restore seed
if(exists.seed) assign(".Random.seed", save.seed, .GlobalEnv)
return(list(degree=d.opt,segments=s.opt,beta=par.opt,fv=value.opt))
}
summary.clsd <- function(object,
...) {
  cat("\nLogspline Density\n",sep="")
cat(paste("\nModel penalty: ", format(object$penalty), sep=""))
cat(paste("\nModel degree/segments: ", format(object$degree),"/",format(object$segments), sep=""))
cat(paste("\nKnot type: ", format(object$knots), sep=""))
cat(paste("\nBasis type: ",format(object$basis),sep=""))
cat(paste("\nTraining observations: ", format(object$nobs), sep=""))
cat(paste("\nLog-likelihood: ", format(object$logl), sep=""))
cat(paste("\nNumber of multistarts: ", format(object$nmulti), sep=""))
cat(paste("\nEstimation time: ", formatC(object$ptm[1],digits=1,format="f"), " seconds",sep=""))
cat("\n\n")
}
print.clsd <- function(x,...)
{
summary.clsd(x)
}
plot.clsd <- function(x,
er=TRUE,
distribution=FALSE,
derivative=FALSE,
ylim,
ylab,
xlab,
type,
...) {
if(missing(xlab)) xlab <- "Data"
if(missing(type)) type <- "l"
if(!er) {
order.x <- order(x$x)
if(distribution){
y <- x$distribution[order.x]
if(missing(ylab)) ylab <- "Distribution"
if(missing(ylim)) ylim <- c(0,1)
}
if(!distribution&&!derivative) {
y <- x$density[order.x]
if(missing(ylab)) ylab <- "Density"
if(missing(ylim)) ylim <- c(0,max(y))
}
if(derivative) {
y <- x$density.deriv[order.x]
if(missing(ylab)) ylab <- "Density Derivative"
if(missing(ylim)) ylim <- c(min(y),max(y))
}
x <- x$x[order.x]
} else {
order.xer <- order(x$xer)
if(distribution){
y <- x$distribution.er[order.xer]
if(missing(ylab)) ylab <- "Distribution"
if(missing(ylim)) ylim <- c(0,1)
}
if(!distribution&&!derivative) {
y <- x$density.er[order.xer]
if(missing(ylab)) ylab <- "Density"
if(missing(ylim)) ylim <- c(0,max(y))
}
if(derivative){
y <- x$density.deriv.er[order.xer]
if(missing(ylab)) ylab <- "Density Derivative"
if(missing(ylim)) ylim <- c(min(y),max(y))
}
x <- x$xer[order.xer]
}
x <- plot(x,
y,
ylim=ylim,
ylab=ylab,
xlab=xlab,
type=type,
...)
}
coef.clsd <- function(object, ...) {
tc <- object$beta
return(tc)
}
fitted.clsd <- function(object, ...){
object$density
}
## ---- file: /get_mle_pure_r_code.R (tianqinglong/bootstrap_prediction) ----
library(survival) # Surv() and survreg() below come from the survival package
get_Weibull_mle_R <- function(censor_data) {
n_minus_r <- censor_data[[4]] - censor_data[[1]]
sobj <- Surv(time = c( censor_data[[3]],
rep(censor_data[[2]], n_minus_r)
),
event = c( rep(1, censor_data[[1]] ),
rep(0, n_minus_r)
),
type = 'right'
)
sfit <- survreg(sobj~1, dist = 'weibull')
return( list( Shape_mle = 1/sfit$scale,
Scale_mle = as.numeric( exp(sfit$coefficients) )))
}
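# Illustrative call (a sketch; the censor_data layout is inferred from the
# indexing above: [[1]] = number of failures r, [[2]] = censoring time,
# [[3]] = the r observed failure times, [[4]] = total sample size n):
# cd <- list(3, 10, c(2.1, 5.4, 7.9), 10)
# get_Weibull_mle_R(cd) # list(Shape_mle = ..., Scale_mle = ...)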
## ---- file: /explorar_SpatialPolygonsDataFrame.r (manuelcampagnolo/vector_datasets_R) ----
library(rgdal) # provides readOGR(); loads sp for the Spatial* classes
# read the shapefile of protected areas (ICNF)
icnf<-readOGR(dsn=getwd(),layer="AP_JUL_2014",encoding="ISO8859-1")
plot(icnf)
# what is the CRS of this spatial dataset?
icnf@proj4string
# how many multi-polygons are there?
length(icnf@polygons)
# what do the first rows of the attribute table look like?
head(icnf@data)
# find the row of the attribute table that has Montejunto in the NOME attribute
indice<-which(grepl(pattern="Montejunto",icnf@data$NOME,ignore.case = TRUE))
# check that it is the 12th row
icnf@data[12,]
# plot that 12th multi-polygon of icnf
# a subset of the polygons is selected by selecting row(s), just as for the attribute table
class(icnf[12,]) # still a SpatialPolygonsDataFrame
# get information about the 12th multi-polygon:
# ID of the multi-polygon
icnf@polygons[[12]]@ID
# area of the multi-polygon
icnf@polygons[[12]]@area
# how many parts does the 12th multi-polygon of icnf have?
length(icnf@polygons[[12]]@Polygons) # it has 6 parts
# what is the type of each part: "hole" or not?
for (i in 1:6) print(icnf@polygons[[12]]@Polygons[[i]]@hole)
# what is the area of each part of the 12th multi-polygon?
for (i in 1:6) print(icnf@polygons[[12]]@Polygons[[i]]@area)
# what are the coordinates of the points that delimit the 3rd part of the 12th multi-polygon?
pol3 <- icnf@polygons[[12]]@Polygons[[3]]@coords # matrix with 2 columns
# build the image
if (export) png(paste(aulas,"ap_montejunto.png",sep="\\"), width=800, height=600, res=120)
plot(icnf[12,])
text(x=icnf[12,]@bbox["x","min"], y=icnf[12,]@bbox["y","max"], as.character(icnf@data[12,"NOME"]),pos=4,cex=.9)
for (i in 1:length(icnf@polygons[[12]]@Polygons))
{
aux<-icnf@polygons[[12]]@Polygons[[i]];
if (aux@hole) polygon(aux@coords,col="yellow")
text(x=aux@labpt[1], y=aux@labpt[2],paste(round(aux@area/10000,1),"ha",sep=""),cex=.7,pos=4)
}
if (export) graphics.off()
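# quick check (sketch): total area of the protected area in hectares, using
# the multi-polygon 'area' slot inspected above
icnf@polygons[[12]]@area/10000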
## ---- file: /plot3.R (neoeahit/ExData_Plotting1) ----
data=read_data() # read_data() is a helper defined elsewhere in this repo
png("plot3.png", height=480, width=480)
with(data, plot(Time, Sub_metering_1, type="l",ylab="Energy sub metering", xlab=" ", col="black"))
with(data, lines(Time, Sub_metering_2, col="red"))
with(data, lines(Time, Sub_metering_3, col="blue"))
legend(x="topright", lwd=1, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"))
dev.off()
## ---- file: /Data Mining Course/9. support vector machines.R (richarddeng88/Advanced_Data_Mining) ----
#================================= support vector classifier========================================
set.seed(1)
x <- matrix(rnorm(20*2), ncol=2)
y <- c(rep(-1,10), rep(1,10))
x[y==1,] <- x[y==1,]+1
# we check whether the classes are linearly separable by plotting them, colored by class
plot(x, col=(3-y)) # y is -1/1, so this maps the two classes to colors 4 (blue) and 2 (red)
## we encode the response as a factor and combine the variables and response together.
dat <- data.frame(x=x, y=as.factor(y))
library(e1071)
sumfit <- svm(y~., data=dat, kernel="linear", cost=1000, scale=F)
# let us plot the support vector classifier obtained above.
plot(sumfit, dat)
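# further inspection (sketch): indices of the support vectors and a fuller
# summary of the fitted classifier
sumfit$index
summary(sumfit)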
## ---- file: /in_progress/models/costBenefitAnalysis/scriptsForPaper/interventionGroupsPresenter.R (mmcdermott/disease-modeling) ----
library(ggplot2)
source('interventionGroups.R')
source('deSolConstants.R')
#Title Generation:
plotTitle <- function(base,interventionName,final="") {
if (final != "") {
return(ggtitle(paste(c(base,interventionName,final),collapse=" ")))
} else {
return(ggtitle(paste(c(base,interventionName),collapse=" ")))
}
}
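#e.g. plotTitle("Incidence given", "redEnLTBI50") yields ggtitle("Incidence given redEnLTBI50")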
baseData <- read.csv(baseFile)
#Base Incidence
baseInc <- generateIncidence(baseData)
#US Health Care System (HCS) TB costs due to base system
baseHCSCost <- (baseData$cN0 + baseData$cN1)/1e9
baseCasesD <- 1e6*(baseData$progTotalD0 + baseData$progTotalD1)
#Data Labels
USB <- rep("USB", totT)
FB <- rep("FB", totT)
all <- rep("All", totT)
noInt <- rep("No Intervention", totT)
int <- rep("Intervention", totT)
int10 <- rep("10% Cured", totT)
int25 <- rep("25% Cured", totT)
int50 <- rep("50% Cured", totT)
int100 <- rep("100% Cured", totT)
savings <- rep("Savings", totT)
costs <- rep("Implementation Cost", totT)
totalCosts <- rep("US HCS Cost", totT)
averted <- rep("Cases Averted", totT)
TBdeathsAverted <- rep("TB Deaths Averted", totT)
redEnLTBI100L <- rep("100% reduction", totT)
redEnLTBI75L <- rep("75% reduction", totT)
redEnLTBI50L <- rep("50% reduction", totT)
redEnLTBI25L <- rep("25% reduction", totT)
redEnLTBI10L <- rep("10% reduction", totT)
#Aesthetics
USBC <- 'blue'
FBC <- 'green'
allC <- 'red'
noIntC <- 'black'
intC <- 'blue'
savingsC <- '#24913C'
costsC <- '#9F0013'
avertedC <- '#24913C'
TBdeathsAvertedC <- '#24913C'
# Creating our data containers.
rawData <- as.list(paperRedEnLTBIInts)
incidence <- as.list(paperRedEnLTBIInts)
HCSCost <- as.list(paperRedEnLTBIInts)
costOfInter <- as.list(paperRedEnLTBIInts)
saveOfInter <- as.list(paperRedEnLTBIInts)
interTot <- as.list(paperRedEnLTBIInts)
totSpent <- as.list(paperRedEnLTBIInts)
cpca <- as.list(paperRedEnLTBIInts)
casesAverted <- as.list(paperRedEnLTBIInts)
# Naming them for convenience.
names(rawData) <- paperRedEnLTBIInts
names(incidence) <- paperRedEnLTBIInts
names(HCSCost) <- paperRedEnLTBIInts
names(costOfInter) <- paperRedEnLTBIInts
names(saveOfInter) <- paperRedEnLTBIInts
names(interTot) <- paperRedEnLTBIInts
names(totSpent) <- paperRedEnLTBIInts
names(cpca) <- paperRedEnLTBIInts
names(casesAverted) <- paperRedEnLTBIInts
for (intervention in paperRedEnLTBIInts) {
# First, grab the base data and incidence
rawData[[intervention]] <-
read.csv(paste(c(intFilePrefix,intervention, intFileSuffix),collapse=""))
incidence[[intervention]] <-
generateIncidence(rawData[[intervention]])
#HCS cost borne by intervention
HCSCost[[intervention]] <- (rawData[[intervention]]$cN0 +
rawData[[intervention]]$cN1)/1e9
#Implementation cost of intervention
costOfInter[[intervention]] <-
(rawData[[intervention]]$interventionCost)/1e9
#Savings from intervention
saveOfInter[[intervention]] <- baseHCSCost - HCSCost[[intervention]]
#Total US HCS cost due to intervention
interTot[[intervention]] <- HCSCost[[intervention]] +
costOfInter[[intervention]]
#Total additional spent by US HCS due to intervention
totSpent[[intervention]] <- interTot[[intervention]] - baseHCSCost
#Cost per cases averted
intCasesD <- 1e6*(rawData[[intervention]]$progTotalD0 + rawData[[intervention]]$progTotalD1)
casesAverted[[intervention]] <- baseCasesD - intCasesD
cpca[[intervention]] <-
1e9*totSpent[[intervention]]/casesAverted[[intervention]]
}
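#Consistency check (sketch): for any scenario, net additional spending should
#equal the implementation cost minus the savings.
stopifnot(isTRUE(all.equal(totSpent[["redEnLTBI50"]],
                           costOfInter[["redEnLTBI50"]] - saveOfInter[["redEnLTBI50"]])))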
#Incidence Reports: Comparing Baseline Incidence against Intervention Incidence
incData <- data.frame(year = years,
baseUSB=baseInc$IN0,
baseFB =baseInc$IN1,
baseAll=baseInc$INall,
redEnLTBI10USB=incidence[["redEnLTBI10"]]$IN0,
redEnLTBI10FB =incidence[["redEnLTBI10"]]$IN1,
redEnLTBI10All=incidence[["redEnLTBI10"]]$INall,
redEnLTBI25USB=incidence[["redEnLTBI25"]]$IN0,
redEnLTBI25FB =incidence[["redEnLTBI25"]]$IN1,
redEnLTBI25All=incidence[["redEnLTBI25"]]$INall,
redEnLTBI50USB=incidence[["redEnLTBI50"]]$IN0,
redEnLTBI50FB =incidence[["redEnLTBI50"]]$IN1,
redEnLTBI50All=incidence[["redEnLTBI50"]]$INall,
redEnLTBI100USB=incidence[["redEnLTBI100"]]$IN0,
redEnLTBI100FB =incidence[["redEnLTBI100"]]$IN1,
redEnLTBI100All=incidence[["redEnLTBI100"]]$INall)
incPlot <- ggplot(incData, aes(x=year)) +
scale_y_log10(breaks=c(1,2,5,10,25,50,100,200),
labels=c("Elimination (1)",2,5,10,25,50,100,200),
limits=c(0.5,250)) +
labs(x="Years", y="Incidence/Million", color="Population",
linetype="Intervention Status") +
ggtitle("Incidence/Million with Various Immigrating LTBI Cure Rates") +
geom_line(aes(y=baseUSB, color=USB, linetype=noInt)) +
geom_line(aes(y=redEnLTBI10USB, color=USB, linetype=int10)) +
geom_line(aes(y=redEnLTBI25USB, color=USB, linetype=int25)) +
geom_line(aes(y=redEnLTBI50USB, color=USB, linetype=int50)) +
geom_line(aes(y=redEnLTBI100USB, color=USB, linetype=int100)) +
geom_line(aes(y=baseFB, color=FB, linetype=noInt)) +
geom_line(aes(y=redEnLTBI10FB, color=FB, linetype=int10)) +
geom_line(aes(y=redEnLTBI25FB, color=FB, linetype=int25)) +
geom_line(aes(y=redEnLTBI50FB, color=FB, linetype=int50)) +
geom_line(aes(y=redEnLTBI100FB, color=FB, linetype=int100)) +
geom_line(aes(y=baseAll, color=all, linetype=noInt)) +
geom_line(aes(y=redEnLTBI10All, color=all, linetype=int10)) +
geom_line(aes(y=redEnLTBI25All, color=all, linetype=int25)) +
geom_line(aes(y=redEnLTBI50All, color=all, linetype=int50)) +
geom_line(aes(y=redEnLTBI100All, color=all, linetype=int100)) +
theme(axis.title=element_text(size=16),axis.text=element_text(size=15),
plot.title=element_text(size=18))#,legend.key.height =
#unit(1.8,'line'))
#Total Costs Excluding Sticker Price: Comparing costs of various interventions
#US Health Care System (HCS) TB costs due to base system
# baseCost <- (baseData$cN0 + baseData$cN1)/1e9
# #US HCS TB costs due to intervenvtion
# interCost <- (rawData$cN0 + rawData$cN1)/1e9
# #US HCS TB savings due to intervention
# totSaved <- baseCost - interCost
#
# savingsData <- data.frame(year=years, baseCost=baseCost, interCost=interCost,
# totSaved=totSaved)
# yrange <- round(seq(min(savingsData$baseCost),max(savingsData$baseCost),by=0.5),1)
# savingsPlot <- ggplot(savingsData,aes(x=year)) +
# labs(x="Years", y="Billions of USD", color="Intervention Status") +
# scale_y_continuous(breaks=yrange) +
# plotTitle("Total Saved by US Health Care System given
# Intervention",interventionName,
# "ignoring intervention cost") +
# geom_ribbon(aes(ymin=interCost,ymax=baseCost,fill=savings, alpha=0.2)) +
# geom_line(aes(y=baseCost, color=noInt)) +
# geom_line(aes(y=interCost, color=int)) +
# geom_line(aes(y=totSaved, color=savings)) +
# scale_fill_manual(values=c(savingsC)) +
# scale_color_manual(values=c(intC,noIntC,savingsC)) +
# guides(fill=F, alpha=F)
#
# #Total Costs Including Sticker Price: Comparing costs of various interventions
# #Implementation cost of intervention
# costInter <- (rawData$interventionCost)/1e9
# #Total US HCS cost due to intervention
# interTot <- interCost + costInter
# #Total additional spent by US HCS due to intervention
# totSpent <- interTot - baseCost
#
# costData <- data.frame(year=years, baseCost=baseCost, interCost=interTot,
# totSpent=totSpent)
# yrange <- round(seq(min(costData$interCost),max(costData$interCost)+0.5,by=0.5),1)
# costsPlot <- ggplot(costData,aes(x=year)) +
# labs(x="Years", y="Billions of USD", color="Intervention Status") +
# scale_y_continuous(breaks=yrange) +
# plotTitle("Total Spent by US Health Care System given
# Intervention",interventionName,
# "given presumed intervention cost") +
# geom_ribbon(aes(ymin=baseCost,ymax=interCost,fill=costs, alpha=1)) +
# geom_line(aes(y=baseCost, color=noInt)) +
# geom_line(aes(y=interCost, color=int)) +
# geom_line(aes(y=totSpent, color=costs)) +
# scale_fill_manual(values=c(costsC)) +
# scale_color_manual(values=c(costsC,intC,noIntC)) +
# guides(fill=F, alpha=F)
#
# #Total Cases Averted
# baseCases <- 1e6*(baseData$progAcute0 + baseData$progAcute1 +
# baseData$progChron0 + baseData$progChron1)
# baseCasesD <- 1e6*(baseData$progTotalD0 + baseData$progTotalD1)
# intCases <- 1e6*(rawData$progAcute0 + rawData$progAcute1 +
# rawData$progChron0 + rawData$progChron1)
# intCasesD <- 1e6*(rawData$progTotalD0 + rawData$progTotalD1)
# casesAverted <- baseCases - intCases
# casesAvertedD <- baseCasesD - intCasesD
#
# casesAvertedData <- data.frame(year=years,baseCases=baseCases,
# intCases=intCases,
# casesAverted=casesAverted)
# casesAvertedDataD <- data.frame(year=years,baseCases=baseCasesD,
# intCases=intCasesD,
# casesAverted=casesAvertedD)
#
# yrange <- round(seq(min(casesAvertedData$baseCases),
# max(casesAvertedData$baseCases),by=1e5),1)
# casesAvertedPlot <-
# ggplot(casesAvertedData,aes(x=year)) +
# labs(x="Years", y="Cases of TB", color="Intervention Status") +
# scale_y_continuous(breaks=yrange) +
# plotTitle("Total Cases of TB Averted given Intervention",interventionName) +
# geom_ribbon(aes(ymin=intCases,ymax=baseCases,fill=averted, alpha=0.2)) +
# geom_line(aes(y=baseCases, color=noInt)) +
# geom_line(aes(y=intCases, color=int)) +
# geom_line(aes(y=casesAverted, color=averted)) +
# scale_fill_manual(values=c(avertedC)) +
# scale_color_manual(values=c(avertedC,intC,noIntC)) +
# guides(fill=F, alpha=F)
#
# yrange <- round(seq(min(casesAvertedDataD$baseCases),
# max(casesAvertedDataD$baseCases),by=1e5),1)
# casesAvertedPlotD <-
# ggplot(casesAvertedDataD,aes(x=year)) +
# labs(x="Years", y="Discounted Cases of TB",
# color="Intervention Status") +
# scale_y_continuous(breaks=yrange) +
# plotTitle("Discounted Cases of TB Averted given Intervention",
# interventionName) +
# geom_ribbon(aes(ymin=intCases,ymax=baseCases,fill=averted, alpha=0.2)) +
# geom_line(aes(y=baseCases, color=noInt)) +
# geom_line(aes(y=intCases, color=int)) +
# geom_line(aes(y=casesAverted, color=averted)) +
# scale_fill_manual(values=c(avertedC)) +
# scale_color_manual(values=c(avertedC,intC,noIntC)) +
# guides(fill=F, alpha=F)
#
# #Cost per cases averted graph:
# cpcaData <- data.frame(year=yearsPC,cpca=1e9*totSpent[cutoffT:totT]/casesAverted[cutoffT:totT])
# cpcaDataD <- data.frame(year=yearsPC,cpca=1e9*totSpent[cutoffT:totT]/casesAvertedD[cutoffT:totT])
#
# cpcaPlot <-
# ggplot(cpcaData,aes(x=year)) +
# labs(x="Years", y="USD") +
# scale_x_continuous(breaks=c(initialYr,cutoffYr,seq(initialYr,finalYr,25))) +
# #scale_y_log10() +
# plotTitle("Cost per Raw TB Case Averted due to Intervention",
# interventionName) +
# geom_line(aes(y=cpca))
#
# cpcaPlotD <-
# ggplot(cpcaDataD,aes(x=year)) +
# labs(x="Years", y="USD") +
# scale_x_continuous(breaks=c(initialYr,cutoffYr,seq(initialYr,finalYr,25))) +
# #scale_y_log10() +
# plotTitle("Cost per Discounted TB Case Averted due to Intervention",
# interventionName) +
# geom_line(aes(y=cpca))
#
# #TB Deaths:
# baseDeaths <- 1e6*(baseData$tbdeath0 + baseData$tbdeath1)
# baseDeathsD <- 1e6*(baseData$tbdeathD0 + baseData$tbdeathD1)
# intDeaths <- 1e6*(rawData$tbdeath0 + rawData$tbdeath1)
# intDeathsD <- 1e6*(rawData$tbdeathD0 + rawData$tbdeathD1)
# deathsAverted <- baseDeaths - intDeaths
# deathsAvertedD <- baseDeathsD - intDeathsD
#
# deathsAvertedData <- data.frame(year=years,baseDeaths=baseDeaths,
# intDeaths=intDeaths,
# deathsAverted =deathsAverted)
# deathsAvertedDataD <- data.frame(year=years,baseDeaths=baseDeathsD,
# intDeaths=intDeathsD,
# deathsAverted=deathsAvertedD)
#
# yrange <- round(seq(min(deathsAvertedData$baseDeaths),
# max(deathsAvertedData$baseDeaths),by=5e3),1)
# deathsAvertedPlot <-
# ggplot(deathsAvertedData,aes(x=year)) +
# labs(x="Years", y="TB Deaths", color="Intervention Status") +
# scale_y_continuous(breaks=yrange) +
# plotTitle("Total TB Lives Saved Given Intervention",interventionName) +
# geom_ribbon(aes(ymin=intDeaths,ymax=baseDeaths,fill=averted, alpha=0.2)) +
# geom_line(aes(y=baseDeaths, color=noInt)) +
# geom_line(aes(y=intDeaths, color=int)) +
# geom_line(aes(y=deathsAverted, color=TBdeathsAverted)) +
# scale_fill_manual(values=c(TBdeathsAvertedC)) +
# scale_color_manual(values=c(intC,noIntC,TBdeathsAvertedC)) +
# guides(fill=F, alpha=F)
#
# yrange <- round(seq(min(deathsAvertedDataD$baseDeaths),
# max(deathsAvertedDataD$baseDeaths),by=5e3),1)
# deathsAvertedPlotD <-
# ggplot(deathsAvertedDataD,aes(x=year)) +
# labs(x="Years", y="Discounted TB Deaths", color="Intervention Status") +
# scale_y_continuous(breaks=yrange) +
# plotTitle("Discounted TB Lives Saved Given Intervention",interventionName) +
# geom_ribbon(aes(ymin=intDeaths,ymax=baseDeaths,fill=averted, alpha=0.2)) +
# geom_line(aes(y=baseDeaths, color=noInt)) +
# geom_line(aes(y=intDeaths, color=int)) +
# geom_line(aes(y=deathsAverted, color=TBdeathsAverted)) +
# scale_fill_manual(values=c(TBdeathsAvertedC)) +
# scale_color_manual(values=c(intC,noIntC,TBdeathsAvertedC)) +
# guides(fill=F, alpha=F)
#
#
#
# ggsave('paperRedEnLTBI.pdf',x,width=15,height=12)
fileName <- "redEnLTBI.pdf"
pdf(fileName,onefile=T)
print(incPlot)
#print(savingsPlot)
#print(costsPlot)
#print(casesAvertedPlot)
#print(casesAvertedPlotD)
#print(cpcaPlot)
#print(cpcaPlotD)
#print(deathsAvertedPlot)
#print(deathsAvertedPlotD)
dev.off()
## ---- file: /PairwiseTTests.R (pkiekel/CentraliaCollegeMath246) ----
hsb2<-read.table("http://www.ats.ucla.edu/stat/data/hsb2.csv", sep=",", header=T)
attach(hsb2)
tapply(write, ses, mean)
tapply(write, ses, sd)
a1 <- aov(write ~ ses)
summary(a1)
pairwise.t.test(write, ses, p.adj = "none")
pairwise.t.test(write, ses, p.adj = "bonf")
TukeyHSD(a1)
a2 <- aov(write ~ ses + female)
summary(a2)
TukeyHSD(a2, "ses")
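# For comparison (a sketch): Holm's step-down adjustment controls the same
# family-wise error rate as Bonferroni but is uniformly more powerful.
pairwise.t.test(write, ses, p.adj = "holm")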
## ---- file: /lib/model.fitting.functions.R (stoufferlab/annual-plant-dynamics, MIT license) ----
##################################
# model fitting functions
##################################
# produce two-species model prediction given:
# 1. model parameters
# 2. a set of initial conditions for all state variables
fecundity.model.predict = function(params, plants.i, plants.j, seeds.i, seeds.j, time, focal, verbose=FALSE){
# set the parameters in the ode solver
fecundity_dynamics_set_params(
gamma_i = params["gamma_i"],
mu_i = params["mu_i"],
nu_i = params["nu_i"],
r_i = exp(params["log_r_i"]),
K_i = params["K_i"],
beta_i = params["beta_i"],
gamma_j = params["gamma_j"],
mu_j = params["mu_j"],
nu_j = params["nu_j"],
r_j = exp(params["log_r_j"]),
K_j = params["K_j"],
beta_j = params["beta_j"],
alpha_ij = params["alpha_ij"],
alpha_ji = params["alpha_ji"]
)
# for tracing purposes
if(verbose>1){
print(params)
message("predicting")
flush(stdout())
}
# container for the predicted outputs
predicted.fecundity <- numeric(length(plants.i))
# iterate over all observations
for(i in seq.int(length(plants.i))){
# for tracing purposes
if(verbose>2){
message(i)
flush(stdout())
}
# starting conditions are viable seeds in seed bank, plants, and plant biomass
x0 <- c(
seeds.i[i], plants.i[i], plants.i[i] * params["beta_i"],
seeds.j[i], plants.j[i], plants.j[i] * params["beta_j"]
)
# output order matches the conditions above but with time elapsed in first column
growing.season <- fecundity_dynamics(x0, time[i], time[i]/1000.)
colnames(growing.season) <- c(
"time.elapsed",
"seeds.i",
"plants.i",
"biomass.i",
"seeds.j",
"plants.j",
"biomass.j"
)
# we only want the final values to make our prediction
growing.season <- growing.season[nrow(growing.season),]
# convert biomass to per capita fecundity
growing.season$fecundity.i <- exp(params["log_phi_i"]) * growing.season$biomass.i / growing.season$plants.i
growing.season$fecundity.j <- exp(params["log_phi_j"]) * growing.season$biomass.j / growing.season$plants.j
# select the fecundity corresponding to the focal species
predicted.fecundity[i] <- growing.season[,paste0("fecundity.",focal[i])]
}
# for tracing purposes
if(verbose>1){
message("predicted")
flush(stdout())
}
return(predicted.fecundity)
}
# calculate the negative loglikelihood of a set of observations given:
# 1. model parameters
# 2. a set of initial conditions for all state variables
# 3. observed fecundities
fecundity.model.NLL = function(params, plants.i, plants.j, seeds.i, seeds.j, time, focal, fecundity, verbose=0){
# calculate the vector of predicted values using function above
predicted.fecundity <- fecundity.model.predict(
params,
plants.i = plants.i,
plants.j = plants.j,
seeds.i = seeds.i,
seeds.j = seeds.j,
time = time,
focal = focal,
verbose = verbose
)
		# anywhere in parameter space that is non-biological or uninformative should be avoided
# otherwise we treat observed fecundities as Poisson observations to calculate the log-likelihood
if(!all(predicted.fecundity > 0) || any(!is.finite(predicted.fecundity))){
return(Inf)
}else{
nll <- -sum(dpois(fecundity, predicted.fecundity, log=TRUE))
}
# for tracing purposes
if(verbose>0){
print(nll)
flush(stdout())
}
return(nll)
}
# define the order of parameters ; this is a requirement of mle2 to use a parameter vector (like optim)
parnames(fecundity.model.NLL) <- c(
"gamma_i",
"gamma_j",
"mu_i",
"mu_j",
"nu_i",
"nu_j",
"log_r_i",
"log_r_j",
"K_i",
"K_j",
"beta_i",
"beta_j",
"log_phi_i",
"log_phi_j",
"alpha_ij",
"alpha_ji"
)
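# Illustrative fit (a sketch; 'obs' stands in for the real observation data
# and 'start' for a named start vector in the parnames() order above):
# library(bbmle)
# fit <- mle2(fecundity.model.NLL, start = start, vecpar = TRUE,
#             data = list(plants.i = obs$plants.i, plants.j = obs$plants.j,
#                         seeds.i = obs$seeds.i, seeds.j = obs$seeds.j,
#                         time = obs$time, focal = obs$focal,
#                         fecundity = obs$fecundity))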
## ---- file: /R/case_id.r (BijsT/bupaR) ----
#' @title Case classifier
#'
#' @description Get the case classifier of an object of class \code{eventlog}
#'
#' @param eventlog An object of class \code{eventlog}.
#'
#' @seealso \code{\link{eventlog}}, \code{\link{activity_id}},
#' \code{\link{lifecycle_id}}, \code{\link{activity_instance_id}}
#'
#'
#'
#' @export case_id
#'
case_id <- function(eventlog){
if("eventlog" %in% class(eventlog))
return(attr(eventlog, "case_id"))
else
stop("Function only applicable on objects of type 'eventlog'")
}
## ---- file: /R/R6UMLR2Base.R (Grandez/umlr2) ----
#' @name UMLR2Base
#' @title UMLR2Base
#' @rdname R6UMLR2BASE
#' @docType class
#' @description Base class of the package.
UMLR2Base = R6::R6Class("R6UMLR2BASE"
,portable = FALSE
,lock_objects = TRUE
,lock_class = TRUE
,public = list(
#' @description Initializer
#' @param ... configuration data
initialize = function(...) {
# if (substr(as.character(sys.call(-1))[1], 1, 9) == "UMLR2BASE") msg$err("E900", "UMLR2BASE")
cfg$setConfig(...)
}
#' @description Changes the configuration data of the instance
#' @param ... named values defining the configuration
#' @return The object instance
,setConfig = function(...) {
cfg$setConfig(...)
invisible(self)
}
#' @description Gets the configuration data
#' @return A list with the configuration data
,getConfig = function() { cfg }
#' @description Checks whether the configuration is correct
#' @details This function does not verify that the data are real.
#' To fully verify the system use checkInstallation
#' @param verbose Show informative messages
#' @param first Stop the process at the first error (if any)
#' @return TRUE if it is
#' FALSE if not
,checkConfiguration = function(verbose=TRUE, first=FALSE) { cfg$checkConfiguration(verbose, first) }
#' @description Checks whether the configuration and the dependencies are correct and available
#' @param verbose Show informative messages
#' @param first Stop the process at the first error (if any)
#' @return TRUE if it is
#' FALSE if not
,checkInstallation = function(verbose=TRUE, first=FALSE) { cfg$checkInstallation(verbose, first) }
)
,private = list(S3Class = "S3UMLR2"
,cfg = CONFIG$new()
,msg = UMLR2MSG$new()
)
)
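# Illustrative use (a sketch; the accepted configuration keys depend on the
# CONFIG class, which is defined elsewhere in this package):
# uml <- UMLR2Base$new(verbose = TRUE)
# uml$checkConfiguration()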
## ---- file: /siliconvalley/dplyr.R (kimjh2807/rstudio) ----
# dplyr (p.51~)
library(dplyr)
# tbl_df
iris
i2 <- tbl_df(iris) # tbl_df()
class(i2)
i2
# glimpse
glimpse(i2) # shows every variable, with the data transposed (one row per variable)
# %>%
iris %>% head
iris %>% head(10)
# install "gapminder"
install.packages("gapminder")
library(gapminder)
gapminder <- tbl_df(gapminder)
gapminder
glimpse(gapminder)
# filter()
filter(gapminder, country=='Korea, Rep.')
filter(gapminder, year==2007)
filter(gapminder, country=='Korea, Rep.' & year== 2007)
gapminder %>% filter(country == 'Korea, Rep.')
gapminder %>% filter(year == 2007)
gapminder %>% filter(country == 'Korea, Rep.' & year == 2007)
# arrange()
arrange(gapminder, year, country)
gapminder %>% arrange(year, country)
# select()
select(gapminder, pop, gdpPercap)
gapminder %>% select(pop, gdpPercap)
# mutate()
gapminder %>% mutate(total_gdp = pop * gdpPercap,
le_gdp_ratio = lifeExp / gdpPercap,
lgrk = le_gdp_ratio * 100)
# summarize()
gapminder %>%
summarize(n_obs = n(),
n_countries = n_distinct(country),
n_year = n_distinct(year),
med_gdpc = median(gdpPercap),
max_gdppc = max(gdpPercap))
# distinct()
distinct(select(gapminder, country))
distinct(select(gapminder, year))
gapminder %>% select(country) %>% distinct()
gapminder %>% select(year) %>% distinct()
# group_by()
gapminder %>%
filter(year == 2007) %>%
group_by(continent) %>%
summarise(median(lifeExp))
gapminder %>%
filter(year == 2002) %>%
group_by(country) %>%
summarise(lifeExp = median(lifeExp)) %>%
arrange(-lifeExp)
# join
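# A minimal left_join() sketch (illustration only; the lookup table is built
# from gapminder itself, so nothing external is assumed):
korea07 <- gapminder %>% filter(country == 'Korea, Rep.', year == 2007) %>% select(country, lifeExp)
continents <- gapminder %>% select(country, continent) %>% distinct()
korea07 %>% left_join(continents, by = "country")
# The lines below recap verbs from the earlier sections.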
tbl_df(gapminder)
distinct(select(gapminder, country))
filter(gapminder, country == 'Korea, Rep.')
filter(gapminder, year == 2007)
filter(gapminder, country == 'Korea, Rep.' & year == 2007)
gapminder %>% filter(country == 'Korea, Rep.') %>% filter(year == 2007)
gapminder %>% filter(country == 'Korea, Rep.') %>% select(year, lifeExp)
data_yl <- gapminder %>% filter(country == 'Korea, Rep.') %>% select(year, lifeExp)
plot(data_yl)
data_yp <- gapminder %>% filter(country == 'Korea, Rep.') %>% select(year, pop)
plot(data_yp)
data_ygpc <- gapminder %>% filter(country == 'Korea, Rep.') %>% select(year, gdpPercap)
plot(data_ygpc)
gapminder %>% mutate(total_gdp = pop * gdpPercap)
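# A minimal left_join() sketch (continent_code is a toy lookup table made up
# for illustration):
continent_code <- data.frame(continent = c("Africa", "Americas", "Asia",
                                           "Europe", "Oceania"),
                             code = 1:5, stringsAsFactors = FALSE)
gapminder %>% left_join(continent_code, by = "continent")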
# method 1
d1 = filter(gapminder, year == 2007)
d2 = group_by(d1, continent)
d3 = summarize(d2, lifeExp = median(lifeExp))
arrange(d3, -lifeExp)
arrange(d1, -lifeExp)
# method 2
gapminder %>% filter(year == 2007) %>% group_by(continent) %>%
summarize(lifeExp = median(lifeExp)) %>% arrange(-lifeExp)
|
7b7f9a85728ae8f8fcd5cbba408868c243dc395a | 2c1805e79d915c88faa0f6c258fc41e95937dba5 | /R/Unity/quest_step_position.R | 7e998d751f34c2b35955d200d636e3a8af9203ea | [] | no_license | hejtmy/VR_City_Analysis | b85c14ddc7aad5db8aeeb353ae02462986b20e59 | b149d3f52d76fc8fb0104fa42ec7b38ae7470ba0 | refs/heads/master | 2021-01-18T16:16:53.962471 | 2017-05-21T22:01:26 | 2017-05-21T22:01:34 | 49,779,651 | 0 | 0 | null | 2017-02-18T17:35:16 | 2016-01-16T15:48:50 | R | UTF-8 | R | false | false | 1,073 | r | quest_step_position.R | quest_step_position = function(quest = NULL, step_id){
#parameter validation
if(is.null(quest)){
SmartPrint(c("ERROR:quest_step_position:MissingParameter", "TYPE:quest", "DESCRIPTION:", "parameter not provided"))
return(NULL)
}
if(!is.numeric(step_id)){
SmartPrint(c("ERROR:quest_step_position:WrongParameterType", "TYPE:step_id", "DESCRIPTION:", "Parameter has type ", (class(step_id))," required is numeric"))
return(NULL)
}
step = quest$steps %>% filter(ID == step_id)
if(nrow(step) == 0){
SmartPrint(c("ERROR:quest_step_position:MissingStep", "ID: ", step_id, "DESCRIPTION:", "There is no quest of such ID"))
return(NULL)
}
  if(nrow(step) > 1){
    SmartPrint(c("ERROR:quest_step_position:NonUnique", "ID: ", step_id, "DESCRIPTION:", "There is more than one step with the given ID"))
return(NULL)
}
if(step$Transform =="NO transform"){
SmartPrint(c("WARNING:quest_step_position:NOTransform", "ID: ", step_id, "DESCRIPTION:", "Step has no transform"))
return(NULL)
}
return(text_to_vector3(step$Transform)[c(1,3)])
} |
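# Example usage (sketch; assumes a `quest` object whose $steps data frame has
# ID and Transform columns, as produced elsewhere in this project):
# quest_step_position(quest, step_id = 1)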
11dbdcadcac15db777b2c15029723b7e54d7acc8 | f5f887250c22676073946936c27306e1d61c48e8 | /test_shiny_app.R | b8340a1fede633f5713ea2e7a08c74e480c8ce47 | [] | no_license | conorotompkins/model_allegheny_house_sales | 8b6d015056c4fc3b55d6e23a37edb2c7ab559f7a | a844e549ab1ab28574847a1ba7215ee9668a223c | refs/heads/main | 2023-03-23T23:35:22.513918 | 2021-03-17T21:17:36 | 2021-03-17T21:17:36 | 320,881,494 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,072 | r | test_shiny_app.R | #set up
# Load R packages
library(shiny)
library(shinythemes)
library(shinyWidgets)
library(tidyverse)
library(tidymodels)
library(usemodels)
library(hrbrthemes)
library(scales)
library(leaflet)
#https://towardsdatascience.com/build-your-first-shiny-web-app-in-r-72f9538f9868
#https://shiny.rstudio.com/tutorial/
source("scripts/shiny_app/read_ui_input_values.R")
# Define UI
ui <- fluidPage(theme = shinytheme("cerulean"),
title = "Allegheny County Home Sale Price Estimator",
titlePanel(title = "Allegheny County Home Sale Price Estimator"),
fluidRow(
#tabPanel("Inputs", # sidebarPanel
column(3, #column 1
# selectInput(inputId = "school_desc_choice",
# label = "School district",
# choices = pull(school_desc_distinct, school_desc),
# multiple = FALSE,
# selectize = TRUE),
selectInput(inputId = "style_desc_choice",
label = "Style",
choices = pull(style_desc_distinct, style_desc),
selectize = TRUE,
multiple = FALSE),
selectInput(inputId = "grade_desc_choice",
label = "Grade",
choices = pull(grade_desc_distinct, grade_desc),
multiple = FALSE,
selected = "Average"),
selectInput(inputId = "condition_desc_choice",
label = "Condition",
choices = pull(condition_desc_distinct, condition_desc),
multiple = FALSE,
selected = "Average"),
sliderInput(inputId = "lot_area_choice",
label = "Lot Area (sq. ft)",
#min = pull(lot_area_range_min, lot_area),
#max = pull(lot_area_range_max, lot_area),
min = 0,
max = 10000,
value = 2000),
sliderInput(inputId = "finished_living_area_choice",
label = "Finished Living Area (sq. ft)",
#min = pull(finished_living_area_min, finished_living_area),
#max = pull(finished_living_area_max, finished_living_area),
min = 0,
max = 4000,
value = 2000),
sliderInput(inputId = "bedrooms_choice",
label = "Bedrooms",
min = 1,
max = 6,
value = 1),
sliderInput(inputId = "fullbaths_choice",
label = "Full bathrooms",
min = 1,
max = 4,
value = 1),
sliderInput(inputId = "halfbaths_choice",
label = "Half bathrooms",
min = 0,
max = 4,
value = 0),
sliderInput(inputId = "year_blt_choice",
label = "Year house was built",
min = pull(year_blt_min, year_blt),
max = pull(year_blt_max, year_blt),
value = 1948,
sep = ""),
verbatimTextOutput("txtout")
), #column 1
column(9, # column 2
#plotOutput("school_desc_map"),
leafletOutput("school_district_map"),
plotOutput("model_output_graph"),
tableOutput("model_output_table")
) #column 2
) # fluidRow
) # fluidPage
# Define server function
server <- function(input, output) {
#create data to predict on
predict_data_reactive <- reactive({
req(selected_school_desc())
tibble(par_id = "test",
house_age_at_sale = 2020 - input$year_blt_choice,
lot_area = input$lot_area_choice,
finished_living_area = input$finished_living_area_choice,
bedrooms = input$bedrooms_choice,
fullbaths = input$fullbaths_choice,
halfbaths = input$halfbaths_choice,
school_desc = selected_school_desc(),
style_desc = input$style_desc_choice,
grade_desc = input$grade_desc_choice,
condition_desc = input$condition_desc_choice,
longitude = 1,
latitude = 1) %>%
left_join(finished_living_area_summary) %>%
left_join(lot_area_summary) %>%
mutate(finished_living_area_zscore = (finished_living_area - finished_living_area_mean) / finished_living_area_sd,
lot_area_zscore = (lot_area - lot_area_mean) / lot_area_sd) %>%
select(-c(matches("mean$|sd$"), lot_area, finished_living_area))
})
predictions_reactive <- reactive({
#predict on data
model_fit %>%
predict(predict_data_reactive()) %>%
mutate(.pred = 10^.pred) #%>%
# bind_cols(model_fit %>%
# predict(predict_data_reactive(), type = "conf_int") %>%
# mutate(across(matches("^.pred"), ~10^.x)))
})
representative_sample_reactive <- reactive({
full_results %>%
semi_join(predict_data_reactive(), by = c("school_desc", "style_desc"))
})
plot_parameters_reactive <- reactive({
representative_sample_reactive() %>%
pull(sale_price_adj) %>%
hist(breaks = 30) %>%
.$counts %>%
enframe() %>%
summarize(max_count = max(value)) %>%
pull(max_count)
})
output$txtout <- renderText({
list(str_c("School district:", selected_school_desc(), sep = " "),
str_c("Grade:", input$grade_desc_choice, sep = " "),
str_c("Condition:", input$condition_desc_choice, sep = " "),
str_c("Style:", input$style_desc_choice, sep = " "),
str_c("Lot area:", comma(input$lot_area_choice), sep = " "),
str_c("Finished living area:", comma(input$finished_living_area_choice), sep = " "),
str_c("Bedrooms:", input$bedrooms_choice, sep = " "),
str_c("Full Bathrooms:", input$fullbaths_choice, sep = " "),
str_c("Half Bathrooms:", input$halfbaths_choice, sep = " "),
str_c("Year built:", input$year_blt_choice, sep = " ")) %>%
glue::glue_collapse(sep = "\n")
})
output$model_output_table <- renderTable({
predictions_reactive() %>%
mutate(.pred = dollar(.pred)#,
#.pred_upper = dollar(.pred_upper),
#.pred_lower = dollar(.pred_lower)
) %>%
rename(`Average Predicted Price` = .pred#,
#`Upper bound` = .pred_upper,
#`Lower bound` = .pred_lower
) #%>%
#select(`Lower bound`, `Average Predicted Price`, `Upper bound`)
})
output$model_output_graph <- renderPlot({
representative_sample_reactive() %>%
ggplot(aes(x = sale_price_adj)) +
geom_histogram(fill = "grey", color = "black") +
# annotate(geom = "rect",
# xmin = predictions_reactive()$.pred_lower, xmax = predictions_reactive()$.pred_upper,
# ymin = 0, ymax = Inf, fill = "#FCCF02", alpha = .7) +
geom_vline(aes(xintercept = predictions_reactive()$.pred),
color = "#FCCF02",
size = 2) +
scale_x_continuous(labels = scales::dollar_format()) +
scale_y_comma() +
coord_cartesian(ylim = c(0, plot_parameters_reactive() * 1.4)) +
labs(title = str_c(nrow(representative_sample_reactive()) %>% comma(), "sales of",
distinct(representative_sample_reactive())$style_desc, "homes in",
distinct(representative_sample_reactive())$school_desc,
sep = " "),
x = "Sale Price",
y = "Count of similar homes") +
theme_ipsum(base_size = 20) +
theme(panel.background = element_rect(fill = "black"),
axis.title.x = element_text(size = 18),
axis.title.y = element_text(size = 18))
})
# output$school_desc_map <- renderPlot({
#
# #full_results %>%
# school_district_shapes %>%
# semi_join(predict_data_reactive(), by = "school_desc") %>%
# ggplot() +
# geom_sf(data = ac_boundary, fill = "black") +
# geom_sf(data = ac_water, fill = "white") +
# geom_sf(fill = "#FCCF02", color = "#FCCF02", alpha = .7, size = NA) +
# theme_void()
#
# })
output$school_district_map <- renderLeaflet({
school_district_shapes %>%
leaflet("school_district_map") %>%
addProviderTiles(providers$Stamen.TonerLite,
options = providerTileOptions(noWrap = TRUE,
minZoom = 9,
#maxZoom = 8
)) %>%
setView(lng = -80.01181092430839, lat = 40.44170119122286, zoom = 9) %>%
setMaxBounds(lng1 = -79.5, lng2 = -80.5, lat1 = 40.1, lat2 = 40.7) %>%
addPolygons(layerId = ~school_desc,
fillColor = "#FCCF02",
fillOpacity = .7,
stroke = TRUE,
color = "black",
weight = 1)
})
#capture click from leaflet map
selected_school_desc <- reactive({input$school_district_map_shape_click$id})
observe({ #observer
req(selected_school_desc())
# if (length(selected_school_desc()) == 0)
# return()
#
# else {
#filter and map
leafletProxy("school_district_map", data = filter(school_district_shapes, school_desc == input$school_district_map_shape_click$id)) %>%
clearGroup("highlight_shape") %>%
clearGroup("popup") %>%
addPolygons(group = "highlight_shape") %>%
addPopups(popup = ~school_desc,
group = "popup",
lng = ~lng,
lat = ~lat)
#}
}) #observer
}
# Create Shiny object
shinyApp(ui = ui, server = server)
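# Note: sourcing this file in an interactive session launches the app, since a
# shinyApp() object starts the server when printed.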
|
d4efb8d877548f6ad6c44367caa59af48aae1aa1 | 2c707faace6d70238496097c5bbe8923d847b6fd | /man/print.Btest.Rd | 88c2e080fa8210ec97af1982bd2865cf6cc5209a | [] | no_license | comodin19/BayesVarSel | 573d5a872e08556e4eb8ceefae9d18a63dcbf281 | c534bf878f5b863d4f5f7bc283c957ce47532fe4 | refs/heads/master | 2023-03-17T02:59:13.682232 | 2023-03-08T11:34:23 | 2023-03-08T11:34:23 | 82,951,316 | 9 | 10 | null | 2023-03-08T11:34:25 | 2017-02-23T17:11:23 | C | UTF-8 | R | false | true | 941 | rd | print.Btest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Btest.R
\name{print.Btest}
\alias{print.Btest}
\title{Print an object of class \code{Btest}}
\usage{
\method{print}{Btest}(x, ...)
}
\arguments{
\item{x}{Object of class Btest}
\item{...}{Additional parameters to be passed}
}
\description{
Print an object of class \code{Btest}
}
\examples{
\dontrun{
#Analysis of Crime Data
#load data
data(UScrime)
#Model selection among the following models: (note model1 is nested in all the others)
model1<- y ~ 1 + Prob
model2<- y ~ 1 + Prob + Time
model3<- y ~ 1 + Prob + Po1 + Po2
model4<- y ~ 1 + Prob + So
model5<- y ~ .
#Equal prior probabilities for models:
crime.BF<- Btest(models=list(basemodel=model1,
ProbTimemodel=model2, ProbPolmodel=model3,
ProbSomodel=model4, fullmodel=model5), data=UScrime)
crime.BF
}
}
\seealso{
See \code{\link[BayesVarSel]{Btest}} for creating objects of the class \code{Btest}.
}
|
5cc7c6d913ff866a08f142818926d595f57634f5 | e0219998a64a696a974e41fc341115704d4a9787 | /source/ini_pr_i.R | eae90295da687b245ac5827cc360cb2472be5348 | [] | no_license | micheledemeo/datacontrol | 81022d65b11d219d11753e7d842c46e102207c8b | c9189a5174976c52d478f682670291785f8b3d49 | refs/heads/master | 2020-06-02T09:16:36.883941 | 2016-08-27T11:23:22 | 2016-08-27T11:23:22 | 27,638,132 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,567 | r | ini_pr_i.R |
# update the sent flags in flotta
setkey(flotta, id_battello)
flotta[.( all[sent==1,unique(id_battello)] ) , sent:=1 ]
flotta[is.na(sent) , sent:=0]
# strata with at least one sent=0 and at least 2 units with sent=1
flotta[,remove_to_hv:=0]
setkey(flotta, id_strato,sent)
# str_sent: strata with at least one submitted sample unit. By keying on sent=0, the sampled vessels of these strata that have sent=0 (not submitted) are dropped from the computation
str_sent=flotta[,sum(sent), by=id_strato][V1>1,.(id_strato,sent=0)]
setkey(str_sent, id_strato,sent)
# tag sampled vessels to exclude from the hv computation, since the stratum holds at least two units with sent=1 that can serve as proxies for the non-responses (id_battello>0 & sent=0)
flotta[str_sent, remove_to_hv:=ifelse(id_battello>0,1,0)]
flotta_temp=flotta[,list(id_strato,lft,id_battello=ifelse(remove_to_hv==1,0,id_battello))]
# compute pr_i and handle census strata ####
setkey(flotta_temp, id_strato,lft)
flotta_temp[,eti:=1:nrow(.SD), by=id_strato]
strati_censimento=flotta_temp[,list(.N,n=sum(ifelse(id_battello>0,1,0))),keyby=id_strato][N-n==0,id_strato]
if(length(strati_censimento)>0) {
pr_i=flotta_temp[!id_strato %in% strati_censimento,data.table(.SD[id_battello>0,.(lft,id_battello,eti)],pr_i=diag(hv_pij(lft, n=nrow(.SD[id_battello>0]), eti=.SD[id_battello>0,eti], M=T) ) ) , keyby=id_strato]
pr_i=pr_i[,list(id_battello,pr_i)]
pr_i=rbindlist(list(pr_i,flotta_temp[id_strato %in% strati_censimento,.(id_battello, pr_i=1)]))
} else {
pr_i=flotta_temp[,data.table(.SD[id_battello>0,.(lft,id_battello,eti)],pr_i=diag(hv_pij(lft, n=nrow(.SD[id_battello>0]), eti=.SD[id_battello>0,eti], M=T) ) ) , keyby=id_strato]
pr_i=pr_i[,list(id_battello,pr_i)]
}
# compute the correction factor
if ( exists("cy") ) {
setkey(cy,id_strato)
ric=all[var=="ricavi", list(id_battello,id_strato,value)]
setkey(ric,id_battello)
setkey(pr_i,id_battello)
# nomatch = 0 => excludes vessels that are removed because not submitted but that sit in strata with at least 2 submitted units
ric=pr_i[ric,nomatch=0][,ric_esp_nisea:=sum(value/pr_i),by=id_strato]
setkey(ric,id_strato)
ric=cy[ric]
ric[is.na(ricavi), ricavi:=ric_esp_nisea] # corr_fact will be 1 for that
ric[,corr_fact:=ricavi/ric_esp_nisea]
setkey(ric,id_battello)
# weight_with_correction = weight * corr_fact = 1/pr * corr_fact -->
# pr_with_correction= 1 / weight_with_correction = 1/(1/pr * corr_fact) = pr / corr_fact
pr_i[ric, pr_i:=pr_i*(1/corr_fact)]
rm(ric)
}
setkey(pr_i,id_battello)
rm(flotta_temp) |
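# Sanity check (sketch, not part of the original workflow): first-order
# inclusion probabilities should lie in (0, 1]
# stopifnot(pr_i[, all(pr_i > 0 & pr_i <= 1)])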
8866763c368c5648482638a048015b4b2a052fbc | ca807743c5b9f9c4e17ee8e5526486a8288e4193 | /RUN_FIRST-create_data_matrix.R | 295d2f7d7f82b99fcf3b3d85bf7671db34649630 | [] | no_license | wsdaniels/COmodeling | bb144f09435d4b193e2e1c66f0cc37a8c626f4d4 | 20785951935226beae72f5a7ced746761e24fbc8 | refs/heads/main | 2023-05-27T17:24:19.414034 | 2023-05-10T20:40:39 | 2023-05-10T20:40:39 | 404,949,625 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,242 | r | RUN_FIRST-create_data_matrix.R | rm(list = ls())
# Install and load required packages
if ("lubridate" %in% rownames(installed.packages()) == F){
install.packages("lubridate")
}
if ("RAMP" %in% rownames(installed.packages()) == F){
install.packages("RAMP")
}
library(lubridate)
library(RAMP)
# Set base directory
base.dir <- 'https://raw.github.com/wsdaniels/COmodeling/main/'
# GET RESPONSE DATA
response <- read.csv(paste0(base.dir, "MSEA_V8JMOPITT_weeklyanomalies_WEDCEN_nofill.csv"))
response$time <- ymd(response$time)
# Placeholder for missing CO values. This will get used later
missing.val <- -9999
# GET PREDICTOR DATA
nino <- read.csv(paste0(base.dir, "nino34_weekly_avg.csv"))
aao <- read.csv(paste0(base.dir, "aao_weekly_avg.csv"))
tsa <- read.csv(paste0(base.dir, "tsa_weekly_avg.csv"))
dmi <- read.csv(paste0(base.dir, "dmi_weekly_avg.csv"))
olr.msea <- read.csv(paste0(base.dir, "msea_olr.csv"))
# remove partial first entry in olr
olr.msea <- olr.msea[2:nrow(olr.msea),]
# PUT PREDICTOR DATA INTO A LIST
predictors <- list("nino" = nino,
"dmi" = dmi,
"tsa" = tsa,
"aao" = aao,
"olr.msea" = olr.msea)
# Fix column alignment
# NOTE: aao doesn't need the correction for some reason
for (i in 1:3){
this.var <- predictors[[i]]
this.var[,2] <- this.var[,1]
this.var[,1] <- rownames(this.var)
row.names(this.var) <- NULL
predictors[[i]] <- this.var
}
# Convert time variable to lubridate datetime
for (i in 1:length(predictors)){
this.var <- predictors[[i]]
this.var$time <- ymd(this.var$time)
predictors[[i]] <- this.var
}
# Clean up
rm(aao,dmi,nino,tsa,olr.msea,this.var,i)
# ALIGN START OF PREDICTOR TIME SERIES
# Get the latest start date - this will be used to align the starts
start.date <- max(as_date(sapply(predictors, function(X) X$time[1])))
for (i in 1:length(predictors)){
this.var <- predictors[[i]]
this.var <- this.var[!(this.var$time < start.date), ]
predictors[[i]] <- this.var
}
# ALIGN END OF PREDICTOR TIME SERIES
terminal.date <- response$time[length(response$time)]
for (i in 1:length(predictors)){
this.var <- predictors[[i]]
this.var <- this.var[!(this.var$time > terminal.date), ]
predictors[[i]] <- this.var
}
# Clean up
rm(this.var, terminal.date, i)
#------------------------------------------------
# ALL END DATES ARE ALIGNED AT THIS POINT
# ALL TIME VARIABLES ARE THE SAME AT THIS POINT
#------------------------------------------------
# COMPUTE OFFSETS FROM FINAL RESPONSE OBSERVATION
# This will be used for lag calculations later
for (i in 1:length(predictors)){
predictors[[i]]$offset <- seq(nrow(predictors[[i]])-1, 0)
}
response$offset <- seq(nrow(response)-1, 0)
# COMPUTE SMOOTHED CURVES
# Here we compute gradually smoother gaussian kernels
# Smoothed indices are used for longer lags
x.dist <- response$time[2] - response$time[1]
max.mult <- 8
min.mult <- 1
mult.seq <- seq(min.mult, max.mult, length.out = 8)
for (i in 1:length(predictors)){
for (j in 1:length(mult.seq)){
this.gaussian.kernel <- ksmooth(x = predictors[[i]]$time,
y = predictors[[i]]$anomaly,
kernel = "normal",
bandwidth = mult.seq[j]*x.dist)
predictors[[i]][ length(predictors[[i]]) + 1 ] <- this.gaussian.kernel$y
names(predictors[[i]])[ length(predictors[[i]]) ] <- paste0("gaussian.kernel.",
mult.seq[j])
}
}
rm(this.gaussian.kernel, i, j, x.dist)
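# Intuition for the bandwidth multipliers (toy sketch): a larger bandwidth
# averages over more neighbouring weeks and yields a smoother curve, e.g.
# ksmooth(1:100, rnorm(100), kernel = "normal", bandwidth = 8)$y
# varies less than the same call with bandwidth = 1.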
# CREATE MASKS FOR THE MONTHS WE WANT TO EXPLAIN THE RESPONSE
#---------------------------------------------------------------------
# Period over which to explain response data (in months)
# This corresponds to fire season in MSEA
study.period <- c(9, 12)
# Create vector of months to keep
if (study.period[1] <= study.period[2]){
months.to.keep <- study.period[1] : study.period[2]
} else {
months.to.keep <- c(study.period[1] : 12, 1 : study.period[2])
}
# Create month mask
month.mask <- month(response$time) %in% months.to.keep
# Apply month mask
response <- response[month.mask,]
rm(month.mask, months.to.keep, study.period)
#---------------------------------------------------------------------
# Remove NA from response
# NOTE: this must be done AFTER setting up the offsets!!!!!!
response <- response[!(response$num_obs == missing.val), ]
# SET LAG LIMITS
min.lag <- 1
max.lag <- 52
lag.vals <- list(nino = min.lag:max.lag,
dmi = min.lag:max.lag,
tsa = min.lag:max.lag,
aao = min.lag:max.lag,
olr.msea = min.lag:max.lag)
# DEFINE SMOOTHING PARAMETERS
start.smoothing <- 4
lag.dividers <- seq(start.smoothing, max.lag, length.out = length(mult.seq)+1)
lag.dividers <- round(lag.dividers)
# COUNT TOTAL NUMBER OF LAGS TO CONSIDER
n.lag <- 0
for (i in 1:length(lag.vals)){
n.lag <- n.lag + length(lag.vals[[i]])
}
# BUILD DATA MATRIX TO BE USED IN RAMP
data.matrix <- data.frame(matrix(NA, ncol = n.lag, nrow = nrow(response)))
# FILL DATA MATRIX
it <- 1
for (i in 1:length(predictors)){
these.lags <- lag.vals[[i]]
if (length(these.lags) > 0){
for (j in 1:length(these.lags)){
var.offsets <- predictors[[i]]$offset
required.offsets <- response$offset + these.lags[j]
to.keep <- var.offsets %in% required.offsets
if (these.lags[j] < start.smoothing){
this.var <- predictors[[i]]$anomaly[to.keep]
} else {
for (k in seq( 1, length(lag.dividers)-1 )){
if (these.lags[j] >= lag.dividers[k] & these.lags[j] < lag.dividers[k+1]){
this.string <- paste0("gaussian.kernel.", mult.seq[k])
this.var <- predictors[[i]][, this.string][to.keep]
break
}
}
}
data.matrix[,it] <- this.var
var.name <- paste0(names(predictors)[i], "_", these.lags[j])
names(data.matrix)[it] <- var.name
it <- it + 1
}
}
}
rm(i, it, j, n.lag, required.offsets, these.lags, this.var, to.keep,
var.name, var.offsets, lag.vals)
write.csv(data.matrix, "data_matrix.csv")
write.csv(response, "response.csv")
|
60d0040694a46d68d8b547b133051395f8e802be | e8bb53f264224f2b72b9b6e2f715080f98914fdf | /04_ExploratoryDataAnalysis/code/Lesson1_LatticePlottingSystem_w2.R | 43a92cc5e4374d55738643f9c19ae9d9007e0e2c | [] | no_license | pritraj90/DataScienceR | 0ef2550590f101bd0886ba7db22c6aa6df755ce0 | 54a67ad080756699138d083bd495da1dfa100d09 | refs/heads/master | 2020-03-21T16:31:22.158157 | 2018-03-07T16:52:26 | 2018-03-07T16:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,065 | r | Lesson1_LatticePlottingSystem_w2.R | # Week2 : Lesson 1: Lattice Plotting System
library(lattice)
library(datasets)
# Simple scatterplot
xyplot(Ozone~Wind, data = airquality)
# Convert Month to a factor variable
airquality <- transform(airquality, Month = factor(Month))
xyplot(Ozone~Wind | Month, data = airquality, layout = c(5, 1)) # by month
# Lattice behavior
p <- xyplot(Ozone~Wind, data = airquality)
print(p)
## Panel Functions
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each = 50)
y <- x + f - f * x + rnorm(100, sd = 0.5)
f <- factor(f, labels = c("Group 1", "Group 2"))
xyplot(y~x | f, layout = c(2,1)) # plot with 2 panels
# Custom panel function
xyplot(y~x | f, panel = function(x, y, ...){
panel.xyplot(x, y, ...) # 1st call the default panel function for xyplot
panel.abline(h = median(y), lty = 2) # add a horizontal line at the median
})
# Custom panel function
xyplot(y~x | f, panel = function(x, y, ...){
panel.xyplot(x, y, ...) # 1st call the default panel function for xyplot
panel.lmline(x, y, col = 2) # overlay a simple linear regression line
})
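# Both panel additions can be combined in a single call (sketch):
xyplot(y~x | f, panel = function(x, y, ...){
  panel.xyplot(x, y, ...)               # default scatterplot
  panel.abline(h = median(y), lty = 2)  # horizontal line at the median
  panel.lmline(x, y, col = 2)           # simple linear regression line
})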
|
43445899dd3c124dd5a260b25f36a345e7580ddf | 87a10b6ceddd21d6d0195f79648fa2fab473638d | /Food Services by County.R | 33f2cd065815fd47109bc916140e42e7a6b7dc7b | [] | no_license | vineetdcunha/Data_visualization | 8ef1b63a47e6f2082567b3f45367a096ed28ab8b | 83682c55c0d2a6ff25dbc97311296a2ca353071a | refs/heads/main | 2023-01-23T10:05:43.196622 | 2020-11-24T18:43:08 | 2020-11-24T18:43:08 | 313,099,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,237 | r | Food Services by County.R | library(tidyverse)
library(geojsonio)
library(RColorBrewer)
library(rgdal)
library(sf)
library(broom)
# Download the Hexagones boundaries at geojson format here: https://team.carto.com/u/andrew/tables/andrew.us_states_hexgrid/public/map.
spdf <- geojson_read("us_states_hexgrid.geojson", what = "sp")
# Bit of reformating
spdf@data = spdf@data %>%
mutate(google_name = gsub(" \\(United States\\)", "", google_name))
spdf_fortified <- tidy(spdf, region = "google_name")
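# FoodSrvcByCounty is assumed to be loaded beforehand (it is never read in
# this script); e.g. (hypothetical file name):
# FoodSrvcByCounty <- read.csv("FoodSrvcByCounty.csv")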
FoodSrvcByCounty$county = as.character(FoodSrvcByCounty$County)
spdf_fortified$county = toupper(spdf_fortified$id)
spdf_fortified <- spdf_fortified %>%
left_join(. , FoodSrvcByCounty, by = c("county" = "county"))
head(spdf_fortified)
ggplot() +
geom_polygon(data = spdf_fortified,
aes(
x = long,
y = lat,
group = group,
fill = FoodServices.2007
)) +
theme_void() + labs(fill = 'Food Services - 2007',
title = "Food Services by State- 2007")
|
f2ebf5f917934428031c40c49ba1cdc6bc46b6b2 | 2e4afcf0f120a9d36ae9eee3c0d10df688c4cb37 | /js_RcircosPlotting.R | c5ae82f9f7fc29241b1673e5cccc5da4c3f5b937 | [] | no_license | CellFateNucOrg/afterMC-HiCplots | e7b83de0319371baa949696d1f34f1a1f19f3f2f | 8b3af55da3be3f60e3632a2052e46187d6e39a3b | refs/heads/master | 2022-02-11T07:54:11.044287 | 2022-01-31T17:53:41 | 2022-01-31T17:53:41 | 244,604,363 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,044 | r | js_RcircosPlotting.R | #' Prepare reads for plotting with Rcircos
#'
#' Input data frame should have column names ReadID, Chr, RefStart and RefEnd
#' @param readData - data frame of fragments detected in MC-HiC
#' @param readsToDraw - vector of read IDs
#' @return data frame with 6 columns for pairs of interacting loci
#' @export
prepareLinkData<-function(readData,readsToDraw) {
firstChr<-c()
firstStart<-c()
firstEnd<-c()
secondChr<-c()
secondStart<-c()
secondEnd<-c()
for (rd in readsToDraw) {
currentRead<-readData[readData$ReadID==rd,]
firstChr <- c(firstChr, currentRead$Chr[1:(dim(currentRead)[1]-1)])
firstStart <- c(firstStart, currentRead$RefStart[1:(dim(currentRead)[1]-1)])
firstEnd <- c(firstEnd, currentRead$RefEnd[1:(dim(currentRead)[1]-1)])
secondChr <- c(secondChr, currentRead$Chr[2:(dim(currentRead)[1])])
secondStart <- c(secondStart, currentRead$RefStart[2:(dim(currentRead)[1])])
secondEnd <- c(secondEnd, currentRead$RefEnd[2:(dim(currentRead)[1])])
}
RCircosLink<- data.frame(firstChr=firstChr,firstStart=firstStart,firstEnd=firstEnd,
secondChr=secondChr,secondStart=secondStart,secondEnd=secondEnd,
stringsAsFactors=F)
return(RCircosLink)
}
#' Prepare the core Rcircos plot for C. elegans data
#'
#' @param base.per.unit - integer for the size of the units that are plotted
#' @param chr.exclude - vector of names of chromosomes to exclude
#' @param highlight.width - width of the chromosome highlight band
#' @param tracks.inside - number of tracks to have inside the circle
#' @param tracks.outside - number of tracks to have outside the circle
#' @return plots ideogram
#' @export
baseRcircosCE<-function(base.per.unit=3000, chr.exclude=NULL, highlight.width=10, tracks.inside=1, tracks.outside=0){
Chrnames<-c("chrI","chrII","chrIII","chrIV","chrV","chrX","MtDNA") # used to get rid of mtDNA
ce11 <- list( "chrI" = 15072434,
"chrII" = 15279421,
"chrIII" = 13783801,
"chrIV" = 17493829,
"chrV" = 20924180,
"chrX" = 17718942,
"MtDNA" = 13794)
ce11.ideo<-data.frame(Choromsome=Chrnames,ChromStart=0,ChromEnd=unlist(ce11),Band=1,Stain="gvar")
cyto.info <- ce11.ideo
RCircos.Set.Core.Components(cyto.info, chr.exclude,tracks.inside, tracks.outside)
rcircos.params <- RCircos.Get.Plot.Parameters()
rcircos.params$base.per.unit<-base.per.unit
rcircos.params$chrom.width=0 #0.1
rcircos.params$highlight.width=highlight.width #1
RCircos.Reset.Plot.Parameters(rcircos.params)
RCircos.Set.Plot.Area()
par(mai=c(0.25, 0.25, 0.25, 0.25))
plot.window(c(-1.5,1.5), c(-1.5, 1.5))
RCircos.Chromosome.Ideogram.Plot()
}
#' Prepare a list of points of view for 4C
#'
#' Will use chromosome length to find positions at 20%, 50% and 80% of chromosome's
#' length to act as points of view for arms and center
#' @param chrLengthList - a named list with lengths of chromsomes
#' @param winSize - the size of the window around the POV for selecting interactions (must be an even number)
#' @return data.frame with points of view
#' @export
generatePOV<-function(chrLengthList=NULL,winSize=10000){
if (is.null(chrLengthList)){
chrLengthList <- list( "chrI" = 15072434,
"chrII" = 15279421,
"chrIII" = 13783801,
"chrIV" = 17493829,
"chrV" = 20924180,
"chrX" = 17718942)
  }
  # unlist here so a user-supplied list is also coerced to a numeric vector
  chrLengthList <- unlist(chrLengthList)
left<-round(0.2*chrLengthList/1000,0)*1000
center<-round(0.5*chrLengthList/1000,0)*1000
right<-round(0.8*chrLengthList/1000,0)*1000
names(left)<-paste(names(left),"left",sep="_")
names(right)<-paste(names(right),"right",sep="_")
names(center)<-paste(names(center),"center",sep="_")
POV<-data.frame(POVname=c(names(left),names(center),names(right)),
POVpos=c(left,center,right),row.names=NULL)
POV$chr<-gsub("_.*","",POV$POVname)
POV$start<-POV$POVpos-winSize/2
POV$end<-POV$POVpos+winSize/2
POV<-POV[order(POV$chr,POV$start),]
return(POV)
}
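# Example (sketch; runs with the built-in ce11 chromosome lengths):
# POV <- generatePOV(winSize = 10000)
# head(POV)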
|
71c39d0b805961cdded955fab3ef74f21a8eff6c | fc89e754459db5c69cf7f22a3cedd16fbcfc60c0 | /man/print.PCAbiplot.Rd | 5e991caef6e8527b42ae786e642e901c1dd1c2d3 | [] | no_license | Displayr/flipDimensionReduction | 4a450aba63621ee63a9a2bd8551e94be791d8e52 | bd844f99b5666981c9e15e9dc457ec6698814119 | refs/heads/master | 2023-06-25T07:32:55.843774 | 2023-06-13T09:05:42 | 2023-06-13T09:05:42 | 59,715,778 | 12 | 6 | null | 2023-05-11T04:51:51 | 2016-05-26T03:11:08 | R | UTF-8 | R | false | true | 441 | rd | print.PCAbiplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/principalcomponentsbiplot.R
\name{print.PCAbiplot}
\alias{print.PCAbiplot}
\title{\code{print.PCAbiplot}}
\usage{
\method{print}{PCAbiplot}(x, ...)
}
\arguments{
\item{x}{An object created using \code{PrincipalComponentsBiplot}.}
\item{...}{Not used}
}
\description{
Plots biplot of PCA analysis, showing both the component loadings and scores simultaneously.
}
|
9c8ab84ec32ae6275a2bae294019ab3f35ad196d | b1f28e14d2b8079fa66d39d67c20fb53d2ee78e2 | /man/spearE.Rd | 1cbeeea4cb0e99237954c5a25529fe76e3bfd5e3 | [] | no_license | lucyov26/RankMetric | b506369a41bfa524ab723e2dd11fc60a505315f4 | d3c0bb0fd8b0910affaa0df0657918add9ccaf64 | refs/heads/master | 2020-03-23T18:00:29.363701 | 2018-11-01T21:08:19 | 2018-11-01T21:08:19 | 141,885,244 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 581 | rd | spearE.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rpack2.R
\name{spearE}
\alias{spearE}
\title{Spearman's Rho for rankings with Ties}
\usage{
spearE(x, y)
}
\arguments{
\item{x, y}{integer vectors}
}
\value{
Returns Spearman's rho between the two rankings.
}
\description{
Computes Spearman's rho between two rankings in which ties (items with equal
rank) are permitted. The number of items placed in the i-th category must be
the same in both rankings.
}
\examples{
a = c(3,1,2,2,3)
b = c(1,2,2,3,3)
spearE(a,b)
}
\author{
Lucy Small, \email{[email protected]}
}
|
9944e9f4e6458a58f53d345ced1e3888dbfd8ec8 | 5e6f223565e881eded9629a722dcc9d887479f83 | /man/remove_identation.Rd | d269973361592d02dc2ac10898059dae2b0fcf2d | [] | no_license | systats/semantic.doc | 058fdd708ddd218928c6187870366d16f0442115 | 33a0a1cb2ca4b91829faa70c18290b897ebe032f | refs/heads/master | 2020-04-03T23:53:58.350166 | 2018-12-02T19:23:59 | 2018-12-02T19:23:59 | 155,633,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 270 | rd | remove_identation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{remove_identation}
\alias{remove_identation}
\title{Removes HTML indentation before cat}
\usage{
remove_identation(x)
}
\description{
Removes HTML indentation from a string before it is printed with cat
}
|
4504238bc35c861acdf4c5858cacd0d7f7fb23e5 | e48a5a75c97b0e8b4d3c3b3f7f8484173baa7a3d | /ui.r | f86dac8e8e4f5b2dc3def2fa10b7a15ca612d861 | [] | no_license | patilv/bb50citiesrank | 08f3e08509c33ee6d409a2c8b81139dee51e1a2f | 1a407ba3cbff264ae676cdb3fdcf51c4fae59fe6 | refs/heads/master | 2021-01-01T19:24:31.875817 | 2013-06-24T10:35:23 | 2013-06-24T10:35:23 | 10,894,910 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,738 | r | ui.r | library(shiny)
shinyUI(pageWithSidebar(
headerPanel("50 Best US Cities of 2012 - Ranked by Bloomberg Businessweek - Location and Characteristics"), #Application title
# User input for determine characteristic to use for sizing dots on map
sidebarPanel(
selectInput("var1", "Characteristic 1:",
choices =c(
"Number of Bars"="Bars",
"Population"="Population",
"Number of Restaurants"="Restaurants",
"Number of Museums"="Museums",
"Number of Libraries"="Libraries",
"Number of Pro Sports Teams"="Pro.Sports.Teams",
"Parks acres per 1000 residents"="Park.acres.per.1000.residents",
"Number of Colleges"="Colleges",
"Percent with Graduate Degree"="Percent.with.Graduate.Degree",
"Median Household Income"="Median.household.income",
"Percent Unemployed"="Percent.unemployed")
),
HTML("<br><br>"),
h5(textOutput("hits")),# Hit counter Output
HTML("<font size=1>Hit counter courtesy: <a href = 'http://www.econometricsbysimulation.com/2013/06/more-explorations-of-shiny.html' target='_blank'> Francis Smart</a></font>"),
HTML("<br><br>Application code <a href = 'https://github.com/patilv/bb50cities' target='_blank'> is available here.</a>")
),
mainPanel(
HTML("<a href ='http://www.businessweek.com/articles/2012-09-26/san-francisco-is-americas-best-city-in-2012' target='_blank'> Original article </a> <font color='red'>
required one to click on 50 slides to find the best city --- Advertising is important, right? </font><hr> "),
tabsetPanel(
# viewing the map
tabPanel("Geographic Locations",HTML ("<div> <font color='red'>Please be patient for all dots to show. Actually, the 50th dot will be the best city :-) </font> <br>1. Color indicates rank of the city - LOWER VALUE IS BETTER (see the color coding below)
<br>2. Size of dot indicates value of selected Characteristic - LARGER dot is HIGHER value <br>
3. You can hover over the dots to know the city and value of the characteristic</div>"),htmlOutput("gvisgeoplot")
),
# for scatter plot
tabPanel ("Characteristics and Cities", HTML("<div> <font color='red'>This chart can be played around with in the following ways.</font><br>
1. The two small tabs on the top right show either bubble charts or bar graphs, depending on what's selected. <br>
2. The horizontal and vertical axes can be changed to other variables by clicking at existing axis labels (the arrow mark) <br>
3. Size of dot indicates Rank of the city - LARGER size is BETTER ranked. (The rank is 51 minus the displayed value. This shows as variable 'RankReordered' in list in the axes.
See below the plot for explanation.)<br>
4. You can hover over the dots to know the city and value of the characteristics<br>
5. Color is used to identify the city. No other purpose.<br><br></div>"),htmlOutput("scatterplot"),
HTML("<div><font size=1>Since ranking of 1 is better than 50, the default approach would've placed smaller dots for better ranked cities.
So, to have larger bubbles for better cities a reranking was done to have a higher value for better city (51 minus the 'real' rank).
This was done only for this plot.</font></div>")),
tabPanel("Data", htmlOutput("bestcitiesdata")) # viewing data
)))) |
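# Note: this ui.r expects a companion server.r in the same app directory that
# defines output$hits, output$gvisgeoplot, output$scatterplot and
# output$bestcitiesdata.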
54e5a7076b9ff98e8bbb8c87a838f75b5ddcc12c | e141aebdf1eee3f692848a88e6a4ef1db6b854a9 | /plot2.R | e18d95007a883045442199bd99e8dde1d245aeeb | [] | no_license | bobb72/ExData_Plotting1 | fd9fe3dea393c40cd6928919e9c35b3181d0ab3c | 94eacf8c495e3f6ac7337c047e0ee5da9a391ad2 | refs/heads/master | 2021-01-23T21:03:23.022982 | 2016-06-08T10:26:05 | 2016-06-08T10:26:05 | 60,681,145 | 0 | 0 | null | 2016-06-08T08:20:22 | 2016-06-08T08:20:22 | null | UTF-8 | R | false | false | 1,192 | r | plot2.R | # Here is the data for the project:
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Create an R script called plot2.R that reproduces the plot as per course instructions
#setwd("C:/Users/Bob/Desktop/DS Specialization/4_Exploratory_Data_Analysis/w1_assignment/exdata_data_household_power_consumption")
#fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#download.file(fileUrl,destfile="../Electric power consumption.zip")
#unzip(zipfile="../Electric power consumption.zip")
# Read file:
house_pwr_cons <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors=FALSE, na.strings = "?")
house_pwr_cons_sub <- house_pwr_cons[house_pwr_cons$Date %in% c("1/2/2007","2/2/2007"), ]
rm("house_pwr_cons")
# Create 2nd plot:
globalActivePower <- as.numeric(house_pwr_cons_sub$Global_active_power)
timeStamp <- strptime(paste(house_pwr_cons_sub$Date, house_pwr_cons_sub$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("ExData_Plotting1/plot2.png", width=480, height=480)
plot(timeStamp, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
2da797878ff65d5df11d5bb9a8c0f1a06bd422b4 | 9cc0308c75c50b5869c783fdd83fb00d36703e98 | /R/Time_Series.R | 8daabdead67b75dfb44f50408048f5711e61f9c6 | [] | no_license | SantiagoGallon/TimeSeries | 2cbf0157cb195bf8b213357f915739cf91d04592 | c40bdd4733796d5ba6f22e0a799afdadd56937da | refs/heads/master | 2020-09-01T01:47:32.635616 | 2019-11-01T22:59:15 | 2019-11-01T22:59:15 | 218,847,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,672 | r | Time_Series.R | rm(list = ls())
ls()
library(astsa)
library(forecast)
library(fma)
# Import data
# COLCAP stock index (Índice Accionario de Capitalización Bursátil), Colombian Stock Exchange (daily 15/01/2008 - 22/06/2016)
data <- read.table("/Users/Santiago/Dropbox/Teaching/Time Series/data/colcap.txt", sep="\t", header=TRUE)
x <- ts(data[,2], start=c(2008,15,1), end=c(2016,22,6), frequency=256.25)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/colcap.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,1), mar=c(2,4,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="Index", xlab ="")
dev.off()
# Índice mensual de actividad económica -IMACO- (monthly 01/1992 - 05/2015)
data <- read.table("/Users/Santiago/Dropbox/Teaching/Time Series/data/imaco.txt", sep="\t", header=TRUE)
x <- ts(data[,3], start=c(1992,1), end=c(2015,5), frequency=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/imaco.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,1), mar=c(2,4,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="Index", xlab ="")
dev.off()
# Consumer Price Index (monthly jul/54 - may/16)
data <- read.table("/Users/Santiago/Dropbox/Teaching/Time Series/data/ipc.txt", sep="\t", header=TRUE)
x <- ts(data[,2], start=c(1954,7), end=c(2016,5), frequency=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ipc.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,1), mar=c(2,4,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="Index", xlab ="")
dev.off()
# Industrial Production Index (monthly 01/1980 - 04/2016)
data <- read.table("/Users/Santiago/Dropbox/Teaching/Time Series/data/ipi.txt", sep="\t", header=TRUE)
x <- ts(data[,2], start=c(1980,1), end=c(2016,4), frequency=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ipi.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,1), mar=c(2,4,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="Index", xlab ="")
dev.off()
# Gross Domestic Product (quarterly 2000Q1-2015Q4)
data <- read.table("/Users/Santiago/Dropbox/Teaching/Time Series/data/gdp.txt", sep="\t", header=TRUE)
x <- ts(data[,2]/1000, start=c(2000,1), end=c(2015,4), frequency=4)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/gdp.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,1), mar=c(2,4.5,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="Thousands of millions (pesos)", xlab ="")
dev.off()
# Exchange rate (pesos/dollar) (daily 27/11/1991-24/06/2016)
data <- read.table("/Users/Santiago/Dropbox/Teaching/Time Series/data/trm.txt", sep="\t", header=TRUE)
x <- ts(data[,2], start=c(1991,11), end=c(2016,6), frequency=359.24)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/trm.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,2), mar=c(2,4,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="", xlab ="")
ts.plot(diff(log(x)), ylab="", xlab ="")
dev.off()
# West Texas Intermediate - WTI - Crude Oil Price (1986 -2015). Source: http://www.eia.gov/
library(xlsx)
data_d <- read.xlsx("/Users/santiagogallon/Dropbox/Teaching/Time Series/data/wti.xls", header=TRUE, sheetIndex = 1)
data_w <- read.xlsx("/Users/santiagogallon/Dropbox/Teaching/Time Series/data/wti.xls", header=TRUE, sheetIndex = 2)
data_m <- read.xlsx("/Users/santiagogallon/Dropbox/Teaching/Time Series/data/wti.xls", header=TRUE, sheetIndex = 3)
data_y <- read.xlsx("/Users/santiagogallon/Dropbox/Teaching/Time Series/data/wti.xls", header=TRUE, sheetIndex = 4)
x_d <- ts(data_d[1:7568,2], start=c(1986,1,2), frequency=365)
x_w <- ts(data_w[1:1565,2], start=c(1986,1), end=c(2015,52), frequency=53)
x_m <- ts(data_m[1:360,2], start=c(1986,1), end=c(2015,12), frequency=12)
x_y <- ts(data_y[,2], start=c(1986), end=2015, frequency=1)
postscript("/Users/santiagogallon/Dropbox/Teaching/Time Series/Slides/wti.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(2,2), mar=c(2,4.2,1.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_d, main= "Daily", ylab="US dollars per barrel", xlab ="")
ts.plot(x_w, main= "Weekly", ylab="US dollars per barrel", xlab ="")
ts.plot(x_m, main= "Monthly", ylab="US dollars per barrel", xlab ="")
ts.plot(x_y, main= "Yearly", ylab="US dollars per barrel", xlab ="")
dev.off()
# Monthly totals (in thousands) of international airline passengers between 1949 and 1960. Source: Box-Jenkins
postscript("/Users/santiagogallon/Dropbox/Teaching/Time Series/Slides/air.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(1,1), mar = c(2,5.4,0.2,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(AirPassengers, ylab="", xlab="")
title(ylab="No. of passengers (in thousands)", line=4)
dev.off()
# L.A. Pollution Study. The scales are different, mortality, temperature, and emissions (weekly 1970 - 1980)
postscript("/Users/santiagogallon/Dropbox/Teaching/Time Series/Slides/pollu.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(3,1), mar=c(0,4.8,0,0.2), oma=c(4,0,0.2,0), las=1, cex.axis=1.5, cex.lab=1.5, tcl=-.3)
plot(cmort, ylab="No. of Deaths", xaxt="no", type='n')
#grid(lty=1, col=gray(.9))
lines(cmort, col="blue")
#text(1974, 132, 'Bad Year', col=rgb(.5,0,.5), cex=1.25) # just for fun
#arrows(1973.5, 130, 1973, 127, length=0.05, angle=30, col=rgb(.5,0,.5))
plot(tempr, ylab=expression(~Temperature~(degree~F)), xaxt="no", type='n')
#grid(lty=1, col=gray(.9))
lines(tempr, col="red")
plot(part, ylab="Emissions (PPM)")
#grid(lty=1, col=gray(.9))
title(xlab="Time (week)", outer=TRUE)
dev.off()
# Seasonal series
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/seas.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(3,2), mar=c(4,4.3,0.7,0.7), las=0, cex.axis=1.5, cex.lab=1.5, tcl=-.3)
plot(gas, ylab="AU monthly gas production") # Australian monthly gas production: 1956–1995
plot(taylor, ylab="UK half-hourly electricity demand") # Half-hourly electricity demand in England and Wales from Monday 5 June 2000 to Sunday 27 August 2000
plot(wineind, ylab="AU monthly wine sales") # Australian total wine sales by wine makers in bottles <= 1 litre. Jan 1980 – Aug 1994
plot(USAccDeaths, ylab="US monthly accidental deaths") # Accidental Deaths in the US 1973-1978
plot(milk, ylab="Monthly milk production") #Monthly milk production per cow
plot(part, ylab="LA weekly particulate levels") # Particulate levels from the LA pollution study
#plot(birth, ylab="US Monthly live births") # Monthly live births (adjusted) in thousands for the United States, 1948-1979.
#plot(woolyrnq) # Quarterly production of woollen yarn in Australia: tonnes. Mar 1965 – Sep 1994
#plot(ldeaths)
#plot(unemp)# Monthly U.S. Unemployment series 1948-1978
dev.off()
# Simulated AR(1) processes
x_1 <- arima.sim(list(order=c(1,0,0), ar= 0.8), n=250)
x_2 <- arima.sim(list(order=c(1,0,0), ar=-0.8), n=250)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ar_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,2), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_1, ylab=expression(X[t]), main=(expression(AR(1)~~~phi==0.8)))
Acf(x_1, main="", xlab=expression(k))
Pacf(x_1, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_2, ylab=expression(X[t]), main=(expression(AR(1)~~~phi==-0.8)))
Acf(x_2, main="", xlab=expression(k))
Pacf(x_2, main="", ylab="PACF", xlab=expression(k))
#
dev.off()
# Simulated AR(2) processes
x_1 <- arima.sim(list(order=c(2,0,0), ar=c(0.5,-0.8)), n=250)
x_2 <- arima.sim(list(order=c(2,0,0), ar=c(0.6,0.3)), n=250)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ar_2.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,2), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_1, ylab=expression(X[t]), main=(expression(AR(2)~~~phi[1]==0.5~~phi[2]==-0.8)))
Acf(x_1, main="", xlab=expression(k))
Pacf(x_1, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_2, ylab=expression(X[t]), main=(expression(AR(2)~~~phi[1]==0.6~~phi[2]==0.3)))
Acf(x_2, main="", xlab=expression(k))
Pacf(x_2, main="", ylab="PACF", xlab=expression(k))
#
dev.off()
# Stationary regions for an AR(2) process
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/roots.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,1), mar=c(4,4.4,0.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
curve(1+x, -2,2, ylim=c(-0.929,0.929), xlab=expression(phi[1]), ylab=expression(phi[2]))
curve(1-x, -2,2, add=TRUE)
curve((-x^2)/4, -2,2, add=TRUE)
text(0,.4,"Real roots", cex=1.5)
text(0,.32,expression(abs(lambda[1])<1~~~abs(lambda[2])<1), cex=1.5)
text(-1.05,.4,expression(phi[2]==1+phi[1]), cex=1.5)
arrows(-0.8,0.4, -0.6,0.4, length=0.1, angle=15)
text(1.05,.4,expression(phi[2]==1-phi[1]), cex=1.5)
arrows(0.8,0.4, 0.6,0.4, length=0.1, angle=15)
text(0, -.5,"Complex roots", cex=1.5)
text(0,-0.59,expression(abs(lambda[1])<1~~~abs(lambda[2])<1), cex=1.5)
text(0,-0.13,expression(phi[1]^2+4*phi[2]==0), cex=1.5)
arrows(0,-0.08, 0,0, length=0.1, angle=15)
abline(0.0015,0, col="gray70", lty=2)
dev.off()
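# Numerical check of the region (sketch): an AR(2) is stationary iff all roots
# of 1 - phi_1*z - phi_2*z^2 lie outside the unit circle; e.g. for the
# simulated AR(2) above with phi_1 = 0.5, phi_2 = -0.8:
Mod(polyroot(c(1, -0.5, 0.8)))  # both moduli > 1, hence stationary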
# Simulated MA(1) processes
x_1 <- arima.sim(list(order=c(0,0,1), ma= 0.8), n=250)
x_2 <- arima.sim(list(order=c(0,0,1), ma=-0.8), n=250)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ma_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,2), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_1, ylab=expression(X[t]), main=(expression(MA(1)~~~theta==0.8)))
Acf(x_1, main="", xlab=expression(k))
Pacf(x_1, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_2, ylab=expression(X[t]), main=(expression(MA(1)~~~theta==-0.8)))
Acf(x_2, main="", xlab=expression(k))
Pacf(x_2, main="", ylab="PACF", xlab=expression(k))
#
dev.off()
# Simulated MA(2) processes
x_1 <- arima.sim(list(order=c(0,0,2), ma=c(0.3,0.3)), n=250)
x_2 <- arima.sim(list(order=c(0,0,2), ma=c(0.6,-0.4)), n=250)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ma_2.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,2), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_1, ylab=expression(X[t]), main=(expression(MA(2)~~~theta[1]==0.3~~theta[2]==0.3)))
Acf(x_1, main="", xlab=expression(k))
Pacf(x_1, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_2, ylab=expression(X[t]), main=(expression(MA(2)~~~theta[1]==0.6~~theta[2]==-0.4)))
Acf(x_2, main="", xlab=expression(k))
Pacf(x_2, main="", ylab="PACF", xlab=expression(k))
#
dev.off()
# Simulated ARMA(1,1) processes
x_1 <- arima.sim(list(order=c(1,0,1), ar=0.8, ma=0.6), n=250)
x_2 <- arima.sim(list(order=c(1,0,1), ar=0.9, ma=-0.5), n=250) # ar=0.9, ma=-0.5
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/arma_1_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,2), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_1, ylab=expression(X[t]), main=(expression(ARMA(1,1)~~~phi[1]==0.8~~theta[1]==0.6)))
Acf(x_1, main="", xlab=expression(k))
Pacf(x_1, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_2, ylab=expression(X[t]), main=(expression(ARMA(1,1)~~~phi[1]==0.9~~theta[1]==-0.5)))
Acf(x_2, main="", xlab=expression(k))
Pacf(x_2, main="", ylab="PACF", xlab=expression(k))
#
dev.off()
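# The sample ACF/PACF above can be checked against their theoretical
# counterparts (sketch, for the first ARMA(1,1) simulated above):
ARMAacf(ar = 0.8, ma = 0.6, lag.max = 20)               # theoretical ACF
ARMAacf(ar = 0.8, ma = 0.6, lag.max = 20, pacf = TRUE)  # theoretical PACF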
# Stationary AR(1) and Random walk processes with drift alpha
n <- 250
delta <- 0.5
phi <- 0.9
#
x <- y <- z <- numeric(n)
t <- seq(1,n,1)
for(i in 2:n){
x[i] <- delta + x[i-1] + rnorm(1,0,1)
y[i] <- delta + phi*y[i-1] + rnorm(1,0,1)
}
# Deterministic trend
for(i in 1:n){
z[i] <- delta*t[i] + rnorm(1,0,2)
}
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/rw.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,1), mar=c(4.2,3.3,0.2,0.5), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab="", main="")
lines(delta*t, lty=2, col ="gray70")
text(190,107,expression(E(Y[t])==delta~t), cex=1.1)
arrows(194,105, 200,101, length=0.1, angle=15)
lines(y, lty=5)
abline(h=mean(y), lty=2, col ="gray70")
text(110,12,expression(E(X[t])==delta/(1-phi[1])), cex=1.1)
arrows(106,10, 110,6, length=0.1, angle=15)
legend("topleft",title="",legend=c("Random Walk with Drift","Stationary AR(1) with Drift"), lty=c(1,5), cex=1.2, bty="n")
dev.off()
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/acf_rw.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,2), mar=c(4.2,4.5,3,0.5), las=1, cex.axis=1.5, cex.lab=1.5)
Acf(x, ylab="ACF", xlab=expression(k), main="Random Walk with Drift")
Acf(y, ylab="ACF", xlab=expression(k), main="Stationary AR(1) with Drift", ylim=c(-0.2,1))
dev.off()
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/dt.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,2), mar=c(4.2,4.5,1.5,0.3), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(z, ylab=expression(X[t]), main="Trend-stationary")
lines(predict(lm(z~t-1)), col=2)
ts.plot(residuals(lm(z~t-1)), ylab=expression(X[t]-DT[t]), main="Detrending")
dev.off()
summary(lm(z~t-1))
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/rw_ts.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,2), mar=c(4.2,4.5,1.5,0.3), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(z, ylab=expression(X[t]), main="Trend-stationary")
ts.plot(x, ylab="", main="Random walk with drift")
dev.off()
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/rw_diff.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,2), mar=c(4.2,4.5,1.5,0.5), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, ylab=(expression(X[t])), main="Random Walk with Drift")
ts.plot(diff(x), ylab=(expression(Delta~X[t])), main="First difference")
dev.off()
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/acf_rw_diff.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(1,2), mar=c(4.2,4.5,3,0.5), las=1, cex.axis=1.5, cex.lab=1.5)
Acf(x, ylab="ACF", xlab=expression(k), main="Random Walk with Drift")
Acf(diff(x), ylab="ACF", xlab=expression(k), main="First difference", ylim=c(-0.2,1))
dev.off()
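# A formal unit-root check (sketch; tseries is an extra dependency, not loaded
# above):
# tseries::adf.test(x)        # should fail to reject the unit-root null
# tseries::adf.test(diff(x))  # should reject it after first-differencing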
# x <- arima.sim(list(order=c(1,0,0), ar=0.9), mean=0.5, n=250)
# y <- arima.sim(list(order=c(0,1,0)), mean=0.5, n=250)
# y <- 1:101 * b +arima.sim(list(order=c(1,1,0), ar=0), n=250)
# Simulated ARIMA(1,1,1) processes
x_1 <- arima.sim(list(order=c(1,1,0), ar=0.8), n=250) # ARIMA(1,1,0) or ARI(1,1)
x_2 <- arima.sim(list(order=c(0,1,1), ma=0.75), n=250) # ARIMA(0,1,1) or IMA(1,1)
x_3 <- arima.sim(list(order=c(1,1,1), ar=0.9, ma=0.5), n=250) # ARIMA(1,1,1)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/arima_1_1_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,3), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x_1, ylab=expression(X[t]), main=(expression(ARIMA(1,1,0)~~~phi[1]==0.8)))
Acf(x_1, main="", xlab=expression(k))
Pacf(x_1, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_2, ylab=expression(X[t]), main=(expression(ARIMA(0,1,1)~~~theta[1]==0.75)))
Acf(x_2, main="", xlab=expression(k))
Pacf(x_2, main="", ylab="PACF", xlab=expression(k))
#
ts.plot(x_3, ylab=expression(X[t]), main=(expression(ARIMA(1,1,1)~~~phi[1]==0.9~~theta[1]==0.5)))
Acf(x_3, main="", xlab=expression(k))
Pacf(x_3, main="", ylab="PACF", xlab=expression(k))
#
dev.off()
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/diff_arima_1_1_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfcol=c(3,3), mar=c(4.2,4.5,1.5,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(diff(x_1), ylab=expression(Delta~X[t]), main=(expression(Delta~X[t]~~of~~ARIMA(1,1,0)~~~phi[1]==0.8)))
Acf(diff(x_1), main="", xlab=expression(k))
Pacf(diff(x_1), main="", ylab="PACF", xlab=expression(k))
#
ts.plot(diff(x_2), ylab=expression(Delta~X[t]), main=(expression(Delta~X[t]~~of~~ARIMA(0,1,1)~~~theta[1]==0.75)))
Acf(diff(x_2), main="", xlab=expression(k))
Pacf(diff(x_2), main="", ylab="PACF", xlab=expression(k))
#
ts.plot(diff(x_3), ylab=expression(Delta~X[t]), main=(expression(Delta~X[t]~~of~~ARIMA(1,1,1)~~~phi[1]==0.9~~theta[1]==0.5)))
Acf(diff(x_3), main="", xlab=expression(k))
Pacf(diff(x_3), main="", ylab="PACF", xlab=expression(k))
#
dev.off()
# Seasonal Processes
# Seasonal AR(1) model
Phi <- c(rep(0,11),0.9)
sAR <- arima.sim(list(order=c(12,0,0), ar=Phi), n=37)
sAR <- ts(sAR, freq=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/sar_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
layout(matrix(c(1,2, 1,3), nc=2))
par(mar=c(3,3.5,2,0.2), mgp=c(1.6,.6,0), las=0, cex.axis=1.5, cex.lab=1.5)
plot(sAR, axes=FALSE, main="Seasonal AR(1)", ylab=expression(X[t]), xlab="year", type="c")
months = c("J","F","M","A","M","J","J","A","S","O","N","D")
points(sAR, pch=months, cex=1, font=1, col=1:12)
axis(1, 1:4)
abline(v=1:4, lty=2, col=gray(.6))
axis(2)
box()
ACF <- ARMAacf(ar=Phi, ma=0, 100)[-1]
PACF <- ARMAacf(ar=Phi, ma=0, 100, pacf=TRUE)
plot(ACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
plot(PACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
dev.off()
# Seasonal MA(1) model
Theta <- c(rep(0,11),0.5)
sMA <- arima.sim(list(order=c(0,0,12), ma=Theta), n=37)
sMA <- ts(sMA, freq=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/sma_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
layout(matrix(c(1,2, 1,3), nc=2))
par(mar=c(3,3.5,2,0.2), mgp=c(1.6,.6,0), las=0, cex.axis=1.5, cex.lab=1.5)
plot(sMA, axes=FALSE, main="Seasonal MA(1)", ylab=expression(X[t]), xlab="year", type="c")
months = c("J","F","M","A","M","J","J","A","S","O","N","D")
points(sMA, pch=months, cex=1, font=1, col=1:12)
axis(1, 1:4)
abline(v=1:4, lty=2, col=gray(.6))
axis(2)
box()
ACF <- ARMAacf(ar=0, ma=Theta, 100)[-1]
PACF <- ARMAacf(ar=0, ma=Theta, 100, pacf=TRUE)
plot(ACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
plot(PACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
dev.off()
# Seasonal ARMA(1,1) model
Phi <- c(rep(0,11),0.8)
Theta <- c(rep(0,11),-0.5)
sARMA <- arima.sim(list(order=c(12,0,12), ar=Phi, ma=Theta), n=37)
sARMA <- ts(sARMA, freq=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/sarma_1_1.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
layout(matrix(c(1,2, 1,3), nc=2))
par(mar=c(3,3.5,2,0.2), mgp=c(1.6,.6,0), las=0, cex.axis=1.5, cex.lab=1.5)
plot(sARMA, axes=FALSE, main="Seasonal ARMA(1,1)", ylab=expression(X[t]), xlab="year", type="c")
months = c("J","F","M","A","M","J","J","A","S","O","N","D")
points(sARMA, pch=months, cex=1, font=1, col=1:12)
axis(1, 1:4)
abline(v=1:4, lty=2, col=gray(.6))
axis(2)
box()
ACF <- ARMAacf(ar=Phi, ma=Theta, 100)[-1]
PACF <- ARMAacf(ar=Phi, ma=Theta, 100, pacf=TRUE)
plot(ACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
plot(PACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
dev.off()
# Seasonal Multiplicative ARMA(0,1)x(1,0)_12
Phi <- c(rep(0,11),0.8)
theta <- -0.5
smARMA <- arima.sim(list(order=c(12,0,1), ar=Phi, ma=theta), n=37)
smARMA <- ts(smARMA, freq=12)
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/sarma_1_1_1_0.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
layout(matrix(c(1,2, 1,3), nc=2))
par(mar=c(3,3.5,2,0.2), mgp=c(1.6,.6,0), las=0, cex.axis=1.5, cex.lab=1.5)
plot(smARMA, axes=FALSE, main="Seasonal Multiplicative ARMA(0,1)x(1,0)_12", ylab=expression(X[t]), xlab="year", type="c")
months = c("J","F","M","A","M","J","J","A","S","O","N","D")
points(smARMA, pch=months, cex=1, font=1, col=1:12)
axis(1, 1:4)
abline(v=1:4, lty=2, col=gray(.6))
axis(2)
box()
ACF <- ARMAacf(ar=Phi, ma=theta, 100)[-1]
PACF <- ARMAacf(ar=Phi, ma=theta, 100, pacf=TRUE)
plot(ACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
plot(PACF, axes=FALSE, type="h", xlab=expression(k), ylim=c(-1,1))
axis(1, seq(0,96,12))
axis(2)
abline(h=0)
box()
dev.off()
# Deterministic Seasonality
library(forecast) # needed below for seasonaldummy(), Acf() and Pacf()
n <- 160
x <- numeric(n)
t <- ts(seq(1,n,1), freq=4)
S <- seasonaldummy(t)
s4 <- 1-rowSums(S)
S <- cbind(S,s4)
for(i in 1:n){
x[i] <- 6*S[i,1] + 8*S[i,2] - 4*S[i,3] + 5*S[i,4] + rnorm(1,0,2)
}
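# The loop draws one noisy observation per quarter from
# x_t = 6*D_1t + 8*D_2t - 4*D_3t + 5*D_4t + w_t, with w_t ~ N(0, 2^2).
# An equivalent vectorized version (sketch, would redraw the noise):
# x <- as.vector(S %*% c(6, 8, -4, 5)) + rnorm(n, 0, 2)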
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/ds.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(2,2), mar=c(4.2,4.5,2.7,0.3), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(x, main=expression(X[t]), ylab="")
lines(predict(lm(x~S-1)), col=2)
ts.plot(residuals(lm(x~t+S-1)), main=expression(X[t]-sum(hat(beta)[j]~D[jt], j==1, s)), ylab="")
Acf(x, ylab="ACF", xlab=expression(k), main="", ylim=c(-1,1))
Acf(residuals(lm(x~t+S-1)), ylab="ACF", xlab=expression(k), main="", ylim=c(-1,1))
dev.off()
# Seasonal Multiplicative ARIMA(0,1,1)x(0,1,1)_12
library(forecast)
model <- Arima(ts(rnorm(100),freq=12), order=c(0,1,1), seasonal=c(0,1,1), fixed=c(theta=-0.4, Theta=-0.6)) # fixed= pins ma1/sma1 by position; arima() ignores the names
smARIMA <- simulate(model, nsim=48)
smARIMA <- ts(smARIMA[9:44], freq=12)
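# simulate() returns 48 monthly values; keeping observations 9:44 gives 36
# points (3 full years) and discards the earliest draws as burn-in.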
if (dev.cur() > 1) dev.off() # guard: the previous device was already closed above
layout(matrix(c(1,2, 1,3), nc=2))
par(mar=c(3,3.5,2,0.2), mgp=c(1.6,.6,0), las=0, cex.axis=1.5, cex.lab=1.5)
plot(smARIMA, axes=FALSE, main="Seasonal Multiplicative ARIMA(0,1,1)x(0,1,1)_12", ylab=expression(X[t]), xlab="year", type="c")
months = c("J","F","M","A","M","J","J","A","S","O","N","D")
points(smARIMA, pch=months, cex=1, font=1, col=1:12)
axis(1, 1:4)
abline(v=1:4, lty=2, col=gray(.6))
axis(2)
box()
Acf(smARIMA, main="", xlab=expression(k), ylim=c(-1,1))
Pacf(smARIMA, main="", ylab="PACF", xlab=expression(k), ylim=c(-1,1))
# Also the "sarima" package can be used
library(sarima)
sAR <-sim_sarima(n=48, model=list(sar=0.9, nseasons=12)) # SAR(1)
sMA <-sim_sarima(n=48, model=list(sma=0.5, nseasons=12)) # SMA(1)
sARMA <-sim_sarima(n=48, model=list(sar=0.8, sma=-0.5, nseasons=12)) # SARMA(1,1)
smARMA <-sim_sarima(n=48, model=list(ma=-0.5, sar=0.8, nseasons=12)) # Seasonal Multiplicative ARMA(0,1)x(1,0)_12
smARIMA <- sim_sarima(n=48, model=list(ma=-0.4, iorder=1, siorder=1, sma=-0.6, nseasons=12)) # Seasonal Multiplicative ARIMA(0,1,1)x(0,1,1)_12
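# sim_sarima() returns a plain numeric vector (no ts attributes), which is why
# the plot below indexes 13:48 by hand -- the first 12 values are burn-in.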
plot(smARIMA[13:48], axes=FALSE, main="Seasonal Multiplicative ARIMA(0,1,1)x(0,1,1)_12", ylab=expression(X[t]), xlab="time", type="c")
months = c("J","F","M","A","M","J","J","A","S","O","N","D")
points(smARIMA[13:48], pch=months, cex=1, font=1, col=1:12)
axis(1, 0:36)
abline(v=c(12,24,36), lty=2, col=gray(.6))
axis(2)
box()
# Spurious Regression
# Let x and y be two independent random walk processes
n <- 250
#
x <- y <- numeric(n)
t <- seq(1,n,1)
for(i in 2:n){
x[i] <- x[i-1] + rnorm(1,0,1)
y[i] <- y[i-1] + rnorm(1,0,1)
}
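# Although x and y are independent by construction, a regression of one on the
# other typically looks "significant" because both are I(1) random walks:
summary(lm(y ~ x)) # spuriously large R^2 / t-statistics; exact values vary (no seed set)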
postscript("/Users/Santiago/Dropbox/Teaching/Time Series/Slides/spurious.eps",width = 15.5, height = 8.5, horizontal = TRUE, onefile = FALSE, paper = "a4")
par(mfrow=c(2,2), mar=c(4,4.2,1,0.2), las=1, cex.axis=1.5, cex.lab=1.5)
ts.plot(y, main=expression(y[t]==y[t-1]+u[t]), ylab="", xlab="")
ts.plot(x, main=expression(x[t]==x[t-1]+v[t]), ylab="", xlab="")
plot(x,y, xlab = expression(x[t]), ylab=expression(y[t]))
abline(lm(y~x), col="red")
# NB: the intercept and slope in the title correspond to one particular draw;
# without set.seed() your fitted coefficients will differ.
ts.plot(residuals(lm(y~x)), main=expression(epsilon[t]==y[t]+2.6917-0.5055*x[t]), ylab="", xlab="")
dev.off()
#fit <- Arima(foo, order=c(0,1,1), seasonal=c(0,1,1))
#model <- Arima(ts(rnorm(100),freq=4), order=c(1,1,1), seasonal=c(1,1,1), fixed=c(phi=0.5, theta=-0.4, Phi=0.3, Theta=-0.2))
# http://stackoverflow.com/questions/20273104/simulating-a-basic-sarima-model-in-r
#library(gmwm)
# Specify a SARIMA(2,1,1)(1,1,1)[12]
#mod = SARIMA(ar=c(.3,.5), i=1, ma=.1, sar=.2, si = 1, sma = .4, s = 12, sigma2 = 1.5)
# Generate the data
#xt2 = gen.gts(mod, 1e3)
|
1075e3cff2e78edf30dea16a30c5360b51512a3a | dc3665fa074c42cd25d3eca313b90f4ae4482520 | /vendor_behavior.R | 29c382822735c9e2c2d05a9f9dbadf9b15b404e9 | [] | no_license | andfdiazrod/darkweb_functions | 5f6a350e6902bfbb9a9ce8886425ed62c48dbf3e | b8f20f47c916494103a9f7f2f418ed2a39f80b6d | refs/heads/master | 2022-05-16T02:01:45.786947 | 2019-11-29T16:53:37 | 2019-11-29T16:53:37 | 216,660,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,605 | r | vendor_behavior.R | vendor_behavior <- function(df){
  # NB: this function relies on dplyr (%>%, filter, pull, ...) and tidyr (replace_na),
  # and on a data frame `info_total` that is assumed to exist in the calling
  # environment (it is read from below but never passed in as an argument).
  library(dplyr); library(tidyr)
  columns = c("day_format", 'vendor_name',"in_sample","appears", "consistency",
"relative_consistency")
vendor_consistency <- data.frame(matrix(ncol=length(columns)))
colnames(vendor_consistency) <- columns
vendor_consistency$day_format <- as.Date(vendor_consistency$day_format)
  sum_up_to <- function(x){
    return(cumsum(x)) # running total; equivalent to the original lapply(1:length(x), ...) construction
  }
for(vn in unique(df$vendor_name)){
vn_day <- data.frame(day_format = unique(as.Date(info_total %>%
filter(vendor_name == vn) %>% pull(day_format))),
appears = 1)
vn_day_range <- read.csv('time_series.csv', stringsAsFactors = F)
vn_day_range$day_format <- as.Date(vn_day_range$day_format)
vn_day_range <- vn_day_range %>%
filter(day_format %in% seq.Date(min(vn_day[,1]),
max(vn_day[,1]),1))
vn_day_range <- na.omit(left_join(vn_day_range, vn_day, by='day_format') %>%
replace_na(list(appears = -1)))
vn_day_range$consistency <- sum_up_to(vn_day_range$appears)
vn_day_range$relative_consistency <- vn_day_range$consistency/(1:nrow(vn_day_range))
vn_day_range$vendor_name <- vn
vendor_consistency <- rbind(vendor_consistency, vn_day_range)
}
return(vendor_consistency)
}
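# Hypothetical usage (assumes `info_total` and 'time_series.csv' are available):
# vendor_consistency <- vendor_behavior(info_total)
# Note: the first row of the result is the all-NA seed row created by
# data.frame(matrix(ncol = ...)); drop it with vendor_consistency[-1, ] if unwanted.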
if(FALSE){ # exploratory scratch code: intentionally not run (references objects that may not exist)
a = vendor_consistency %>% group_by(day_format) %>%
summarise(a = median(relative_consistency, na.rm=T))
plot(a, type='l')
b <- left_join(date_range,a,by='day_format')
}
|
69170b9ed1285a26df786dda9db67a76a136ec4b | f8c92559534dba1aaec173f86b22bfd2bff913bc | /Lecture 1/hw1_factor8.R | fbff896114e429129a6ddeff0a094d9cabb093c0 | [] | no_license | Zijie-Xia/GR5206-Introduction-to-Data-Science | 499f8b2999a194431891fb6c019e82542ca111bd | f017e55171a79f1833e8ad4fe0b9697a8634678e | refs/heads/master | 2020-12-07T07:06:56.675558 | 2020-01-08T22:01:39 | 2020-01-08T22:01:39 | 232,179,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | hw1_factor8.R | # HW1: factor8
#
# 1. Create an ordered factor `f1` consisting of letters 'a' to 'z' ordered alphabetically.
# 2. Create an ordered factor `f2` consisting of letters 'a' to 'z' in descending alphabetical order.
# 3. Create a 30-element ordered factor `f3` consisting of letters 'a' to 'z' followed by 4 NA. The order of `f3` is 'a'<...<'z'<NA.
# 4. Delete the element 'c' with the level 'c' and assign it to `f4`.
## Do not modify this line! ## Write your code for 1. after this line! ##
f1<-factor(letters[1:26],order=TRUE)
## Do not modify this line! ## Write your code for 2. after this line! ##
f2<-factor(letters[1:26],order=TRUE,levels=letters[26:1])
## Do not modify this line! ## Write your code for 3. after this line! ##
f3<-factor(c(letters[1:26],NA,NA,NA,NA),order=TRUE,levels=c(letters[1:26],NA),exclude=TRUE)
## Do not modify this line! ## Write your code for 4. after this line! ##
f4<-factor(c("a","b",letters[4:26],NA,NA,NA,NA),order=TRUE,levels=c("a","b",letters[4:26],NA),exclude=TRUE)
|
0b133bf89d70c4ffdc4a35b6638ac5e6ac069a18 | 9d680e799f36291ef0406729e61315b8b3d9d0a1 | /man/UNIMANtransient.Rd | b0cd06bfdd1d649fb46422754ab18e49b5cd2592 | [] | no_license | cran/chemosensors | ffe070d193178a9274c6273fbdea6e256d028550 | b8bf614e42a6b0bea7c4eb5eec14c06f679d17b1 | refs/heads/master | 2021-01-01T16:59:55.106040 | 2014-08-31T00:00:00 | 2014-08-31T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 171 | rd | UNIMANtransient.Rd | \docType{data}
\name{UNIMANtransient}
\alias{UNIMANtransient}
\title{Dataset UNIMANtransient}
\description{
Dataset UNIMANtransient
}
\keyword{data}
\keyword{datasets}
|
13b38bd70df1c10c75b0a356f69cc07a7161787a | facce126b08e76ad542ff63258afe1e327e2d563 | /cpp_adv_r_questions.R | 66e1e24427b58632a7d6798a712d4f4c9594f245 | [] | no_license | bweiher/r_cpp_learning | 7f7c1d3f989850e5e3f5deabfd7144269fd83def | f395529814df37e724702df6f14effb59226799f | refs/heads/master | 2020-04-01T04:10:29.847091 | 2018-12-17T02:20:44 | 2018-12-17T02:20:44 | 152,852,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,271 | r | cpp_adv_r_questions.R | # rcpp questions
library(Rcpp)
cppFunction("double f1(NumericVector x) {
int n = x.size();
double y = 0;
for(int i = 0; i < n; ++i) {
y += x[i] / n;
}
return y;
}")
x <- 1:10
for(g in seq_along(x)){
y = x[g] / length(x)
print(y)
}
f1(x)
median(x)
f1(c(1,2,3))
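# sanity check: the loop in f1 accumulates x[i]/n, so it should agree with mean()
all.equal(f1(x), mean(x))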
# Vector input, vector output
cppFunction('NumericVector pdistC(double x, NumericVector ys) {
int n = ys.size();
NumericVector out(n);
for(int i = 0; i < n; ++i) {
out[i] = sqrt(pow(ys[i] - x, 2.0));
}
return out;
}')
pdistC(0.5, runif(10))
cppFunction('NumericVector rowSumsC(NumericMatrix x) {
int nrow = x.nrow(), ncol = x.ncol();
NumericVector out(nrow);
for (int i = 0; i < nrow; i++) {
double total = 0;
for (int j = 0; j < ncol; j++) {
total += x(i, j);
}
out[i] = total;
}
return out;
}')
set.seed(1014)
x <- matrix(sample(100), 10)
rowSums(x)
# examples -----
# += addition operator
# mean function
cppFunction('double f1(NumericVector x) {
int n = x.size();
double y = 0;
for(int i = 0; i < n; ++i) {
y += x[i] ;
}
y = y / n;
return y;
}')
x <- 1:10
f1(x)
sum(x / length(x))
mean(x)
# cumsum ~
cppFunction('NumericVector f2(NumericVector x) {
int n = x.size();
NumericVector out(n);
out[0] = x[0];
for(int i = 1; i < n; ++i) {
out[i] = out[i - 1] + x[i];
}
return out;
}'
)
f2(x)
cumsum(x)
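# sanity check: f2 should reproduce base R's cumulative sum
all.equal(f2(x), cumsum(x))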
# original sketch below returned a bool ("does x contain a TRUE value?");
# the compiled f3 further down instead returns the 0-based index of the first TRUE (99 if none)
# bool f3(LogicalVector x) {
# int n = x.size();
#
# for(int i = 0; i < n; ++i) {
# if (x[i]) return true;
# }
# return false;
# }
cppFunction('int f3(LogicalVector x) {
int n = x.size();
for(int i = 0; i < n; ++i) {
if (x[i]) return i;
}
return 99;
}')
f3(c(F,F,F))
f3(c(T,T,F))
f3(c(F,F,F,F,T))
f3(1:3) # (was f33, a typo) integers coerce to logical, all nonzero -> TRUE, so this returns 0
cppFunction('int f4(Function pred, List x) {
int n = x.size();
for(int i = 0; i < n; ++i) {
LogicalVector res = pred(x[i]);
if (res[0]) return i + 1;
}
return 0;
}')
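# f4 is a Position()-like helper: it returns the 1-based index of the first
# list element satisfying pred, or 0 if none does. For example:
f4(is.character, list(1, "a", TRUE)) # 2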
# NumericVector f5(NumericVector x, NumericVector y) {
# int n = std::max(x.size(), y.size());
# NumericVector x1 = rep_len(x, n);
# NumericVector y1 = rep_len(y, n);
#
# NumericVector out(n);
#
# for (int i = 0; i < n; ++i) {
# out[i] = std::min(x1[i], y1[i]);
# }
#
# return out;
# } |
d9cce87a0ab01cf0916523ea4b8c368636748c74 | a9c8e9612975e42f68e5a08b0d65fcbb5edb7616 | /plot3.R | 1dcc2f9f1da5febed5dfd9582e13349d76d37dce | [] | no_license | RaghavVacher/ExData_Plotting1 | e41b562dd975e12882f3b55b6eb46ae5543d2c4a | b665bc853f08ca05462953765239928f788caede | refs/heads/master | 2023-02-07T05:25:51.740550 | 2020-12-28T08:46:02 | 2020-12-28T08:46:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 846 | r | plot3.R | x <- read.table("C:\\Users\\Hp\\Downloads\\exdata_data_household_power_consumption\\household_power_consumption.txt", skip = 1, sep = ";")
colnames(x) <- c("Date", "Time", "Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
sub <- subset(x, x$Date == "1/2/2007" | x$Date == "2/2/2007")
datetime <- strptime(paste(sub$Date, sub$Time), "%d/%m/%Y %H:%M:%S")
# The raw file codes missing values as "?", so these columns may arrive as
# character/factor; convert via as.character() first, otherwise as.numeric()
# on a factor silently returns level codes instead of the measured values.
sub1 <- as.numeric(as.character(sub$Sub_metering_1))
sub2 <- as.numeric(as.character(sub$Sub_metering_2))
sub3 <- as.numeric(as.character(sub$Sub_metering_3))
plot(datetime, sub1, type = "n", ylab="Energy Submetering", xlab="")
lines(datetime, sub1)
lines(datetime, sub2, col = "red")
lines(datetime, sub3, col = "blue")
legend("topright", c("Submetering 1", "Submetering 2", "Submetering 3"), col = c("black", "red", "blue"), lty = 1)
|
89308d7bff06ed1304981eed274130bbda2cbe6c | 9478cff072f07ea24c94b233a96a3cdb30f27e95 | /basic_script.R | f4d600dd0387463be5f7450481f35a8bc04917d0 | [] | no_license | gozdebudak/r-programming-basics | fce5d091af385c25e4369e6bc1d82e2c42c5beba | 22cfeaf3f3302013d2c872dc694d53de1e1b92a6 | refs/heads/master | 2023-04-22T21:26:28.445300 | 2021-05-09T17:58:49 | 2021-05-09T17:58:49 | 365,807,244 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,158 | r | basic_script.R | data <- read.csv("sample_data.csv") # Reading CSV file and creating dataframe object
print(data) # Printing the data
print(is.data.frame(data)) # Checking if the data object is a dataframe
print(ncol(data)) # The column count of the data dataframe
print(nrow(data)) # The row count of the data dataframe
names(data) # The column names of data dataframe
print(data[1:2,]) # Printing the first 2 rows of the data frame
print(data[(nrow(data)-1):nrow(data),]) # Printing the last 2 rows of the data frame (computed from nrow instead of hard-coding 152:153)
print(data[47,"Ozone"]) # Printing the value of "Ozone" in the 47th row
# Missing values count in the "Ozone" column of this data frame
sum(is.na(data$Ozone))
# The mean value of "Ozone" column except missing values
mean(data$Ozone, na.rm=TRUE) # equivalent to subsetting with complete.cases(), just more direct
# Mean of the "Solar.R" column in the subset of rows of the data frame where
# "Ozone" values are above 31 and "Temp" values are above 90
mean(subset(x=data, subset= Ozone > 31 & Temp > 90)$Solar.R)
# The mean of "Temp" when "Month" is equal to 6
mean(subset(x=data, subset=Month==6)$Temp)
# The max value of "Ozone" when "Month" is equal to 5
max(subset(x=data, subset=Month==5)$Ozone, na.rm=TRUE)
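# A compact way to get a per-month summary in one call (sketch; same data):
tapply(data$Temp, data$Month, mean, na.rm = TRUE) # mean Temp for every month at once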
|
630dd959cb6f7b3ccc8af78c22f6d416623f5e0c | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/FlexReg/man/Consumption.Rd | 9b21829dbea8440a4ebc8dcb2b2bdcc6d6cd0772 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,743 | rd | Consumption.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DATA.R
\name{Consumption}
\alias{Consumption}
\title{Italian Households Consumption data}
\format{
A data frame containing 568 observations on the following 8 variables.
\describe{
\item{\code{NComp}}{the number of household members.}
\item{\code{Sex}}{the sex of the head of household.}
\item{\code{Age}}{the age of the head of household.}
\item{\code{NEarners}}{the number of household income earners.}
\item{\code{Area}}{a factor indicating the geographical area where the household is located.}
\item{\code{Citizenship}}{a factor indicating the citizenship of the head of household.}
\item{\code{Income}}{the net disposable income.}
\item{\code{Consumption}}{the propensity to consume, defined as the percentage of \code{Income} that is spent rather than saved.}
}
}
\source{
{ \href{https://www.bancaditalia.it/statistiche/tematiche/indagini-famiglie-imprese/bilanci-famiglie/distribuzione-microdati/index.html?com.dotmarketing.htmlpage.language=1}{Bank of Italy, Survey on Household Income and Wealth, 2016}. \cr
\cr
\href{https://www.bancaditalia.it/statistiche/tematiche/indagini-famiglie-imprese/bilanci-famiglie/documentazione/documenti/2016/eng_Legen16.pdf?language_id=1}{Survey description.}
}
}
\description{
This dataset is a subset from the 2016 Survey on Household Income and Wealth data, a statistical survey conducted by Bank of Italy. The statistical units are the households and the head of the household is conventionally selected as the major income earner.
}
\details{
Full data are available on the website of the Bank of Italy. \code{Consumption} has been created by dividing the variable `consumption` over the `net disposable income`.
}
|
b17506d87cf048ebc6452ccbbe09f5b538194b99 | caf361bdbc2459187fb58fae876bad5497e532a1 | /man/plot_result_rank.Rd | 8b5006dcde855ff93247eb1a5d0624afbce32ce4 | [
"MIT"
] | permissive | ddiez/scmisc | 35efffabe859ddc6ac9c2c20f00d283a376def44 | f19819e7e736cfd167fd4b0c29d7290d66ab961a | refs/heads/master | 2023-08-17T04:08:03.971880 | 2023-08-06T13:35:17 | 2023-08-06T13:35:17 | 180,719,852 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 693 | rd | plot_result_rank.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/haystack.R
\name{plot_result_rank}
\alias{plot_result_rank}
\alias{plot_result_rank.haystack}
\alias{plot_result_rank.data.frame}
\title{plot_result_rank}
\usage{
plot_result_rank(x, highlight = NULL, sort.by = "log.p.adj")
\method{plot_result_rank}{haystack}(x, highlight = NULL, sort.by = "log.p.adj")
\method{plot_result_rank}{data.frame}(x, highlight = NULL, sort.by = "log.p.adj")
}
\arguments{
\item{x}{haystack object or summary data.frame.}
\item{highlight}{gene names to highlight.}
\item{sort.by}{rank the genes by the following column from the summary data.frame.}
}
\description{
plot_result_rank
}
|
6cf05033fe8aaab69014012d593bb736d2a3070b | 9ef445e42d40f7bedfb6091877a1c1ca8e2cb8d1 | /server.R | 016d1238d59e2f89c70d20134a994290ea56c311 | [] | no_license | marco-vene/datitalia | 03a1d0285ab4f3b6a646639078494acbc29b3060 | e02c4c6aacefee4d2a1b8b62a3f2f66af73d2454 | refs/heads/master | 2020-09-07T15:37:22.771643 | 2019-11-10T18:29:12 | 2019-11-10T18:29:12 | 220,829,641 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,161 | r | server.R | # Define the server
shinyServer( function(input, output) {
# output$trendPlot <- renderGirafe({
# ggiraph(code = print(trendLine(dati, input$gruppo, input$metrica)))
# })
output$trendPlot <- renderPlotly({
trendLine(dati, input$gruppo, input$metrica, input$periodo)
})
output$description <- renderUI({
if(input$metrica == "Popolazione")
{
includeMarkdown("def_popolazione.md")
}
else if(input$metrica == "Occupati")
{
includeMarkdown("def_occupati.md")
}
else if(input$metrica == "Disoccupati")
{
includeMarkdown("def_disoccupati.md")
}
else if(input$metrica == "Inattivi")
{
includeMarkdown("def_inattivi.md")
}
else if(input$metrica == "Tasso_Occupazione")
{
includeMarkdown("def_tasso_occ.md")
}
else if(input$metrica == "Tasso_Disoccupazione")
{
includeMarkdown("def_tasso_dis.md")
}
else if(input$metrica == "Tasso_Inattivita")
{
includeMarkdown("def_tasso_ina.md")
}
else{
NULL
}
})
})
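# Sketch of a table-driven alternative to the if/else chain above (assumes one
# "def_<metrica>.md" file per metric, matching the branches):
# md_files <- c(Popolazione = "def_popolazione.md",
#               Occupati = "def_occupati.md") # ...and so on for the other metrics
# output$description <- renderUI({
#   if (input$metrica %in% names(md_files)) includeMarkdown(md_files[[input$metrica]]) else NULL
# })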
|
b08f8c6ad9736f9a76e630d88ff43d8d938cfcc3 | ddb120b0aaa38527d4eded97552e63d1cad7fb9a | /Project3/server.R | eab74849a4929780e3558b5542a7a86097df77c7 | [] | no_license | dwatie/project3 | df858e17c541ca9039c154a47665734c8189c589 | 593d6da14ebf6c80ef2ab97a5b6d81fbe265954c | refs/heads/main | 2023-07-01T05:25:48.125878 | 2021-08-03T03:32:39 | 2021-08-03T03:32:39 | 390,863,936 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,039 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(tidyverse)
library(DT)
library(caret)
library(plotly)
library(ggplot2)
library(randomForest)
library(mathjaxr)
# Define server logic required to draw a histogram
shinyServer(function(input, output,session) {
collegeBall <- read.csv("cbb19.csv")
wccB10 <- collegeBall %>% select(TEAM, CONF, G, W, X2P_O, X2P_D, X3P_O, X3P_D, TOR, TORD) %>% filter(CONF %in% c("WCC", "B10")) # %in%, not ==: comparing against a length-2 vector is recycled row-by-row and silently drops valid rows; defined at server scope so the plots and summaries below can see it
getData <- reactive({
wccB10
})
output$wccB10Table <- renderDataTable({
getData()
})
output$plots <- renderPlot({
if (input$plotnum == "gmesPlot"){
G = ggplot(wccB10, aes(x = CONF, y = G)) + geom_bar(stat = "identity")
}
if (input$plotnum == "winsPlot"){
G = ggplot(wccB10, aes(x = CONF, y = W))+ geom_bar(position = "dodge",
stat = "identity",
aes(fill = TEAM))
}
if (input$plotnum == "densityPlot"){
G = ggplot(wccB10, aes(x = G)) + geom_density(aes(fill = CONF),
alpha = 0.5,
kernel = "gaussian")
}
if (input$plotnum == "boxPlot"){
G = plot_ly(wccB10, x = ~X3P_O, color = ~CONF, type = "box") # NB: plot_ly returns an htmlwidget; inside renderPlot it will not draw -- it needs renderPlotly/plotlyOutput (see sketch after this block)
}
G
})
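# Sketch: plotly widgets need their own renderer; a hypothetical plotlyOutput
# slot in ui.R (name assumed here) would pair with:
# output$plotsPlotly <- renderPlotly({
#   plot_ly(wccB10, x = ~X3P_O, color = ~CONF, type = "box")
# })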
output$confsumms <- renderDataTable({
if (input$datasums == "confWins"){
M = wccB10 %>% group_by(CONF) %>%
summarise(Min = min(W),
Med = median(W),
Avg = mean(W),
Max = max(W),
StDev = sd(W))
}
if (input$datasums == "confTwo"){
M = wccB10 %>% group_by(CONF) %>%
summarise(Min = min(X2P_O),
Med = median(X2P_O),
Avg = mean(X2P_O),
Max = max(X2P_O),
StDev = sd(X2P_O))
}
if (input$datasums == "confThree"){
M = wccB10 %>% group_by(CONF) %>%
summarise(Min = min(X3P_O),
Med = median(X3P_O),
Avg = mean(X3P_O),
Max = max(X3P_O),
StDev = sd(X3P_O))
}
if (input$datasums == "confTor"){
M = wccB10 %>% group_by(CONF) %>%
summarise(Min = min(TOR),
Med = median(TOR),
Avg = mean(TOR),
Max = max(TOR),
StDev = sd(TOR))
}
M
set.seed(1)
train <- sample(1:nrow(wccB10), size = nrow(wccB10)*0.7)
test <- dplyr::setdiff(1:nrow(wccB10), train)
wccB10Train <- wccB10[train, ]
wccB10Test <- wccB10[test, ]
wccB10TrainA <-wccB10Train %>% select(CONF, G, W, X2P_O, X2P_D, X3P_O, X3P_D, TOR, TORD)
wccB10TestA <- wccB10Test %>% select(CONF, G, W, X2P_O, X2P_D, X3P_O, X3P_D, TOR, TORD)
gmesVar <- wccB10TrainA$G
two<- wccB10TrainA
rfFit <- train(W ~ ., data = wccB10TrainA, # `W ~ input$gmesVar` is not a valid formula; use all predictors (or reformulate(input$gmesVar, "W") for a single one)
method = "rf",
trControl = trainControl(method = "repeatedcv",
repeats = 3,
number = 10),
tuneGrid = data.frame(mtry = 1:10)) # removed the duplicated data= argument and the nnet-only linout flag
bestLm <- lm(W ~ ., data = wccB10TrainA) # same formula fix as above
ClassFit <- train(W ~ ., data = wccB10TrainA, # `W ~ input` referenced the Shiny input object itself
method = "rpart",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv", repeats = 3, number = 10)) # trCtrl was never defined; reuse the control settings from rfFit
models <- vector("list", 3) # placeholders for the three fitted models (ranfor/linreg/classtree were never defined)
models[[1]] <- rfFit
models[[2]] <- bestLm
models[[3]] <- ClassFit # was `models[[3]] <-- classfit`: `<--` parses as a comparison with -classfit, and the object is named ClassFit
output$info <- renderUI({ # p() builds an HTML tag, so renderUI/uiOutput is the matching pair (renderText expects a character string)
p("The three modeling approaches that will be used are the Multiple Linear Regression Model, Classification Tree Model, and the Random Forest Model. They will be used to find a linear regression equation that is made up of a response variable which in this case will the wins variable, an intercept, and a combination of predictor variables.) Multiple Linear Regression Model pros and cons go here Classification Tree Model pros and cons go here Random Forest Model pros and cons go here")
})
})
})
|
02a84ccdc624056781ec1b970cb8184d48945ba0 | 68562f910349b41cdf4432c0921940f0513ab516 | /tests/testthat/test-style_xaringan.R | 1fc0ebd071fe53304582101422ddc11b8cf70c81 | [
"MIT"
] | permissive | gadenbuie/xaringanthemer | 2990406aff24a458695c6e4793c891dff5feb506 | 85091cd16af5a938b6d927ff5f6b0fe990ee0e63 | refs/heads/main | 2022-09-15T18:32:49.954381 | 2022-08-20T18:03:58 | 2022-08-20T22:47:52 | 129,549,154 | 446 | 28 | NOASSERTION | 2022-08-20T16:58:02 | 2018-04-14T19:44:17 | R | UTF-8 | R | false | false | 483 | r | test-style_xaringan.R |
test_that("style_xaringan() writes to specified outfile", {
tmpfile <- tempfile(fileext = ".css")
expect_equal(style_xaringan(outfile = tmpfile), tmpfile)
expect_true(file.exists(tmpfile))
expect_true(grepl("xaringanthemer", readLines(tmpfile)[3]))
})
test_that("style_xaringan() warns if base_font_size is not absolute", {
tmpfile <- tempfile(fileext = ".css")
expect_warning(
style_xaringan(outfile = tmpfile, base_font_size = "1em"),
"absolute units"
)
})
|
c831e18ce1ad8d2e4000e4d8f190a872bfffcdaf | 4b402d90385a6a291c4761d08adac6d5ce547d18 | /antweb.R | d199f2e25e93344bc0428b6dad21ae05c241ed55 | [] | no_license | karthik/antweb_paper | 273cf0425f3154f0afb742690c067c4c73e22b65 | 07337be82311cde0f16e69aa603649f48b5629e2 | refs/heads/master | 2020-05-27T12:35:37.434962 | 2014-10-23T15:05:48 | 2014-10-23T15:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,025 | r | antweb.R |
## @knitr counts
library(AntWeb)
genera <- aw_distinct("genus")$count
species <- aw_distinct("species")
species <- species$count
## @knitr how_many_species
madagascar <- aw_data(country = "Madagascar")
total_results <- madagascar$count
offset <- seq(0,ceiling(total_results), by = 1000)
madagascar_all <- lapply(offset, function(x) {
message(sprintf("finishing %s results", x))
output <- aw_data(country = "Madagascar", offset = x, quiet = TRUE)
return(output$data)
})
all_madagascar_data <- data.table::rbindlist(madagascar_all)
saveRDS(all_madagascar_data, file = "data/madagascar.rda", compress = 'xz')
write.csv(all_madagascar_data, file = "all_madagascar_data.csv")
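# The loop above pages through the AntWeb API 1000 records at a time via the
# offset argument; data.table::rbindlist() then stacks the per-page data frames.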
## @knitr elevation_gradient
message("placeholder for ...")
## @knitr latitude_gradient
message("placeholder for ...")
## @knitr across_habitats
message("placeholder for ...")
## @knitr two_localities
message("placeholder for ...")
## @knitr endemism
message("placeholder for ...")
## @knitr accumulation
message("placeholder for ...") |
63e89e861bb9084efd36049f01cb602b882b7065 | 6d96dbaeb9e3985a278e81cacb92eabed0908e1e | /R/create_dsproject.R | 74783a7d4729075d0eb0944f9539d8278edca045 | [
"MIT"
] | permissive | cimentadaj/dsproj | f23368f11ab5dec53b4999ac1159ce31a2669361 | aa99fa025921cf82524064935185b63d5d71a5a8 | refs/heads/master | 2020-04-13T18:42:51.477873 | 2019-02-10T20:27:17 | 2019-02-10T20:27:17 | 163,382,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,680 | r | create_dsproject.R | #' Creates a template of folders and files for the 'ideal' data science project
#'
#' @param path A path where to create the project template. Can be relative, absolute and non existent.
#' @param open whether to open the RStudio project or not. Set to
#' FALSE by default
#'
#' @details
#' The function accepts a valid path (either relative or absolute) and applies these steps:
#'
#' @details text describing parameter inputs in more detail.
#' \itemize{
#' \item{"Folders"}{Creates folders code, data, report and misc inside \code{path}}
#' \item{"RProjects"}{Creates an R project in \code{path}}
#' \item{"Git"}{Initializes a Git repository in \code{path}}
#' \item{"Documentation"}{Adds a README.Rmd for project purposes}
#' \item{"Package dependency"}{Installs and loads \code{packrat} for package dependency management}
#' \item{"Fresh start"}{Restarts R and opens the newly created .Rproj with packrat loaded}
#' }
#'
#' @return Nothing, it creates and edits several folders and files in \code{path}
#' @export
#'
#' @examples
#'
#' \dontrun{
#' create_dsproject()
#' }
#'
create_dsproject <- function(path, open = FALSE) {
stopifnot(is.character(path))
path <- normalizePath(path, winslash = .Platform$file.sep, mustWork = FALSE)
dirs_create <- file.path(path, c("code", "data", "report", "misc"))
for (folder in dirs_create) dir.create(folder, recursive = TRUE)
print_styler('Created folder ', dirs_create)
if (!requireNamespace("rmarkdown", quietly = TRUE)) {
print_styler("Installing rmarkdown for reporting")
cat("\n")
utils::install.packages("rmarkdown")
}
usethis::proj_set(path, force = TRUE)
usethis::use_rstudio()
print_styler("Created RStudio project")
cat("\n")
r <- git2r::init(usethis::proj_get())
print_styler("Created git repository")
usethis::use_git_ignore(c(".Rhistory", ".RData", ".Rproj.user"))
# I think this adds .Rbuildignore -- exclude
# Add a custom readme for ds projects
usethis::use_readme_rmd(open = FALSE)
# Add a set of preinstalled packages for every project
print_styler("Installing packrat for package dependency")
cat("\n")
unloadNamespace('packrat')
utils::install.packages("packrat")
write("packrat::on()", file.path(path, ".Rprofile"), append = TRUE)
write("options(repos = c(CRAN = 'https://cran.rstudio.com'))", file.path(path, ".Rprofile"), append = TRUE)
initial_styler(paste0("Set packrat mode on as default in", crayon::blue(" .Rprofile")))
print_styler("Activating packrat project")
cat("\n")
packrat::init(path, infer.dependencies = FALSE, enter = FALSE)
if (open) rstudioapi::openProject(usethis::proj_get())
invisible(TRUE)
}
|
4df02e56965e31ec8a15aeeee2542ac2245c3df0 | d1a87fe12e6f3eba49346d4d89c8f1a931e8715a | /Face_update/face_update.R | 4cfcabecb6da360c0c19cf64d2af04875cb0445d | [] | no_license | danmrc/azul | 9f557876557c046112a9374fcd7fabf612271090 | 87752b17778368b63ff43054a56b83048cc973c4 | refs/heads/master | 2023-08-22T01:13:39.342663 | 2023-08-04T22:10:26 | 2023-08-04T22:10:26 | 141,443,005 | 3 | 1 | null | 2023-07-11T17:31:52 | 2018-07-18T14:04:22 | HTML | UTF-8 | R | false | false | 632 | r | face_update.R | parse_website <- function(url){
require(xml2)
pag <- read_html(url)
fs <- xml_find_all(pag,xpath = "//h3[@class= 'item-title']/a")
ss <- xml_attr(fs,"href")
return(ss)
}
checkBlog <- function(newList,oldList,token){
require(Rfacebook)
teste <- prod(newList == oldList)
if(teste==1){
return("No updates")
} else{
readline(prompt= "New post. Press [enter] to continue")
new_post_url <- newList[newList != oldList][1]
new_post_url <- paste0("https://azul.netlify.com/",new_post_url)
updateStatus("Novo post", token = token, link = new_post_url)
}
}
save(obj, file = "Face_update/Lista.Rdata")
|
1dac1bac6c182ffed80613f44ec5cdc6fd63d6d9 | 58bf560d8a6dd1b6cca4a86bce02dff95eec244d | /man/it_hospbed.Rd | 75df3454cdbc1a6c83c429983e903165912606fb | [
"MIT"
] | permissive | c1au6i0/covid19census | 5576b1d8fd11395e34975e8df146676e7cb5e4e4 | 4a66bd27da2387e4c2ee25a0deb59f829abc53a6 | refs/heads/master | 2023-05-03T00:05:57.186159 | 2021-05-18T13:54:37 | 2021-05-18T13:54:37 | 271,893,792 | 4 | 0 | NOASSERTION | 2021-05-18T13:54:38 | 2020-06-12T21:32:31 | R | UTF-8 | R | false | true | 779 | rd | it_hospbed.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets_it.R
\docType{data}
\name{it_hospbed}
\alias{it_hospbed}
\title{hospital beds}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 21 rows and 5 columns.
}
\source{
\href{http://www.dati.salute.gov.it/}{Ministero della Salute}
}
\usage{
data(it_hospbed)
}
\value{
a tibble in wide format in which \code{bed_acute}, \code{bed_long}, \code{bed_rehab}, \code{bed_tot} refers to acute care, long term care,
rehabilitation and total beds, respectivelly
}
\description{
Inpatient hospital beds per 1000 people. Collected in 2017
}
\details{
\href{http://www.dati.salute.gov.it/dati/dettaglioDataset.jsp?menu=dati&idPag=18}{methodology}
}
\keyword{datasets}
|
1a5a569560105ee5717001f2ef97cc695ecf55e8 | 16cbcd4b55e9df1e91f2d69702790023c9cf6780 | /799435798.r | d3095c5443b480fb5b9b95b7ba5ed7f9e14ddc78 | [] | no_license | erex/MT3607-peer-review | 3f65c9a168f34e947fe0e531e773029384c19314 | bc0750e9a7fb5f2d0a7c7e35b34b3a80213d9fde | refs/heads/master | 2020-06-03T06:12:34.093705 | 2014-10-10T09:49:51 | 2014-10-10T09:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,095 | r | 799435798.r | #I confirm that the attached is my own work, except where clearly indicated in the text.
my.rnorm<-function(n,mean=0,sd=1){
#Purpose:
#Returns n pseudo-random variables from a normal distribution
#Inputs:
#n- number of observations: a numeric scalar,
#mean - mean: a numeric scalar with default 0,
#sd- standard deviation: a numeric scalar with default 1
#Outputs:
#a vector of n pseudo-random values from a normal distribution
#Stops the function and returns an error message if the arguments are invalid
if(is.numeric(n) & is.numeric(mean) & is.numeric(sd) ){
}else{
stop("invalid arguments")
}
if(sd < 0) stop("invalid arguments")
if(n <= 0) stop("invalid arguments")
if(n!=round(n)) stop("invalid arguments")
#Vector to contain the normally distributed deviates
normdev<-c(rep(0,n))
#for loop using the central limit method to find n normally distributed deviates
for(i in 1:n){
U<-runif(16,0,1)
x<-((sum(U)-8)*sqrt(12/16))*sd+mean
normdev[i]<-x
}
return(normdev)
}
my.rchisq<-function(n,df=1){
#Purpose:
#Returns n pseudo-random chi-squared distributed deviates
#Inputs:
#n- number of observations: a numeric scalar
#df-degrees of freedom: a numeric scalar with default 1
#Outputs:
#a vector of n pseudo-random chi-squared distributed deviates
#Stops the function and returns an error message if the arguments are invalid
if(is.numeric(n) & is.numeric(df)){
}else{
stop("invalid arguments")
}
if(df <= 0) stop("invalid arguments")
if(n <=0 ) stop("invalid arguments")
if(n!=round(n)) stop("invalid arguments")
if(df!=round(df)) stop("invalid arguments")
#Create vector for the pseudo-random chi-squared distributed deviates
chisqdev<-c(rep(0,n))
#for loop to calculate the chi-squared distributed deviates by summing
#the squares of normal random variables
for(i in 1:n){
z<-my.rnorm(df)
chi<-sum(z^2)
chisqdev[i]<-chi
}
return(chisqdev)
}
my.rf<-function(n,df1=1,df2=1){
#Purpose:
#Returns n pseudo-random F-distributed deviates
#Inputs:
#n- number of observations: a numeric scalar
#df1- degrees of freedom: a numeric scalar with default 1
#df2- degrees of freedom: a numeric scalar with default 1
#Outputs:
#a vector of n pseudo-random F-distributed deviates
#Stops the function and returns an error message if the arguments are invalid
if(is.numeric(n) & is.numeric(df1) & is.numeric(df2) ){
}else{
stop("invalid arguments")
}
if(df1 <= 0) stop("invalid arguments")
if(df2 <= 0) stop("invalid arguments")
if(n <= 0) stop("invalid arguments")
if(n!=round(n)) stop("invalid arguments")
if(df1!=round(df1)) stop("invalid arguments")
if(df2!=round(df2)) stop("invalid arguments")
#Vector to contain the f distributed deviates
ftestdev<-c(rep(0,n))
#For loop to calcualte the n f-distributed deviates
for(i in 1:n){
u<-my.rchisq(1,df1)
v<-my.rchisq(1,df2)
f<-(u/df1)/(v/df2)
ftestdev[i]<-f
}
return(ftestdev)
}
test.myrnorm<-function(n,mean=0,sd=1){
#Purpose:
#Test to check that my.rnorm recognises invalid inputted arguments
#and consequently doesn't produce a vector of incorrect numeric values
#(checks my.rnorm gives an error when a negative n or sd is inputted and
#when n isn't a whole number).
#Inputs:
#(Same as my.rnorm)
#n- number of observations: a numeric scalar,
#mean - mean: a numeric scalar with default 0,
#sd- standard deviation: a numeric scalar with default 1
#Outputs:
#Pass or Fail
x<-try(my.rnorm(n,mean,sd),silent=TRUE)
if(((sd<0) | (n<=0) | (n!=round(n))) & is.numeric(x)){
cat("Fail")
}else{
cat("Pass")
}
}
test.myrchisq<-function(n,df=1){
#Purpose:
#Test to check that my.rchisq recognises invalid inputted arguments
#and consequently doesn't produce a vector of incorrect values
#(checks my.rchisq gives an error when a negative n or df is inputted and
#when n or df aren't a whole number)
#Inputs:
#(Same as my.rchisq)
##n- number of observations: a numeric scalar
#df-degrees of freedom: a numeric scalar with default 1
#Outputs:
#Pass or Fail
x<-try(my.rchisq(n,df),silent=TRUE)
if(((n!=round(n)) | (n<=0) | (df!=round(df)) | (df<=0)) & is.numeric(x)){
cat("Fail")
}else{
cat("Pass")
}
}
test.myrf<-function(n,df1=1,df2=1){
#Purpose:
#Test to check that my.rf recognises invalid inputted arguments
#and consequently doesn't produce a vector of incorrect values
#(checks my.rf gives an error when a negative n,df1 or df2 is inputted and
#when n, df1 or df2 isn't a whole number)
#Inputs:
#(Same as my.rf)
#n- number of observations: a numeric scalar
#df1- degrees of freedom: a numeric scalar with default 1
#df2- degrees of freedom: a numeric scalar with default 1
#Outputs:
#Pass or Fail
x<-try(my.rf(n,df1,df2),silent=TRUE)
if(((n!=round(n))|(n<=0)|df1!=round(df1)|(df1<=0)|df2!=round(df2)|(df2<=0)) & is.numeric(x)){
cat("Fail")
}else{
cat("Pass")
}
}
outputtest<-function(n,mean=0,sd=1,df=1,df1=1,df2=1,test){
#Purpose:
#Test to check that the output of the function (when it hasn't given an error)
#is the correct amount of values and that is numeric
#Inputs:
#To test the function my.rnorm: test=1
#To test the function my.rchisq: test=2
#To test the function my.rf: test=3
#n- number of observations: a numeric scalar,
#mean - mean: a numeric scalar with default 0 (argument only used when test=1)
#sd- standard deviation: a numeric scalar with default 1 (argument only used when test=1)
#df-degrees of freedom: a numeric scalar with default 1 (argument only used when test=2)
#df1- degrees of freedom: a numeric scalar with default 1 (argument only used when test=3)
#df2- degrees of freedom: a numeric scalar with default 1 (argument only used when test=3)
#Outputs:
#Pass or Fail
switch(as.character(test),
"1"= (x<-my.rnorm(n,mean,sd)),
"2"= (x<-my.rchisq(n,df)),
"3"= (x<-my.rf(n,df1,df2)))
if(length(x)==n & is.numeric(x)){
cat("Pass")
}else{
cat("Fail")
}
}
|
84aa0d7c65b9eaa63ebdeb7fd77a5c6e2dfb4a44 | 39f7d071437cb3489029f0f751600b71ac798962 | /1a 使用R語言進行資料分析/助教課 Week_5/finalExamSol.R | 06e5a392dad0af947345ec51379a186549992ab6 | [] | no_license | evan950608/Evan-R-Programming | 070044950d51e2370d827c41c300160a169f3773 | 586a0f706b71b7e53ea78ad7e1b76be35de0ea81 | refs/heads/master | 2020-04-07T11:49:01.354918 | 2019-01-26T12:05:19 | 2019-01-26T12:05:19 | 158,342,221 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,702 | r | finalExamSol.R | #### 注意事項 ####
# 0. 請勿改動 angry_fruit.R,否則將導致此檔案中讀取資料出錯
# 1. 變數名稱請勿改動,若造成判斷錯誤一蓋不負責。
# 2. 請不要用 rm(list=ls()) 之類的東西,我們的 judge 會壞掉。
# 3. ggplot2 的 ggplot() 會回傳東西,第二大題的所有答案都請存到變數中
# ex: gg_exam <- ggplot(data=..., aes(...)) + ...
# 4. 提交答案之前請再次檢查變數存的東西是否符合題目要求。
# 5. 滿分不是一百分
#### 0 ####
# 0.0 (5%)
# 請自行查詢 require() 回傳值
# 請寫出程式碼 "若 沒安裝 rstudioapi 套件,則 安裝 rstudioapi。引入 rstudioapi "
if(!require('rstudioapi')){
install.packages('rstudioapi')
require('rstudioapi')
}
# 0.1 (5%)
# 已知 dirname(rstudioapi::getSourceEditorContext()$path) 會顯示當前.R檔案所在位置
# 請自行查詢 setwd() 和 dirname() 如何使用後,
# 寫出程式碼 "將當前 .R 檔案所在位置設為工作目錄"
# (如果寫不出來一樣請手動設定檔案當前目錄 final2 為 working directory)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
# 設定檔案當前目錄 final2 為 working directory 後才能 run 下面這行
# 請不要改動 angry_fruit.R 檔案
source('angry_fruit.R')
#### 1 ####
## 俊俊是賣憤怒水果的商人
## 以下是今天他所進貨的憤怒水果的資料
# 水果名稱
fruit_name
# 憤怒程度
anger
# 美味程度
deliciousness
# 1.0 (5%)
# 把三筆資料做成一個 dataframe 存到變數 angry_fruit_na
# column names 分別是 Name, Anger, Deliciousness
angry_fruit_na <- data.frame(Name=fruit_name, Anger=anger, Deliciousness=deliciousness)
angry_fruit_na
# 1.1 (5%)
# 將 anger 或是 delicious 為 NA 值的資料從 angry_fruit_na 移除後
# 將結果存入 angry_fruit
# column names 分別是 Name, Anger, Deliciousness
angry_fruit <- na.omit(angry_fruit_na)
angry_fruit
# 1.2 (5%)
# 水果不能太憤怒,容易過熟影響美味程度
# 俊俊不販賣不美味的憤怒水果
# 理想的憤怒區間為 [0, 100]、而且美味門檻 >= 50
# 請將 angry_fruit 照理想的憤怒區間和美味門檻篩選後
# 將結果存入 ideal_fruit
ideal_fruit <- angry_fruit[angry_fruit$Anger <= 100 & angry_fruit$Anger > 0 & angry_fruit$Deliciousness >= 50,]
# 1.3 (5%)
# 俊俊希望販賣的水果有接近的憤怒值
# 請將憤怒程值超過一個標準差以外的資料從 ideal_fruit 移除
# 並將結果存入 very_ideal_fruit
v <- ideal_fruit$Anger
very_ideal_fruit <- ideal_fruit[(v >= (mean(v) - sd(v))) & (v <= (mean(v) + sd(v))),]
very_ideal_fruit
# 1.4 (5%)
# 冠冠想吃美味的水果
# 請將 very_ideal_fruit 美味值超過 80% 位數(pr80以上)的資料存入 good_fruit
good_fruit <- very_ideal_fruit[very_ideal_fruit$Deliciousness >= quantile(very_ideal_fruit$Deliciousness, 0.8),]
good_fruit
# 1.5 (5%)
# 冠冠有選擇障礙
# 請隨機從 good_fruit 選出一個水果 存入 cm_fruit (存 Name 就好)
# 請用 R 的函數去隨機,不能自己想一個數字
cm_fruit <- as.character(sample(good_fruit$Name, 1))
cm_fruit
# 1.6 (10%)
# 冠冠很滿足,但他不會寫程式
# 請幫他寫一個函式 which_to_eat
# 傳入參數 angry_fruit_na (ex: which_to_eat(angry_fruit_na),輸入保證只有 angry_fruit_na)
# 回傳值為依照 1.1~1.5 步驟篩選後的水果名
# 並將輸出存進 cm_today_fruit
require('dplyr')
which_to_eat <- function(df){
df <- na.omit(df)
df <- df %>%
filter(Anger <= 100, Deliciousness >= 50) %>%
filter(abs(Anger - mean(.$Anger)) <= sd(.$Anger)) %>%
filter(Deliciousness >= quantile(.$Deliciousness, 0.8)) %>%
sample_n(1)
return(as.character(df$Name))
}
cm_today_fruit <- which_to_eat(angry_fruit_na)
cm_today_fruit
# 1.7 (5%)
# 請將 angry_fruit 的 row 依照美味度由大到小排序後存入 angry_fruit_rank
# 若美味度相等,依照憤怒度由大到小排序
# 都一樣的話,依照索引值由小到大排序
angry_fruit_rank <- arrange(angry_fruit, desc(Deliciousness), desc(Anger))
angry_fruit_rank
# 1.8 (10%)
############################################################################
# 已載入 get_50d() (寫在 angry_fruit.R 裡面) #
# 呼叫 get_50d() 會得到一個 list,包含50筆結構如 angry_fruit_na 的 #
# dataframe #
# 注意資料是隨機生成,每次呼叫會不同 #
############################################################################
#
# 俊俊的水果資料每天都會更新
# 俊俊想觀察過去50天水果的資訊,以多進貨冠冠會想吃的水果
# 請寫一個函式 past_50_info()
# 沒有參數
# 過去50天俊俊進貨的水果資訊請用 get_50d() 來產生
#
# 回傳值是一個結構如 angry_fruit_na 的 dataframe
# 第一個 col 為 水果名稱,請使用 fruit_name
# 第二個 col 為「這 50 天中,該水果的平均憤怒程度」,50天皆為 NA 的水果請設為 NaN
# 第三個 col 為「這 50 天中,該水果的平均美味程度」,50天皆為 NA 的水果請設為 NaN
# 請先忽略 NA 後再取平均
# column names 分別為 Name, avg_Anger, avg_Deliciousness
past_50_info <- function(){
past_50_days <- get_50d()
avg_Anger <- sapply(past_50_days, function(df) df$Anger) %>%
rowMeans(na.rm = T)
avg_Deliciousness <- sapply(fifty, function(df) df$Deliciousness) %>%
rowMeans(na.rm = T)
avg_angry_fruit <- data.frame(Name = fruit_name,
avg_Anger = avg_Anger,
avg_Deliciousness = avg_Deliciousness)
return (avg_angry_fruit)
}
#### 2 ####
require('ggplot2')
# 2.1 (10%)
# 利用 ggplot2 畫出內建資料集 airquality 中 Month 為 8 的資料
# Ozone 與 Temp 的 x-y 關係點圖
# 並把圖存到變數 gg1 中
gg1 <- airquality %>%
filter(Month == 8) %>%
ggplot(aes(Ozone, Temp)) + geom_point()
gg1
# 2.2 (10%)
# 利用 ggplot2 畫出內建資料集 mtcars 中 wt, mpg
# 的 x-y 關係點圖,並依據不同的 cyl 分出不同顏色
# 並把圖存到變數 gg2 中
gg2 <- mtcars %>%
ggplot(aes(wt, mpg, color=cyl)) +
geom_point()
gg2
# 2.3 (10%)
# 畫出內建資料集 airquality 中
# x 軸為不同月份 Month,y 軸為該月份 Ozone 的平均值
# 的長條圖,並把圖存到變數 gg3 中
gg3 <- airquality %>%
group_by(Month) %>%
summarise(OzoneMean = mean(Ozone, na.rm=T)) %>%
ggplot(aes(Month, OzoneMean)) +
geom_bar(stat="identity")
gg3
# 2.4 (15%)
# (請依照 0.0 0.1 提示或手動將 .R 檔案當前位置設為 working directory)
# 讀取 "106_student.csv" 檔,存到變數 student 中
# 這份 csv 是 106 學年度全台各地大學專院校的學生人數
# 請先把「等級別」為 "B 學士"、「日間.進修別」為 "D 日" 的資料篩選出來
# 再畫出臺北市大學與非臺北市大學的一年級人數總數的長條圖
# x 軸為是否位於臺北市,y 軸為一年級學生總數 (記得要把男女加起來)
# 並把圖存到變數 gg4 中
student <- read.csv('106_student.csv', sep=",", encoding='utf8')
student
view(student)
gg4 <- student %>%
filter(等級別 == "B 學士", 日間.進修別 == "D 日") %>%
# use gsub() to replace ',' as ''
# ex: '1,024'
transmute(freshman = as.numeric(gsub(",", "", 一年級男生)) + as.numeric(gsub(",", "", 一年級女生)),
atTaipei = (縣市名稱 == "30 臺北市")) %>%
ggplot(aes(atTaipei, freshman)) +
geom_bar(stat="identity")
gg4
|
bd7799bf6f31a58a482690980539bd8ca50838b9 | 7bdee0060e806b64dede482401398149cac7271e | /cointegration/pairs_plot.R | 2768fee098d1c7f5f66f0c8a1ac1e9b063f11473 | [] | no_license | maxim5/stat-arbitrage-r | b775133cdb0aa2e906fde3bcf92dfdf542ab5393 | 5e21c623015f2e8df7bd9ae07435da61bb188669 | refs/heads/master | 2022-11-29T07:58:23.162422 | 2015-08-26T09:10:55 | 2015-08-26T09:10:55 | 287,816,691 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,412 | r | pairs_plot.R | #!/usr/bin/Rscript
suppressMessages(library(ggplot2))
suppressMessages(require(reshape2))
invisible(Sys.setlocale("LC_TIME", "en_US.UTF-8"))
load("pairs.RData")
Plot.Price = function(symbol1, symbol2) {
series1 = all.logs[[symbol1]]
series2 = all.logs[[symbol2]]
dates = as.Date(rownames(all.logs))
data.to.plot = data.frame(exp(series1), exp(series2), dates)
colnames(data.to.plot) = c(symbol1, symbol2, "Date")
data.to.plot = melt(data.to.plot, id="Date")
plot = ggplot(data.to.plot, aes(x=Date, y=value, color=variable)) +
geom_line() +
labs(title=paste0(symbol1, " vs ", symbol2),
x="Time", y="Price") +
scale_colour_discrete(name="Legend")
print(plot)
}
Plot.Logs = function(symbol1, symbol2) {
series1 = all.logs[[symbol1]]
series2 = all.logs[[symbol2]]
dates = as.Date(rownames(all.logs))
data.to.plot = data.frame(series1, series2, dates)
colnames(data.to.plot) = c(symbol1, symbol2, "Date")
data.to.plot = melt(data.to.plot, id="Date")
plot = ggplot(data.to.plot, aes(x=Date, y=value, color=variable)) +
geom_line() +
labs(title=paste0("Log(", symbol1, ") vs Log(", symbol2, ")"),
x="Time", y="Log(Price)") +
scale_colour_discrete(name="Legend")
print(plot)
}
Plot.Spread = function(spread, x=NULL, title="Spread") {
mean = mean(spread)
sd = sd(spread)
stats = boxplot.stats(spread)
if (is.null(x)) {
x = index(spread)
}
plot(x=x, y=spread, type="l", lwd=2, col="darkorchid",
xlab="Time", ylab="Spread", main=title)
Add.HLine = function(level, color) {
abline(h=level, col=color)
text(x[1], level, signif(level, 3), col=color, adj=c(0.5, 0))
}
Add.HLine(mean, "aquamarine3")
Add.HLine(mean+sd, "aquamarine4")
Add.HLine(mean-sd, "aquamarine4")
Add.HLine(stats$stats[1], "firebrick1")
Add.HLine(stats$stats[5], "firebrick1")
}
Plot.Pair = function(symbol1, symbol2) {
Plot.Price(symbol1, symbol2)
Plot.Logs(symbol1, symbol2)
series1 = all.logs[[symbol1]]
series2 = all.logs[[symbol2]]
dates = as.Date(rownames(all.logs))
gamma = as.numeric(cointegrated.pairs[cointegrated.pairs$Symbol1 == symbol1 &
cointegrated.pairs$Symbol2 == symbol2, "Gamma"])
spread = series1 - gamma * series2
Plot.Spread(spread=spread, x=dates,
title=paste0("Log(", symbol1, ") - ", signif(gamma, 3), "*Log(", symbol2, ")"))
}
|
afd471986821ab83c70ad6bd8e3990c4423f09e9 | f73e7dbcc24064028c81f9f778f9892bd55d9066 | /shiny/ui.R | e8f4a2e0534c9642cc3952490b60ed30fff55bf2 | [] | no_license | kchaaa/INFO-498F-Final-Project | 030d235760ad20a62e63cf9afcf9a07174ea61a1 | 63aad059a020e3ef784de29a7cd0c2af1e6a277a | refs/heads/master | 2021-01-10T08:49:05.885601 | 2016-03-11T21:21:33 | 2016-03-11T21:21:33 | 52,401,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 232 | r | ui.R | library(shiny)
library(plotly)
library(ggplot2)
shinyUI(fluidPage(
titlePanel("Flint Water Contamination"),
sidebarLayout(
sidebarPanel(
h4("Test Plot")),
mainPanel(
plotOutput("plot1")
)
)
)
) |
30092f46b9fa632a94afab8325a7969db674f14c | ae5a7c06fc184ff1c4029c1479a0a31f2cdd481a | /man/ScoreRushing.Rd | af0741b8fdd0bb41b160c102be8636de8dbe6efa | [] | no_license | kuhnrl30/Touchdown | 3726c7a2bb2cfba6ad67b7eec4d49ee8b91b13c4 | 36a620f134fb4669d709e60daf19e395b2db64ec | refs/heads/master | 2021-01-19T02:20:25.826833 | 2016-07-25T03:49:59 | 2016-07-25T03:49:59 | 41,216,698 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 962 | rd | ScoreRushing.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ScoreRushing.R
\name{ScoreRushing}
\alias{ScoreRushing}
\title{Score the rushing stats}
\usage{
ScoreRushing(x, RushingYds = c(10, 1), RushingTD = 6, FumbleLost = -2)
}
\arguments{
\item{x}{dataframe of player statistics data. Should be
of the format produced by the GetStats() function.}
\item{RushingYds}{Vector with the yardage increment and
point value. As an example, a player is awarded 1 point for
every 10 yards, then the score rule is of the format c(10,1)}
\item{RushingTD}{Points awarded for rushing touchdowns}
\item{FumbleLost}{Points awarded for each fumble lost.
Since this function is for offensive stats, a fumble lost would occur
when the defense recovers the fumble.}
}
\value{
1 by nrow(x) matrix with the total score for each row
}
\description{
Applies scoring rules to the player statistics. Default
values are set to the standard scoring values.
}
|
3ad426a297e95eeb7c1e5fcbca87562d796a73c9 | 92befee27f82e6637c7ed377890162c9c2070ca9 | /R/summary.lsem.R | f1a4e61558f390df2c6ec42f1e672834d3b21603 | [] | no_license | alexanderrobitzsch/sirt | 38e72ec47c1d93fe60af0587db582e5c4932dafb | deaa69695c8425450fff48f0914224392c15850f | refs/heads/master | 2023-08-31T14:50:52.255747 | 2023-08-29T09:30:54 | 2023-08-29T09:30:54 | 95,306,116 | 23 | 11 | null | 2021-04-22T10:23:19 | 2017-06-24T15:29:20 | R | UTF-8 | R | false | false | 3,312 | r | summary.lsem.R | ## File Name: summary.lsem.R
## File Version: 0.412
#-- summary lsem
summary.lsem <- function( object, file=NULL, digits=3, ... )
{
# open sink for a file
sirt_osink( file=file )
cat('-----------------------------------------------------------------\n')
cat('Local Structural Equation Model \n\n')
#-- print packages
packages <- c('sirt', 'lavaan')
if (object$use_lavaan_survey){
packages <- c(packages, 'lavaan.survey')
}
sirt_summary_print_packages(packages=packages)
#-- print R session
cat('\n')
sirt_summary_print_rsession()
cat(paste0('Function \'sirt::lsem.estimate\', type=\'', object$type,'\''), '\n\n')
#- print call
sirt_summary_print_call(CALL=object$CALL)
#-- print computation time
sirt_summary_print_computation_time_s1(object=object)
# space between equality sign
sp_eq <- paste0( c(' ', '=', ' '), collapse='')
cat( paste0( 'Number of observations in datasets', sp_eq,
round(object$N, digits) ), '\n')
cat( paste0( 'Used observations in analysis', sp_eq,
round(object$nobs, digits) ), '\n')
cat('Used sampling weights:', ! object$no_sampling_weights, '\n')
if ( object$type=='LSEM'){
cat( paste0( 'Bandwidth factor', sp_eq, round(object$h,digits) ), '\n')
cat( paste0( 'Bandwidth', sp_eq, round(object$bw,digits) ), '\n')
cat( paste0( 'Number of focal points for moderator', sp_eq,
length(object$moderator.grid ) ), '\n')
cat('\n')
cat('Used joint estimation:', object$est_joint, '\n')
cat('Used sufficient statistics:', object$sufficient_statistics, '\n')
cat('Used local linear smoothing:', object$loc_linear_smooth, '\n')
cat('Used pseudo weights:', object$use_pseudo_weights, '\n')
cat('Used lavaan package:', TRUE, '\n')
cat('Used lavaan.survey package:', object$use_lavaan_survey, '\n\n')
cat('Mean structure modelled:', object$is_meanstructure, '\n')
if (object$class_boot){
v1 <- paste0('\nStatistical inference based on ', object$R,
' bootstrap samples.')
cat(v1,'\n')
}
}
if ( object$type=='MGM'){
cat( paste0( 'Number of groups for moderator=',
length(object$moderator.grid ) ), '\n')
}
cat('\nlavaan Model\n')
cat(object$lavmodel)
if (object$est_joint){
cat('\n\n')
cat('Global Fit Statistics for Joint Estimation\n\n')
obji <- object$fitstats_joint
sirt_summary_print_objects(obji=obji, digits=digits)
}
cat('\n\n')
cat('Parameter Estimate Summary\n\n')
obji <- object$parameters_summary
sirt_summary_print_objects(obji=obji, digits=digits, from=2)
cat('\n')
cat('Distribution of Moderator: Density and Effective Sample Size\n\n')
cat( paste0('M=', round(object$m.moderator, digits), ' | SD=',
round(object$sd.moderator, digits), '\n\n') )
obji <- object$moderator.density
sirt_summary_print_objects(obji=obji, digits=digits, from=1)
cat('\n')
obji <- object$moderator.stat
sirt_summary_print_objects(obji=obji, digits=digits, from=2)
# close file
sirt_csink(file)
}
|
fd26877562d244d617b56f04f2ec149e1201f122 | 2a0e90441bb5edc22344aff9019dea4d825183bf | /auto_learner_manish_1.r | 1e7be0463b39cc70d1d21f31585d6a32fc6b0a24 | [] | no_license | srijan55/1ml | bb06ac62601beb6a87d4823564bd6fe487fd930f | 802f839e719a6a351c7d1b9b0695e995c4acfaf0 | refs/heads/master | 2021-01-22T06:58:50.134172 | 2015-07-02T02:07:45 | 2015-07-02T02:07:45 | 38,128,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,677 | r | auto_learner_manish_1.r | library(e1071)
library(Matrix)
library(SparseM)
##########################
## Load raw training data
##########################
rawdata<- read.csv("train_category.dat", sep="\t", nrows = 100 )
################################
## Feature Engineering
#################################
rawdata$UserID <-as.factor(rawdata$UserID) # Convert UserId to factor
rawdata.userid <- rawdata$UserID
###### Convert to a sparse matrix on events
s<- sparse.model.matrix(~0+Event,data=rawdata)
#multiply with count
y <- s*rawdata$Count
rawdata <-cbind( rawdata.userid, sparse.model.matrix(~0+Event+Count,data=rawdata))
rawdata$UserID <-rawdata.userid
###### Multiply count into columns to get the actual sparse matrix
col_num <- ncol(rawdata)
rawdata <-data.frame( UserID=rawdata[,1], (rawdata[,c(-1,-col_num)]*rawdata[,col_num]))
###### Aggregate on UserId's
rawdata <- aggregate(. ~ UserID, FUN=sum, data = rawdata)
#####Add labels from labeldata
labeldata<- read.csv("trainLabel.dat", sep="\t")
rawdata <- merge( rawdata, labeldata, by="UserID", all=FALSE)
rm(labeldata)# get rid of labeldata not needed now
gc()
#####Add weights to non-zero sparse factors
col_num <- ncol(rawdata)
weight <- rawdata[2:(col_num-1)]*5
#rawdata <- data.frame(UserID=rawdata$UserID, weight, Label=rawdata$Label)
rawdata <- data.frame(weight, Label=rawdata$Label)
rawdata$Label <- as.factor(rawdata$Label)
rm(weight)
#####################################
# TODO: Work with demographic data
#####################################
#demodata <- read.csv("../u360_demodata.tsv", sep = "\t", header = FALSE)
###demodata.age <- (demodata$PreGenderProb>0.6)? demodata$PreGender: demodata$RegGender
###names(demodata)<- c("UserID", "RegCountry", "RegBirth", "RegGender", "PreGender", "PreGenderProb", "RegAgeGrp", "PreAgeGrp", "PreAgeGrpProb")
#weighteddata <- weighteddata[,-1]
# augmenteddata<- merge(weighteddata, demodata, by="UserID", all.x=TRUE)
##########################
## train the model
##########################
data.model <- naiveBayes(Label~., data = rawdata)
##########################
## Load raw test data
##########################
rawdata<- read.csv("test_category.dat", sep="\t" )
labeltestdata<- read.csv("testID.dat", sep="\t")
################################
## Feature Engineering on test data
#################################
rawdata$UserID <-as.factor(rawdata$UserID) # Convert UserId to factor
###### Convert to a sparse matrix on events
rawdata<-data.frame( rawdata$UserID, model.matrix(~0+Event+Count,data=rawdata))
###### Multiply count into columns to get the actual sparse matrix
col_num <- ncol(rawdata)
rawdata <-data.frame( UserID=rawdata[,1], rawdata[,c(-1,-col_num)]*rawdata[,col_num])
###### Aggregate on UserId's
rawdata <- aggregate(. ~ UserID, FUN=sum, rawdata)
#####Add weights to non-zero sparse factors
col_num <- ncol(rawdata)
weight <- rawdata[2:(col_num)]*5
#rawdata <- data.frame(UserID=rawdata$UserID, weight, Label=rawdata$Label)
rawdata <- data.frame(weight)
rm(weight)
#########################
# Get the predictions
###########################
data.predictions <- predict(data.model, rawdata, type = "class")
#######################
# Create desired output
#######################
#####Add users from test ID's
labeldata<- read.csv("testID.dat", sep="\t")
labeldata <- data.frame(UserID=as.factor(labeldata$UserID))
rawdata<- data.frame(UserID=labeldata$UserID, Label=data.predictions)
#####Add users and labels from from train_label data
labeldata<- read.csv("trainLabel.dat", sep="\t")
labeldata <- data.frame(UserID=as.factor(labeldata$UserID), Label=as.factor(labeldata$Label))
rawdata <- merge( rawdata, labeldata, by="UserID", all=FALSE)
rawdata<-rbind(rawdata, labeldata)
rm(labeldata)
rm(data.predictions)
gc()
write.csv(file = "auto_output.csv", x=rawdata)
#rawtestdata$UserID <-as.factor(rawtestdata$UserID)
#sparsetestdata<-data.frame( rawtestdata$UserID, model.matrix(~0+Event+Count,data=rawtestdata))
#col_num <- ncol(sparsetestdata)
#multipledtestdata <-data.frame( UserID=sparsetestdata[,1], sparsetestdata[,c(-1,-col_num)]*sparsetestdata[,col_num])
#aggregatetestdata <- aggregate(. ~ UserID, FUN=sum, multipledtestdata)
#labelledtestdata <- merge( aggregatetestdata, labeltedata, by="UserID", all=FALSE)
#col_num <- ncol(aggregatetestdata)
#weight <- aggregatetestdata[2:(col_num-1)]*5
#weightedtestdata <- data.frame(UserID=aggregatetestdata$UserID, weight)
#weighteddata$Label <- as.factor(weighteddata$Label)
#random.rows.train <- sample(1:nrow(weighteddata), 0.5*nrow(weighteddata), replace=F)
#weighteddata.train <- weighteddata[random.rows.train,]
#dim(weighteddata.train)
## select the other 1/2 left as the testing data
#random.rows.test <- setdiff(1:nrow(weighteddata),random.rows.train)
#weighteddata.test <- weighteddata[random.rows.test,]
#dim(weighteddata.test)
## fitting decision model on training set
#weighteddata.model <- naiveBayes(Label~., data = weighteddata)
## MODEL EVALUATION
## make prediction using decision model
#weighteddata.test.predictions <- predict(weighteddata.model, weightedtestdata, type = "class")
## extract out the observations in testing set
#weighteddata.test.observations <- weighteddata.test$Label
## show the confusion matrix
#confusion.matrix <- table(weighteddata.test.predictions, weighteddata.test.observations)
#confusion.matrix
## calculate the accuracy in testing set
#accuracy <- sum(diag(confusion.matrix)) / sum(confusion.matrix)
#accuracy
#predictedata<- data.frame(UserID=weightedtestdata$UserID, Label=weighteddata.test.predictions)
#labeldata <- data.frame(UserID=as.factor(labeldata$UserID), Label=as.factor(labeldata$Label))
#x<-rbind(predictedata, labeldata)
|
74ccce38fd397f404e954cf868cc6c776fb26e74 | 2d88e86736d81b32e957b62bd8b0041e2a9778ad | /R/scores.tables.tweak.R | 1e4cae57350c86dd2118b7c9151872ffa2c8a421 | [] | no_license | cran/amber | c1659595049f230f54db3893704fc67ddb2429ed | e6ef59a25270413a1875c84feac786551bf69315 | refs/heads/master | 2021-07-23T06:25:02.408885 | 2020-08-28T10:20:02 | 2020-08-28T10:20:02 | 212,134,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,522 | r | scores.tables.tweak.R | ################################################################################
#' Tweak summary table
#' @description This function allows the user to tweak the summary table computed
#' by \link{scores.tables}. Contrary to \link{scores.tables}, this function can be used
#' to create a single summary table that includes the most important metrics only.
#' The user can specify what variables to include and in what order they should appear.
#' @param myVariables An R object with variable names of variables that should be included in table, e.g. c('GPP', 'RECO', 'NEE')
#' @param myCaption A string that is used as table caption, e.g. 'Globally averaged statistical metrics'.
#' @param inputDir A string that gives the input directory, e.g. '/home/project/study'.
#' @param outputDir A string that gives the output directory, e.g. '/home/project/study'. The output will only be written if the user specifies an output directory.
#' @return One table in LaTeX format that shows a subset of statistical metrics
#' @examples
#' library(amber)
#' library(classInt)
#' library(doParallel)
#' library(foreach)
#' library(Hmisc)
#' library(latex2exp)
#' library(ncdf4)
#' library(parallel)
#' library(raster)
#' library(rgdal)
#' library(rgeos)
#' library(scico)
#' library(sp)
#' library(stats)
#' library(utils)
#' library(viridis)
#' library(xtable)
#'
#' myInputDir <- paste(system.file('extdata', package = 'amber'), 'scores', sep = '/')
#' myVariables <- c('GPP', 'LAI', 'ALBS')
#' scores.tables.tweak(myVariables = myVariables, inputDir = myInputDir)
#' @export
scores.tables.tweak <- function(myVariables, myCaption = "Globally averaged statistical metrics", inputDir = getwd(), outputDir = FALSE) {
# summary table with globally averaged inputs for computing scores
my.list <- list.files(path = inputDir, pattern = "scoreinputs_")
my.files <- paste(inputDir, my.list, sep = "/")
data <- lapply(my.files, utils::read.table)
data <- do.call("rbind", data)
colnames(data)
myOrder <- seq(1, length(myVariables), 1)
myOrder <- data.frame(myVariables, myOrder)
colnames(myOrder) <- c("variable.name", "order")
data <- merge(data, myOrder, by = "variable.name")
data <- data[order(data$order, data$ref.id), ]
data <- subset(data, select = -c(order))
colnames(data) <- c("Name", "Variable", "Reference", "Unit", "$v_{mod}$", "$v_{ref}$", "Bias", "Bias (\\%)", "$\\sigma_{ref}$",
"$\\epsilon_{bias}$ (-)", "$S_{bias}$ (-)", "$rmse$", "$crmse$", "$\\sigma_{ref}$", "$\\epsilon_{rmse}$ (-)", "$S_{rmse}$ (-)",
"$max_{cmod}$", "$max_{cref}$", "$\\theta$ (months)", "$S_{phase}$ (-)", "$iav_{mod}$", "$iav_{ref}$", "$\\epsilon_{iav}$ (-)",
"$S_{iav}$", "$\\sigma_{\\overline{v_{mod}}}$", "$\\sigma_{\\overline{v_{ref}}}$", "$\\sigma$ (-)", "$R$ (-)", "$S_{dist}$ (-)")
rownames(data) <- c() # omit rownames
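    # Note: the column headers above are LaTeX math strings; the identity
    # sanitize.text.function passed to print.xtable() below writes them to the
    # .tex file unescaped.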
# Make a table that only includes selected reference data and metrics
metricsTable <- data[c(1, 3, 4, 5, 6, 7, 8, 13, 19, 23, 27, 28)] # selection of variables
rownames(metricsTable) <- c() # omit rownames
# convert to LaTeX
metricsTable <- xtable::xtable(metricsTable)
xtable::caption(metricsTable) <- myCaption
    if (outputDir != FALSE) {
        xtable::print.xtable(metricsTable, include.rownames = FALSE, label = "tab:global_stats", type = "latex",
            file = file.path(outputDir, "metricsTable.tex"),
            caption.placement = "top", sanitize.text.function = function(x) {
                x
            })
    }
}
################################################################################
## File: /16 Dates and Times.R  (repo: nmoorenz/R4DS)
################################################################################
############################################
# 16 Dates and Times
library(tidyverse)
library(lubridate)
library(nycflights13)
# 16.2 Creating date/times
today()
now()
# 16.2.1 From strings
# ymd() mdy() dmy()
ymd("2017-01-31")
ymd(20170630)
ymd_hm("2017-12-25 10:00", tz = "NZ")
# 16.2.2 from components
flights %>%
select(year, month, day, hour, minute) %>%
mutate(departure = make_datetime(year, month, day, hour, minute))
# modulus arithmetic for some funky times
make_datetime_100 <- function(year, month, day, time) {
make_datetime(year, month, day, time %/% 100, time %% 100)
}
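# e.g. the flights value 517 encodes 05:17, so 517 %/% 100 gives the hour (5)
# and 517 %% 100 gives the minute (17)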
flights_dt <- flights %>%
filter(!is.na(dep_time), !is.na(arr_time)) %>%
mutate(
dep_time = make_datetime_100(year, month, day, dep_time),
arr_time = make_datetime_100(year, month, day, arr_time),
sched_dep_time = make_datetime_100(year, month, day, sched_dep_time),
sched_arr_time = make_datetime_100(year, month, day, sched_arr_time)
) %>%
select(origin, dest, ends_with("delay"), ends_with("time"))
flights_dt
# visualise departures in a year
flights_dt %>%
ggplot(aes(dep_time)) +
geom_freqpoly(binwidth = 86400)
# or in a single day
flights_dt %>%
filter(dep_time < ymd(20130102)) %>%
ggplot(aes(dep_time)) +
geom_freqpoly(binwidth = 600)
# 16.2.3 From other types
as_datetime(today())
as_date(now())
# Unix Epoch is 1970-01-01
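# offsets count seconds (for date-times) or days (for dates) since the epoch:
as_datetime(60 * 60 * 10)  # "1970-01-01 10:00:00 UTC"
as_date(365 * 10 + 2)      # "1980-01-01"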
########
# 16.2.4 Exercises
# invalid
ymd(c("20101010", "bananas"))
# tzone
today(tzone = "NZ")
# lubridate functions
d1 <- mdy("January 1, 2010")
d2 <- ymd("2015-Mar-07")
d3 <- dmy("06-Jun-2017")
d4 <- mdy(c("August 19 (2015)", "July 1 (2015)"))
d5 <- mdy("12/30/14") # Dec 30, 2014
######
# 16.3 Date-time components
datetime <- ymd_hms("2016-07-08 12:34:56")
year(datetime)
month(datetime)
mday(datetime) # day of month
yday(datetime) # day of year
wday(datetime) # day of week
month(datetime, label = TRUE) # abbreviated name of month
wday(datetime, label = TRUE, abbr = FALSE) # full name of weekday
flights_dt %>%
mutate(wday = wday(dep_time, label = TRUE)) %>%
ggplot(aes(x = wday)) +
geom_bar()
# flights leaving on the hour or half hour don't have as much delay
flights_dt %>%
mutate(minute = minute(dep_time)) %>%
group_by(minute) %>%
summarise(
avg_delay = mean(arr_delay, na.rm = TRUE),
n = n()) %>%
ggplot(aes(minute, avg_delay)) +
geom_line()
sched_dep <- flights_dt %>%
mutate(minute = minute(sched_dep_time)) %>%
group_by(minute) %>%
summarise(
avg_delay = mean(arr_delay, na.rm = TRUE),
n = n())
ggplot(sched_dep, aes(minute, avg_delay)) +
geom_line()
ggplot(sched_dep, aes(minute, n)) +
geom_line()
# 16.3.2 Rounding (good for grouping)
flights_dt %>%
count(week = floor_date(dep_time, "week")) %>%
ggplot(aes(week, n)) +
geom_line()
# 16.3.3 setting components
(datetime <- ymd_hms("2016-07-08 12:34:56"))
year(datetime) <- 2010
month(datetime) <- 01
hour(datetime) <- 02
datetime
update(datetime, year = 2020, month = 4, mday = 22, minute = 22)
# large values will roll over, negatives go backwards
ymd("2015-01-01") %>% update(mday = 35)
ymd("2015-03-01") %>% update(hour = -20)
flights_dt %>%
mutate(dep_hour = update(dep_time, yday = 1)) %>%
ggplot(aes(dep_hour)) +
geom_freqpoly(binwidth = 600)
########
# 16.3.4 Exercises
flights_dt %>%
mutate(
dep_hour = update(dep_time, yday = 1),
dep_month = month(dep_time)
) %>%
ggplot(aes(dep_hour, colour = dep_month, group = dep_month)) +
geom_freqpoly(binwidth = 600)
# dep_time, sched_dep_time, dep_delay
flights_dep <- flights %>%
filter(!is.na(dep_time)) %>%
mutate(
dep_time = make_datetime_100(year, month, day, dep_time),
sched_dep_time = make_datetime_100(year, month, day, sched_dep_time),
est_dep_time = sched_dep_time + dep_delay * 60
) %>%
select(origin, dest, ends_with("dep_time"), dep_delay) %>%
filter(dep_time != est_dep_time)
flights_dep
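# the remaining rows are overnight flights: dep_time is built from the
# scheduled date, so actual departures after midnight get the wrong day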
# adjusting timezones for airports and flight times!
# average delay time over day
flights_dt %>%
mutate(dep_hour = update(sched_dep_time, yday = 1)) %>%
group_by(dep_hour) %>%
mutate(delays = mean(dep_delay)) %>%
ggplot(aes(dep_hour, delays)) +
geom_line()
# day of week
flights_dt %>%
mutate(weekday = wday(sched_dep_time)) %>%
group_by(weekday) %>%
mutate(delays = mean(dep_delay)) %>%
ggplot(aes(weekday, delays)) +
geom_line()
# carats and sched_dep_time
flights_dt %>%
mutate(dep_hour = update(sched_dep_time, yday = 1, hour = 1)) %>%
ggplot(aes(dep_hour)) +
geom_freqpoly()
diamonds %>%
ggplot(aes(carat)) +
geom_freqpoly()
######
# 16.4 Time spans
# durations, periods, intervals
# lubridate always measures duration in seconds - vectorised construction
dseconds(20)
dminutes(30)
dhours(36)
ddays(0:7)
dweeks(2) + dyears(2)
today() + ddays(1)
today() + 1
# periods more human like
seconds(15)
minutes(15)
hours(15)
days(15)
months(15)
weeks(15)
years(0:3)
10 * months(10)
today() + hours(50)
# can use these to fix some times in the flights data
flights_dt %>%
filter(arr_time < dep_time)
flights_dt <- flights_dt %>%
mutate(
overnight = arr_time < dep_time,
arr_time = arr_time + days(overnight * 1),
sched_arr_time = sched_arr_time + days(overnight * 1)
)
flights_dt %>%
filter(arr_time < dep_time)
# 16.4.3 Intervals
dyears(1) / ddays(365) # should return one because both are actually in seconds
years(1) / days(1) # warning because not always true (leap year)
# more accurate to use intervals
next_year <- today() + years(1)
(today() %--% next_year) / ddays(1)
(today() %--% next_year) %/% days(1)
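# %/% on an interval does integer division, dropping any fractional day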
########
# 16.4.5 Exercises
# there's no dmonths() here because months have no fixed length in seconds
# overnight is boolean, only multiplies if true
# vector of dates first of every month in 2015
ymd("2015-01-01") + months(0:11)
make_date(year(today()), 1, 1) + months(0:11)
# function for age from birthdate
my_age <- function(birthdate) {
(birthdate %--% today()) / dyears(1)
}
my_age(ymd("1984-09-20"))
# not wrong: dividing a one-year interval by months(1) gives exactly 12
(today() %--% (today() + years(1))) / months(1)
######
# 16.5 Timezones
Sys.time()
Sys.timezone()
(x1 <- ymd_hms("2015-06-01 12:00:00", tz = "America/New_York"))
(x2 <- ymd_hms("2015-06-01 18:00:00", tz = "Europe/Copenhagen"))
(x3 <- ymd_hms("2015-06-02 04:00:00", tz = "Pacific/Auckland"))
# these are all the same!
x1 - x2
x2 - x3
# can combine these
(x4 <- c(x1, x2, x3))
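# note: c() drops the timezone attribute, so x4 prints in the local timezone;
# use with_tz() to change the display zone or force_tz() to change the instant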
################################################################################
## File: /old_versions/toyData.R  (repo: kathiesun/TReC_matnut)
################################################################################
setwd("~/matnut/src")
library(rstan)
library(tidyverse)
library(coda)  # HPDinterval(), mcmc(), effectiveSize() used below come from coda
# ---------------------------------
# Generate toy data
# ---------------------------------
set.seed(1)
nmice=2
ngenes=3
nkmer=4
total_counts_per_kmer = pk_matrix = data = list()
pg = rbeta(ngenes, 1, 1)
total_counts_gene = floor(runif(ngenes,0,1) * 10000)
for(j in 1:nmice){
pk_matrix[[j]] = t(sapply(1:ngenes, function(x) rbeta(nkmer,total_counts_gene[x]*pg[x] + 1,(1-pg[x])*total_counts_gene[x]+1)))
total_counts_per_kmer = do.call("rbind", lapply(total_counts_gene, function(x) rpois(nkmer,x/nkmer)))
counts_per_kmer_a1 = sapply(1:length(pk_matrix[[j]]), function(x) rbinom(1, t(total_counts_per_kmer)[x], t(pk_matrix[[j]])[x]))
temp_dat1 = cbind(allele = 1,
count = counts_per_kmer_a1,
kmer = seq(1:nkmer),
gene = rep(1:ngenes, each=nkmer),
mouse = j)
counts_per_kmer_a2 = sapply(1:length(pk_matrix[[j]]), function(x) rbinom(1, t(total_counts_per_kmer)[x], t(1-pk_matrix[[j]])[x]))
temp_dat2 = cbind(allele = 2,
count = counts_per_kmer_a2,
kmer = seq(1:nkmer),
gene = rep(1:ngenes, each=nkmer),
mouse = j)
data[[j]] = rbind(temp_dat1, temp_dat2)
}
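# `data` now holds, for each mouse, kmer-level counts for both alleles in long format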
data <- do.call("rbind", data)
as_tibble(data) %>%
filter(mouse == 1) %>%
group_by(gene, kmer) %>%
summarise(y1 = count[allele == 1],
sum = (count[allele == 1] + count[allele == 2]),
ratio = count[allele == 1] / (count[allele == 1] + count[allele == 2])) %>%
filter(gene==1) ->
data_model
confInt %>% filter(gene_name == "Reps2", Pup.ID == 379) %>%
group_by(pos) %>% dplyr::slice(1) -> data_model
## Create Stan data
standat <- list(N = nrow(remNanRatios),
K = length(unique(remNanRatios$pos)),
G = length(unique(remNanRatios$gene_name)),
P = 1,
y_gk = remNanRatios$count,
n_gk = remNanRatios$total,
kmer = as.numeric(as.factor(remNanRatios$pos)),
gene = as.numeric(as.factor(remNanRatios$gene_name)))
fileName <- "matnut/logit_binom.stan"
stan_code <- readChar(fileName, file.info(fileName)$size)
cat(stan_code)
chains=2
iter=10000
warmup=round((floor((iter*0.25)/(iter/10))*(iter/10)))  # ~25% of iter, rounded down to a multiple of iter/10
thin=50
resStan <- stan(model_code = stan_code, data = standat,
chains = 2, iter = iter, warmup = warmup, thin = thin)
stanmcmc<- As.mcmc.list(resStan)
summcmc <- summary(stanmcmc)
traceplot(stanmcmc[,1,drop=F])
tset <- sapply(1:(ncol(stanmcmc[[1]])-1), function(x) HPDinterval(stanmcmc[,x,drop=T]), simplify=F)
sigTest <- rep(0, length = length(tset))
sigTest[grep("odds", colnames(stanmcmc[[1]]))] <- 1
sigTest[grep("prob", colnames(stanmcmc[[1]]))] <- 0.5
isSig <- sapply(1:length(tset), function(x) sapply(1:chains, function(y) ifelse(((tset[[x]][[y]][1] < sigTest[x] && tset[[x]][[y]][2] < sigTest[x])
|| (tset[[x]][[y]][1] > sigTest[x] && tset[[x]][[y]][2] > sigTest[x])), T, F)))
trueSig <- intersect(which(isSig[1,] == T), which(isSig[2,] == T))
for(i in 1:10){
ind = trueSig[i]
traceplot(stanmcmc[,ind,drop=F])
}
######################## IGNORE ############################
##########
# STAN #
##########
stan_file <- file.path("../../..","Dropbox", "doubleGLM.stan")
#dissector.sm <- stan_model(file=stan_file)
#tsstan
an_fit <- list()
vceg <- list()
for(j in 1:2){
covar <- ifelse(j==1, "Sex", "Diet")
for(i in 1:7){
miss <- which(is.na(compl_phen[[j]][,allvars[[j]][i]]))
mouseID_miss <- compl_phen[[j]]$MouseID[miss]
remove <- which(as.numeric(unlist(strsplit(colnames(compl_kin[[j]]),"[.]"))[c(F,T,F)]) %in% mouseID_miss)
if (length(mouseID_miss) > 0){
temp_phen <- compl_phen[[j]][-which(compl_phen[[j]]$MouseID %in% mouseID_miss),]
temp_kin <- compl_kin[[j]][-remove, -remove]
} else {
temp_phen <- data.frame(compl_phen[[j]])
temp_kin <- compl_kin[[j]]
}
vceg$x <- temp_phen[,covar]
vceg$R <- temp_kin
vceg$Rinv <- solve(vceg$R)
vceg$y <- as.vector(scale(as.numeric(temp_phen[,allvars[[j]][i]])))
stan_fit <- stan(file = stan_file, control = list(adapt_delta = 0.8),
data = list(num_cov = length(unique(vceg$x)),
N = nrow(temp_kin),
phenotype = vceg$y,
cov = vceg$x,
R = temp_kin), chains=3, iter = 5000,
thin=10, warmup = 1000)
}
}
#h2_mcmc <- as.mcmc(stan_fit@sim$samples[[1]]$h2)
#mcmc_trace(regex_pars = 'sigma')
rstan::traceplot(stan_fit, pars = 'h2', inc_warmup=F)
rstan::traceplot(stan_fit, pars = 'grand_sig2', inc_warmup=F)
rstan::traceplot(stan_fit, pars = 'cov_vef', inc_warmup=F)
# ---------------------------------
# Prior parameters
# ---------------------------------
n=10
a <- 1; b <-1;
S <- 20000
tau0 <- 200
sig0 <- 1000
nu0 <- 10
mu0=120
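# Y (the observed data vector of length n) is assumed to be defined upstream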
X <- matrix(NA, nrow=n, ncol=S)
sig.1 <- numeric(S)
sig.2 <- numeric(S)
theta.1 <- numeric(S)
theta.2 <- numeric(S)
p <- numeric(S)
theta.min <- numeric(S)
theta.max <- numeric(S)
# ---------------------------------
# Random initialization to groups
# ---------------------------------
p[1] <- rbeta(1, a,b)
X[,1]<- rbinom(n, 1, p[1])
X[,1]<- ifelse(X[,1]==1, 2, 1)
theta.1[1]<- mean(Y)
theta.2[1]<- mean(Y)
sig.1[1]<- var(Y)
sig.2[1]<- var(Y)
# ---------------------------------
# Gibbs sampling algorithm
# ---------------------------------
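# Each sweep updates, in order: the mixing weight p, the component variances
# sig.1/sig.2, the component means theta.1/theta.2, and the latent labels X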
for (i in 2:S){
# Update for p
n1 <- sum(X[,i-1]==1)
n2 <- sum(X[,i-1]==2)
p[i] <- rbeta(1, a + n1, b + n2)
mean.y1 <- mean(Y[which(X[,i-1] == 1)])
mean.y2 <- mean(Y[which(X[,i-1] == 2)])
var.y1 <- var(Y[which(X[,i-1] == 1)])
var.y2 <- var(Y[which(X[,i-1] == 2)])
# Update parameters
  sig.1[i] <- 1/rgamma(1, (n1/2)+(nu0/2), 0.5*((n1-1)*var.y1 + n1*(mean.y1-theta.1[i-1])^2))
  sig.2[i] <- 1/rgamma(1, (n2/2)+(nu0/2), 0.5*((n2-1)*var.y2 + n2*(mean.y2-theta.2[i-1])^2))
  # precision-weighted posterior for each mean, conditioning on the variance just drawn
  a1 <- n1*(1/sig.1[i])+(1/tau0)
  b1 <- n1*(1/sig.1[i])*mean.y1+(1/tau0)*mu0
  theta.1[i] <- rnorm(1, b1/a1, sqrt(1/a1))
  a2 <- n2*(1/sig.2[i])+(1/tau0)
  b2 <- n2*(1/sig.2[i])*mean.y2+(1/tau0)*mu0
  theta.2[i] <- rnorm(1, b2/a2, sqrt(1/a2))
# calculate theta statistics
theta.min[i] <- min(theta.1[i], theta.2[i])
theta.max[i] <- max(theta.1[i], theta.2[i])
# Update for X's
for (j in 1:n){
    w1 <- p[i]*dnorm(Y[j], theta.1[i], sqrt(sig.1[i]))
    w0 <- (1-p[i])*dnorm(Y[j], theta.2[i], sqrt(sig.2[i]))
w <- w1/(w1+w0)
X[j,i]<- sample(1:0, 1, prob=c(w,1-w))
X[j,i]<- ifelse(X[j,i]==0, 2, 1)
}
}
hist(theta.1, freq = FALSE, xlim = c(90, 190), ylim = c(0, 0.17),
xlab = expression(theta),
main = expression(paste("Approx. of p(",theta,"|y)", sep = "")))
lines(density(theta.1), col = "blue")
lines(density(theta.2), col = "red")
hist(theta.2, freq = FALSE,
add = T)
# ---------------------------------
# Part c
# ---------------------------------
par(mfrow=c(1,2))
plot(acf(theta.min))
plot(acf(theta.max))
print(acf(theta.min, plot=F))
print(acf(theta.max, plot=F))
c(effectiveSize(mcmc(theta.min)),
effectiveSize(mcmc(theta.max)))
################################################################################
## File: /R/s3.R  (repo: AmrR101/singleCellHaystack, MIT license)
################################################################################
#' The main Haystack function
#'
#' @param x a matrix or other object from which coordinates of cells can be extracted.
#' @param dim1 column index or name of matrix for x-axis coordinates.
#' @param dim2 column index or name of matrix for y-axis coordinates.
#' @param assay name of assay data for Seurat method.
#' @param slot name of slot for assay data for Seurat method.
#' @param coord name of coordinates slot for specific methods.
#' @param dims dimensions from coord to use. By default, all.
#' @param cutoff cutoff for detection.
#' @param method choose between highD (default) and 2D haystack.
#' @param detection A logical matrix showing which genes (rows) are detected in which cells (columns)
#' @param use.advanced.sampling If NULL naive sampling is used. If a vector is given (of length = no. of cells) sampling is done according to the values in the vector.
#' @param dir.randomization If NULL, no output is made about the random sampling step. If not NULL, files related to the randomizations are printed to this directory.
#' @param scale Logical (default=TRUE) indicating whether input coordinates in x should be scaled to mean 0 and standard deviation 1.
#' @param grid.points An integer specifying the number of centers (gridpoints) to be used for estimating the density distributions of cells. Default is set to 100.
#' @param grid.method The method to decide grid points for estimating the density in the high-dimensional space. Should be "centroid" (default) or "seeding".
#' @param ... further parameters passed down to methods.
#'
#' @return An object of class "haystack"
#' @export
#'
haystack <- function(x, ...) {
UseMethod("haystack")
}
#' @rdname haystack
#' @export
haystack.matrix <- function(x, dim1 = 1, dim2 = 2, detection, method = "highD", use.advanced.sampling = NULL, dir.randomization = NULL, scale = TRUE, grid.points = 100, grid.method = "centroid", ...) {
method <- match.arg(method, c("highD", "2D"))
switch(method,
"highD" = {
haystack_highD(
x,
detection = detection,
use.advanced.sampling = use.advanced.sampling,
dir.randomization = dir.randomization,
scale = scale,
grid.points = grid.points,
grid.method = grid.method, ...)
},
"2D" = {
haystack_2D(
x[, dim1],
x[, dim2],
detection = detection,
use.advanced.sampling = use.advanced.sampling,
dir.randomization = dir.randomization, ...)
}
)
}
#' @rdname haystack
#' @export
haystack.data.frame <- function(x, dim1 = 1, dim2 = 2, detection, method = "highD", use.advanced.sampling = NULL, dir.randomization = NULL, scale = TRUE, grid.points = 100, grid.method = "centroid", ...) {
haystack(as.matrix(x), dim1 = dim1, dim2 = dim2, detection = detection, method = method, use.advanced.sampling = use.advanced.sampling, dir.randomization = dir.randomization, scale = scale, grid.points = grid.points, grid.method = grid.method, ...)
}
#' @rdname haystack
#' @export
haystack.Seurat <- function(x, assay = "RNA", slot = "data", coord = "pca", dims = NULL, cutoff = 1, method = NULL, use.advanced.sampling = NULL, ...) {
if (!requireNamespace("Seurat", quietly = TRUE)) {
stop("Package \"Seurat\" needed for this function to work. Please install it.", call. = FALSE)
}
y <- Seurat::GetAssayData(x, slot = slot, assay = assay)
z <- Seurat::Embeddings(x, coord)
if (! is.null(dims)) {
z <- z[, dims, drop = FALSE]
}
if(is.null(method)){
if(ncol(z)==2){
method <- "2D"
} else if(ncol(z)>2){
method <- "highD"
}
message("### Input coordinates have ",ncol(z)," dimensions, so method set to \"",method,"\"")
}
y <- y > cutoff
  # TRUE requests advanced sampling weighted by each cell's number of detected genes
  if (isTRUE(use.advanced.sampling)) {
    use.advanced.sampling <- colSums(y)
  }
haystack(z, detection = y, method = method, use.advanced.sampling = use.advanced.sampling, ...)
}
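# Usage sketch (hypothetical object name; assumes PCA has been computed):
#   res <- haystack(seu, coord = "pca", dims = 1:20)
#   show_result_haystack(res, n = 10)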
#' @rdname haystack
#' @export
haystack.SingleCellExperiment <- function(x, assay = "counts", coord = "TSNE", dims = NULL, cutoff = 1, method = NULL, use.advanced.sampling = NULL, ...) {
if (!requireNamespace("SummarizedExperiment", quietly = TRUE)) {
stop("Package \"SummarizedExperiment\" needed for this function to work. Please install it.", call. = FALSE)
}
if (!requireNamespace("SingleCellExperiment", quietly = TRUE)) {
stop("Package \"SingleCellExperiment\" needed for this function to work. Please install it.", call. = FALSE)
}
y <- SummarizedExperiment::assay(x, assay)
z <- SingleCellExperiment::reducedDim(x, coord)
if(is.null(z)) {
stop("No coordinates named ", coord, " found.")
}
if (! is.null(dims)) {
z <- z[, dims, drop = FALSE]
}
if(is.null(method)){
if(ncol(z)==2){
method <- "2D"
} else if(ncol(z)>2){
method <- "highD"
}
message("### Input coordinates have ",ncol(z)," dimensions, so method set to \"",method,"\"")
}
y <- y > cutoff
  # TRUE requests advanced sampling weighted by each cell's number of detected genes
  if (isTRUE(use.advanced.sampling)) {
    use.advanced.sampling <- colSums(y)
  }
haystack(z, detection = y, method = method, use.advanced.sampling = use.advanced.sampling, ...)
}
#' Visualizing the detection/expression of a gene in a 2D plot
#'
#' @param x a matrix or other object from which coordinates of cells can be extracted.
#' @param dim1 column index or name of matrix for x-axis coordinates.
#' @param dim2 column index or name of matrix for y-axis coordinates.
#' @param assay name of assay data for Seurat method.
#' @param slot name of slot for assay data for Seurat method.
#' @param coord name of coordinates slot for specific methods.
#' @param ... further parameters passed to plot_gene_haystack_raw().
#'
#' @export
#'
plot_gene_haystack <- function(x, ...) {
UseMethod("plot_gene_haystack")
}
#' @rdname plot_gene_haystack
#' @export
plot_gene_haystack.matrix <- function(x, dim1 = 1, dim2 = 2, ...) {
plot_gene_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' @rdname plot_gene_haystack
#' @export
plot_gene_haystack.data.frame <- function(x, dim1 = 1, dim2 = 2, ...) {
plot_gene_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' @rdname plot_gene_haystack
#' @export
plot_gene_haystack.SingleCellExperiment <- function(x, dim1 = 1, dim2 = 2, assay = "counts", coord = "TSNE", ...) {
if (!requireNamespace("SummarizedExperiment", quietly = TRUE)) {
stop("Package \"SummarizedExperiment\" needed for this function to work. Please install it.", call. = FALSE)
}
if (!requireNamespace("SingleCellExperiment", quietly = TRUE)) {
stop("Package \"SingleCellExperiment\" needed for this function to work. Please install it.", call. = FALSE)
}
y <- SummarizedExperiment::assay(x, assay)
z <- SingleCellExperiment::reducedDim(x, coord)
plot_gene_haystack_raw(z[, dim1], z[, dim2], expression = y, ...)
}
#' @rdname plot_gene_haystack
#' @export
plot_gene_haystack.Seurat <- function(x, dim1 = 1, dim2 = 2, assay = "RNA", slot = "data", coord = "tsne", ...) {
if (!requireNamespace("Seurat", quietly = TRUE)) {
stop("Package \"Seurat\" needed for this function to work. Please install it.", call. = FALSE)
}
y <- Seurat::GetAssayData(x, slot = slot, assay = assay)
z <- Seurat::Embeddings(x, coord)
plot_gene_haystack_raw(z[, dim1], z[, dim2], expression = y, ...)
}
#' Visualizing the detection/expression of a set of genes in a 2D plot
#'
#' @param x a matrix or other object from which coordinates of cells can be extracted.
#' @param dim1 column index or name of matrix for x-axis coordinates.
#' @param dim2 column index or name of matrix for y-axis coordinates.
#' @param assay name of assay data for Seurat method.
#' @param slot name of slot for assay data for Seurat method.
#' @param coord name of coordinates slot for specific methods.
#' @param ... further parameters passed to plot_gene_haystack_raw().
#'
#' @export
#'
plot_gene_set_haystack <- function(x, ...) {
UseMethod("plot_gene_set_haystack")
}
#' @rdname plot_gene_set_haystack
#' @export
plot_gene_set_haystack.matrix <- function(x, dim1 = 1, dim2 = 2, ...) {
plot_gene_set_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' @rdname plot_gene_set_haystack
#' @export
plot_gene_set_haystack.data.frame <- function(x, dim1 = 1, dim2 = 2, ...) {
plot_gene_set_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' @rdname plot_gene_set_haystack
#' @export
plot_gene_set_haystack.SingleCellExperiment <- function(x, dim1 = 1, dim2 = 2, assay = "counts", coord = "TSNE", ...) {
if (!requireNamespace("SummarizedExperiment", quietly = TRUE)) {
stop("Package \"SummarizedExperiment\" needed for this function to work. Please install it.", call. = FALSE)
}
if (!requireNamespace("SingleCellExperiment", quietly = TRUE)) {
stop("Package \"SingleCellExperiment\" needed for this function to work. Please install it.", call. = FALSE)
}
y <- SummarizedExperiment::assay(x, assay)
z <- SingleCellExperiment::reducedDim(x, coord)
plot_gene_set_haystack_raw(z[, dim1], z[, dim2], detection = y > 1, ...)
}
#' @rdname plot_gene_set_haystack
#' @export
plot_gene_set_haystack.Seurat <- function(x, dim1 = 1, dim2 = 2, assay = "RNA", slot = "data", coord = "tsne", ...) {
if (!requireNamespace("Seurat", quietly = TRUE)) {
stop("Package \"Seurat\" needed for this function to work. Please install it.", call. = FALSE)
}
y <- Seurat::GetAssayData(x, slot = slot, assay = assay)
z <- Seurat::Embeddings(x, coord)
plot_gene_set_haystack_raw(z[, dim1], z[, dim2], detection = y > 1, ...)
}
#' Function for hierarchical clustering of genes according to their expression distribution in 2D or multi-dimensional space
#'
#' @param x a matrix or other object from which coordinates of cells can be extracted.
#' @param dim1 column index or name of matrix for x-axis coordinates.
#' @param dim2 column index or name of matrix for y-axis coordinates.
#' @param ... further parameters passed down to methods.
#'
#' @export
#'
hclust_haystack <- function(x, ...) {
UseMethod("hclust_haystack")
}
#' @rdname hclust_haystack
#' @export
hclust_haystack.matrix <- function(x, dim1 = 1, dim2 = 2, ...) {
hclust_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' @rdname hclust_haystack
#' @export
hclust_haystack.data.frame <- function(x, dim1 = 1, dim2 = 2, ...) {
hclust_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' Function for k-means clustering of genes according to their expression distribution in 2D or multi-dimensional space
#'
#' @param x a matrix or other object from which coordinates of cells can be extracted.
#' @param dim1 column index or name of matrix for x-axis coordinates.
#' @param dim2 column index or name of matrix for y-axis coordinates.
#' @param ... further parameters passed down to methods.
#'
#' @export
#'
kmeans_haystack <- function(x, ...) {
UseMethod("kmeans_haystack")
}
#' @rdname kmeans_haystack
#' @export
kmeans_haystack.matrix <- function(x, dim1 = 1, dim2 = 2, ...) {
kmeans_haystack_raw(x[, dim1], x[, dim2], ...)
}
#' @rdname kmeans_haystack
#' @export
kmeans_haystack.data.frame <- function(x, dim1 = 1, dim2 = 2, ...) {
kmeans_haystack_raw(x[, dim1], x[, dim2], ...)
}
################################################################################
## File: /R/xgboost.R  (repo: RBigData/pbdXGB, Apache-2.0 license)
################################################################################
# Simple interface for training an xgboost model that wraps \code{xgb.train}.
# Its documentation is combined with xgb.train.
#
#' @rdname xgb.train
#' @export
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds,
verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL,
save_period = NULL, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {
dtrain <- xgb.get.DMatrix(data, label, missing, weight)
watchlist <- list(train = dtrain)
bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose, print_every_n = print_every_n,
early_stopping_rounds = early_stopping_rounds, maximize = maximize,
save_period = save_period, save_name = save_name,
xgb_model = xgb_model, callbacks = callbacks, ...)
return(bst)
}
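# Usage sketch (hypothetical data; each MPI rank runs this inside
# pbdMPI::init() ... pbdMPI::finalize()):
#   bst <- xgboost(data = x.local, label = y.local,
#                  params = list(objective = "binary:logistic"), nrounds = 10)
#   pred <- predict(bst, x.local)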
# Various imports
#' @importClassesFrom Matrix dgCMatrix
#' @importFrom data.table rbindlist
#' @importFrom stringi stri_split_regex
#' @importFrom stats predict
#'
#' @import methods
#' @import xgboost
#' @import pbdMPI
#'
NULL
################################################################################
## File: /cachematrix.R  (repo: sanpau/ProgrammingAssignment2)
################################################################################
## A pair of functions that compute and
## cache the inverse of a matrix.
## `makeCacheMatrix`: This function creates a special "matrix" object
## that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
invt <- NULL
  # set() replaces the stored matrix and clears any cached inverse
set <- function(y) {
x <<- y
invt <<- NULL
}
  # get() returns the matrix stored in the enclosing environment
get <- function() x
  # setinvt() caches the inverse in the enclosing environment of
  # makeCacheMatrix; getinvt() returns the cached value (or NULL if none)
setinvt <- function(inverse) invt <<- inverse
getinvt <- function() invt
list(set = set, get = get, setinvt = setinvt, getinvt = getinvt)
}
## This function computes the inverse of the special
##"matrix" returned by `makeCacheMatrix` above. If the inverse has
##already been calculated (and the matrix has not changed), then
##`cacheSolve` should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## If the inverse has already been calculated, cacheSolve retrieves it from the cache.
invt <- x$getinvt()
if(!is.null(invt)) {
message("getting cached result")
return(invt)
}
## If the inverse has not been calculated, data gets the matrix stored with makeCacheMatrix,
## invt calculates the inverse, and x$setinvt(invt) stores it in the object invt in makeCacheMatrix
data <- x$get()
invt <- solve(data, ...)
x$setinvt(invt)
invt
}
##Output of the above :
## mtrx <- matrix(c(1,1,1,3,4,3,3,3,4),3,3)
##> mtrx
##[,1] [,2] [,3]
##[1,] 1 3 3
##[2,] 1 4 3
##[3,] 1 3 4
##> mtrx1 <- makeCacheMatrix(mtrx)
##> cacheSolve(mtrx1)
##[,1] [,2] [,3]
##[1,] 7 -3 -3
##[2,] -1 1 0
##[3,] -1 0 1
################################################################################
## File: /R/transformations.R  (repo: vmikk/vmik, MIT license)
################################################################################
## TO DO:
# - IHS - add 'back' flag to perform reverse transformation
# - gelman_scale - see also arm::rescale(, binary.inputs = "full")
# Standardizing by centering and dividing by 2 standard deviations
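#' Standardize a numeric vector by centering and dividing by 2 standard deviations
#'
#' @param x Numeric vector (NAs are ignored when estimating the mean and SD).
#' @return Rescaled vector with attributes "scaled:center" and "scaled:scale".
#' @references Gelman A. (2008) Scaling regression inputs by dividing by two standard deviations. Statistics in Medicine 27(15):2865-2873.
#' @examples
#' gelman_scale(rnorm(10, mean = 5, sd = 2))
#'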
gelman_scale <- function(x){
x.obs <- x[!is.na(x)]
mm <- mean(x.obs)
ss <- sd(x.obs)
res <- (x - mm)/(2 * ss)
attr(res, "scaled:center") <- mm
attr(res, "scaled:scale") <- ss
return(res)
}
#' Inverse hyperbolic sine transformation
#'
#' @description Inverse hyperbolic sine transformation. Unlike a log variable, the inverse hyperbolic sine is defined at zero.
#' @param x Numeric vector
#' @return Vector of transformed values.
#' @details An alternative to the log and Box-Cox transformations when some values are zero or negative, where those transformations are undefined.
#' Except for very small values of y, the inverse hyperbolic sine is approximately equal to log(2*y_i), i.e. log(2) + log(y_i), so it can be interpreted in exactly the same way as a standard logarithmic dependent variable. For example, if the regression coefficient on "urban" is 0.1, that tells us that urbanites have approximately 10 percent higher wealth than non-urban people.
#' @seealso \code{asinh}
#' @references Burbidge J.B., Magee L., Robb A.L. Alternative transformations to handle extreme values of the dependent variable // Journal of the American Statistical Association. 1988. V. 83. № 401. P. 123-127.
#' @examples
#' IHS(seq(-10, 10, 1))
#'
IHS <- function(x){
log(x + sqrt(x^2 + 1))
}
#' Scale a numeric vector to the specified interval
#'
#' @param x Numeric vector.
#' @param limitMin Lower value of the interval (default = 0).
#' @param limitMax Upper value of the interval (default = 1).
#'
#' @return Transformed values of \code{x} that belong to the new range.
#' @seealso scales::rescale
#' @examples
#' x <- 1:20
#' scale_to_interval(x)
#' scale_to_interval(x, 2, 5)
#'
scale_to_interval <- function(x, limitMin = 0, limitMax = 1){
res <- (limitMax - limitMin) * (x - min(x))
res <- res / ( (max(x) - min(x)))
res <- res + limitMin
return(res)
}
################################################################################
## File: /FigS2_ExtendedData2/FigS2.R  (repo: livkosterlitz/Figures-Jordt-et-al-2020)
################################################################################
library(tidyverse)
library(cowplot)
library(lattice)
library(gridExtra)
library(grid)
library(egg)
#########################
# Figure S2
#########################
###Ancestor###
dat <- read.csv("FigS2_low.csv")
dat <- dat %>%
#select(-Mixture, -CFUs) %>%
group_by(Host, Antibiotic, Day) %>%
summarise(N = n(),
CFUs = mean(CFUs_sub),
SD = sd(CFUs_sub),
SE = SD/sqrt(N))
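# dat now holds the mean CFU count and its standard error for each
# Host x Antibiotic x Day combination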
levels(dat$Antibiotic)
colors_light <- c('darkgrey', 'firebrick1', 'blue2', 'darkorchid2')
p1 <- ggplot(dat %>% filter(Host=='A'),
aes(x=Day, y=CFUs, color=Antibiotic)) +
theme_cowplot(12)+
geom_line(linetype = "dashed", size = 0.4668623442372146) +
geom_errorbar(aes(ymin=CFUs-SE, ymax=CFUs+SE), width=0, size = 0.4668623442372146) +
geom_point(aes(shape=Antibiotic, color=Antibiotic, fill=Antibiotic, size=Antibiotic)) +
scale_color_manual(values=colors_light) +
scale_fill_manual(values=colors_light)+
scale_size_manual(values=c(2,1,1,2))+
scale_shape_manual(values=c(16,25,24,18)) +
scale_y_log10(expand=c(0, 0),
limits=c(1e0, 1e4),
breaks=10^seq(0, 4, by=1),
labels=seq(0, 4, by=1)) +
scale_x_continuous(expand=c(0, 0), limits = c(0,4.2)) +
theme(legend.position="none") +
theme(axis.title.x=element_blank()) +
theme(axis.title.y=element_blank()) +
theme(axis.line.y = element_line(size = 0.3734899)) +
theme(axis.line.x = element_line(size = 0.3734899)) +
theme(axis.ticks = element_line(size = 0.3734899))+
theme(axis.text.x = element_text(margin=margin(1,0,0,0,"pt")),
axis.text.y = element_text(margin=margin(0,1,0,0,"pt")))+
theme(axis.ticks.length=unit(.025, "in"))+
theme(plot.margin = margin(.2, 0, 0, .05, "in"))+
expand_limits(x = 0, y = 0) +
theme(axis.text = element_text(size = 8))
p1
###Evolved###
dat1 <- read.csv("FigS2_high.csv")
dat1 <- dat1 %>%
#select(-Mixture, -CFUs) %>%
group_by(Host, Antibiotic, Day) %>%
summarise(N = n(),
CFUs = mean(CFUs_sub),
SD = sd(CFUs_sub),
SE = SD/sqrt(N))
colors_dark <- c('darkgrey', 'firebrick1', 'blue2', 'darkorchid2')
p3 <- ggplot(dat1 %>% filter(Host=='B'),
aes(x=Day, y=CFUs, color=Antibiotic)) +
theme_cowplot(12)+
geom_line(size = 0.4668623442372146) +
geom_errorbar(aes(ymin=CFUs-SE, ymax=CFUs+SE), width=0, size = 0.4668623442372146) +
geom_point(aes(shape=Antibiotic, color=Antibiotic, fill=Antibiotic, size=Antibiotic)) +
  scale_color_manual(values=colors_dark) +
  scale_fill_manual(values=colors_dark)+
scale_size_manual(values=c(2,1,1,2))+
scale_shape_manual(values=c(16,25,24,18)) +
scale_y_log10(expand=c(0, 0),
limits=c(1e0, 1e4),
breaks=10^seq(0, 4, by=1),
labels=seq(0, 4, by=1)) +
scale_x_continuous(expand=c(0, 0), limits = c(0,4.2)) +
theme(legend.position="none") +
theme(axis.title.x=element_blank()) +
theme(axis.title.y=element_blank()) +
theme(axis.line.y = element_line(size = 0.3734899)) +
theme(axis.line.x = element_line(size = 0.3734899)) +
theme(axis.ticks = element_line(size = 0.3734899))+
theme(axis.text.x = element_text(margin=margin(1,0,0,0,"pt")),
axis.text.y = element_text(margin=margin(0,1,0,0,"pt")))+
theme(axis.ticks.length=unit(.025, "in"))+
theme(plot.margin = margin(.2, 0, 0, .05, "in"))+
expand_limits(x = 0, y = 0) +
theme(axis.text = element_text(size = 8))
p3
Figp1_fixed <- set_panel_size(p1, width = unit(1.35, "in"), height = unit(1.35, "in"))
Figp3_fixed <- set_panel_size(p3, width = unit(1.35, "in"), height = unit(1.35, "in"))
FigureS2_main <- plot_grid(Figp1_fixed, Figp3_fixed,
ncol = 2, align = "v",
labels = c('a','b'), label_size = 10)
FigureS2_main
#create common x and y labels
y.grob <- textGrob("cell density [log ((CFUs/mL)+1)]",
gp=gpar(fontsize=10), rot=90)
x.grob <- textGrob("Transfer",
gp=gpar(fontsize=10))
#add common axis to plot
FigureS2 <- grid.arrange(arrangeGrob(FigureS2_main, left = y.grob, bottom = x.grob))
save_plot("FigureS2.pdf", plot = FigureS2, base_width = 3.45, base_height = 1.85)