blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 2-327 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-91 | license_type stringclasses 2 values | repo_name stringlengths 5-134 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 46 values | visit_date timestamp[us] 2016-08-02 22:44:29 to 2023-09-06 08:39:28 | revision_date timestamp[us] 1977-08-08 00:00:00 to 2023-09-05 12:13:49 | committer_date timestamp[us] 1977-08-08 00:00:00 to 2023-09-05 12:13:49 | github_id int64 19.4k to 671M ⌀ | star_events_count int64 0 to 40k | fork_events_count int64 0 to 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us] 2012-06-21 16:39:19 to 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us] 2008-05-25 01:21:32 to 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 to 9.18M | extension stringclasses 20 values | filename stringlengths 1-141 | content stringlengths 7-9.18M
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7cacc52e5e950fa90e77b3274fe74e2a8b05885a | b0a8f8d6984078682450613d8c351664341c47a8 | /Plot1.R | ed4e140738440b7eea40388ab191a374ae7bea02 | [] | no_license | silhouetted/ExData_Plotting1 | c20df765d223f815c1a4f380b8d2c26c32c2cf02 | fb6b06e8149d3a2640eb580e7cbb4169410001ab | refs/heads/master | 2020-04-05T22:11:02.827530 | 2018-11-12T18:35:17 | 2018-11-12T18:35:17 | 157,247,818 | 0 | 0 | null | 2018-11-12T17:04:38 | 2018-11-12T17:04:37 | null | UTF-8 | R | false | false | 2,026 | r | Plot1.R | #### Exploratory data analysis Week 1 Assignment script
### Reading in and subsetting the data to 1st and 2nd Feb 2007
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# download file if it does not exist
if (!file.exists("ElectricalUsageData.zip")) {
download.file(fileUrl, destfile = "ElectricalUsageData.zip", mode = "wb")
}
# unzip file if not unzipped
if(!file.exists("EUD")){
unzip("ElectricalUsageData.zip", exdir = "./EUD")
}
# read in the file that was unzipped into the EUD directory marking ? as NA
allElectricData <- read.table("./EUD/household_power_consumption.txt", sep = ";", header = TRUE, colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"), na.strings = "?")
# load lubridate to set the date as date rather than character
library(lubridate)
# convert the date to a date format
allElectricData$Date <- dmy(allElectricData$Date) # date in yyyy-mm-dd format
# subset the data from the specific date in Feb we need
febElectricData <- subset(allElectricData, Date == "2007-02-01" | Date == "2007-02-02")
# delete the rest of the data from workspace (taking up nearly 150mb of precious RAM)
rm(allElectricData)
# Draw the first PNG file - a histogram
png("Plot1.png") # set output file
with(febElectricData, hist(Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
dev.off() # turn off output to png
if(file.exists("Plot1.png")) {
cat("File created successfully")
} else {
warning("File not created. Please try again.")
}
|
d6b4fdaa8c469899a29b7ee5df2dbea7503f947c | 9df25083c9e3b935853cd941e1f6e9dfd4878322 | /01-PH01-SC1-FINAL.R | 4a35d431d9d33a8d49f073252f1790414656252c | ["MIT"] | permissive | rintukutum/precisionFDA-BCC-PH01 | 13640c440795f80281ed8835273c32c7923cd10d | e4392a4ea96b266dfdcf3f5e8b952cf73b727190 | refs/heads/master | 2020-12-29T05:26:35.916184 | 2020-02-05T15:57:30 | 2020-02-05T15:57:30 | 238,470,111 | 0 | 0 | null | 2018-11-12T17:04:38 | 2018-11-12T17:04:37 | null | UTF-8 | R | false | false | 4,171 | r | 01-PH01-SC1-FINAL.R | #-----------------
# TRAIN, CV & TEST
rm(list=ls())
load('./data/sc1.data.RData')
#-------------------------------
# SPLIT DATA INTO TRAIN AND TEST
set.seed(907)
train.idx <- caret::createDataPartition(
y = sc1.data$outcome$SURVIVAL_STATUS,
times = 1,
p = 0.8
)[[1]]
tr.samp <- sc1.data$outcome$PATIENTID[train.idx]
table(sc1.data$outcome$SURVIVAL_STATUS[train.idx])
#------------------
# 80% dataset
# 0 1
# 40 262
#------------------
sc1.train <- list(
phenotype = sc1.data$phenotype[sc1.data$phenotype$PATIENTID %in% tr.samp,],
y = sc1.data$outcome[sc1.data$outcome$PATIENTID %in% tr.samp,'SURVIVAL_STATUS'],
x = sc1.data$feature[sc1.data$feature$PATIENTID %in% tr.samp,-1]
)
sc1.test <- list(
phenotype = sc1.data$phenotype[!(sc1.data$phenotype$PATIENTID %in% tr.samp),],
y = sc1.data$outcome[!(sc1.data$outcome$PATIENTID %in% tr.samp),'SURVIVAL_STATUS'],
x = sc1.data$feature[!(sc1.data$feature$PATIENTID %in% tr.samp),-1]
)
save(sc1.train,sc1.test,file = './data/PH01-SC1-model-data.RData')
#----------------
# Feature reduction
#----------------
rm(list = ls())
load('./data/PH01-SC1-model-data.RData')
source('func-room.R')
idx.0 <- sc1.train$y == 0
idx.1 <- sc1.train$y == 1
feature.pvals <- apply(sc1.train$x,2,performTEST)
sc1.feature.pvals <- feature.pvals[order(feature.pvals)]
save(sc1.feature.pvals,
file='./data/PH01-SC1-feature-pvals.RData')
#-----------------------
# TRAINING PHASE, 5-FOLD CV
rm(list=ls())
load('./data/PH01-SC1-feature-pvals.RData')
load('./data/PH01-SC1-model-data.RData')
source('func-room.R')
p.cutoffs <- c(1.0e-05,1.0e-04,1.0e-03,1.0e-02)
for(i in 1:length(p.cutoffs)){
message('Threshold pval <= ',p.cutoffs[i])
p.cutoff <- p.cutoffs[i]
sc1.features <- sc1.feature.pvals[sc1.feature.pvals <= p.cutoff]
#---------------
model.feature <- names(sc1.features)
tr.x <- sc1.train$x[,model.feature]
tr.y <- as.factor(sc1.train$y)
tt.x <- sc1.test$x[,model.feature]
tt.y <- as.factor(sc1.test$y)
ph01.sc1.SVM.mods <- performSVM(
tr.x = tr.x,
tr.y = tr.y,
tt.x = tt.x,
tt.y = tt.y,
SEED = 768
)
perf.PH01.SC1 <- getPerfMetrics(x = ph01.sc1.SVM.mods)
dir.create('./data/MODEL-PERF-PH01/',showWarnings = FALSE)
outname <- paste0(
'./data/MODEL-PERF-PH01/perf.PH01.SC1',
p.cutoff,'.RData')
save(perf.PH01.SC1,
file = outname)
p.train <- get.TPR.FPR.plot(x = ph01.sc1.SVM.mods,type = 'train')
p.test <- get.TPR.FPR.plot(x = ph01.sc1.SVM.mods,type = 'test')
dir.create('./figures',showWarnings = FALSE)
outfile <- paste0('./figures/PH01-SC1-SVM-',p.cutoff,'.png')
png(outfile,
width = 1000,height = 1000,
res = 200)
gridExtra::grid.arrange(
p.train + ggtitle(paste('Train | pval <= ',p.cutoff)),
p.test + ggtitle(paste('Test | pval <= ',p.cutoff)))
dev.off()
}
#-----------------------
# FINAL MODEL
rm(list=ls())
load('./data/PH01-SC1-feature-pvals.RData')
load('./data/PH01-SC1-model-data.RData')
sc1.features <- sc1.feature.pvals[sc1.feature.pvals <= 1.0e-05]
#---------------
model.feature <- names(sc1.features)
message('Features used!')
message(paste(model.feature,collapse = ', '))
tr.x <- sc1.train$x[,model.feature]
tr.y <- as.factor(sc1.train$y)
tt.x <- sc1.test$x[,model.feature]
tt.y <- as.factor(sc1.test$y)
sc1.mod.data <- list(
tr.x = tr.x,
tr.y = tr.y,
tt.x = tt.x,
tt.y = tt.y
)
dir.create('./data/MODEL-DATA-PH01',showWarnings = FALSE)
save(sc1.mod.data,
file = './data/MODEL-DATA-PH01/sc1.mod.data.RData')
#-------------------------------------------------------
#-------------------------------------------------------
rm(list=ls())
load('./data/MODEL-DATA-PH01/sc1.mod.data.RData')
source('func-room.R')
ph01.sc1.SVM.mods <- performSVM(
tr.x = sc1.mod.data$tr.x,
tr.y = sc1.mod.data$tr.y,
tt.x = sc1.mod.data$tt.x,
tt.y = sc1.mod.data$tt.y,
SEED = 768
)
perf.PH01.SC1 <- getPerfMetrics(x = ph01.sc1.SVM.mods)
sink('./submission/Summary-Phase1-SC1-TRAIN.txt')
print(perf.PH01.SC1$tr)
sink()
dir.create('./data/MODEL-PERF-FINAL-PH01/',showWarnings = FALSE)
outname <- './data/MODEL-PERF-FINAL-PH01/perf.PH01.SC1.FINAL.RData'
save(perf.PH01.SC1,
file = outname)
|
a96e2c29d4df17c11d0fd581aec8dbf5515a5056 | 7af0de4a6767812f392bd69a2298f45550f8abb5 | /Bagged_Loess_Lattice.R | 00d0ae511c8c7bbe9c2d7af021799907d9afb88f | [] | no_license | SudhakaranP/Statistical_Learning_Basics | 615077494c15c9ae8f28cd3e856eee7b8cd03678 | 40162b9831bdc165da5af926cc2c7ba8a9fe674f | refs/heads/master | 2021-06-14T18:56:40.890625 | 2016-12-14T02:04:34 | 2016-12-14T02:04:34 | 105,226,455 | 0 | 1 | null | 2017-09-29T03:37:24 | 2017-09-29T03:37:23 | null | UTF-8 | R | false | false | 862 | r | Bagged_Loess_Lattice.R | library(ElemStatLearn)
library(lattice)  # xyplot() used below comes from lattice, which is not loaded above
set.seed(105)
ll <- matrix(NA,nrow=100,ncol=155)
for(i in 1:100){
ss <- sample(1:dim(ozone)[1],replace=T)
ozone0 <- ozone[ss,]
ozone0 <- ozone0[order(ozone0$ozone),]
loess0 <- loess(temperature ~ ozone,data=ozone0,span=0.2)
ll[i,] <- predict(loess0,newdata=data.frame(ozone=1:155))
}
xyplot(temperature~ozone, data = ozone
, pch = 19, col = "black"
, bagLines = ll
, prepanel = function(x,y, bagLines) {
list(xlim = c(1, max(x))
, ylim = c(min(bagLines, na.rm = TRUE)
, max(bagLines, na.rm = TRUE)))
}
, panel = function(x,y, ...) {
for (i in 1:100) {
panel.lines(1:155,ll[i,], alpha = 0.1, col = "green")
}
panel.xyplot(x,y, ...)
panel.lines(1:155,apply(ll,2,mean),col="red",lwd=3)
}
)
|
293ff38a2ed703a9fc70b6a468f439e2b5d98b3d | 171398356488ee085e053595b7e9e43d671c1586 | /HW4-Gairola-Abhijit.R | 2cdb3e9de979d1908bfcc973452e18ded7693902 | [] | no_license | dexter11235813/Math185 | 2e83986eba0af687b444bd97d7bfd02a4d0c2bd1 | ae5d60b5c38a2cba38274867657c3e33be380e97 | refs/heads/master | 2020-07-03T23:42:52.900984 | 2016-11-19T16:51:55 | 2016-11-19T16:51:55 | 74,222,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,222 | r | HW4-Gairola-Abhijit.R | #Q1
head(ToothGrowth)
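# between-treatment sum of squares: each supplement level has 30 observations
# (3 doses x 10 animals), so SST = 30 * sum((treatment means - grand mean)^2)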
SST = function(dat,Y...)
{
temp1 = c()
for(i in 1:2)
{
temp1[i] = mean(dat[,i,])
}
return(30*sum((temp1 - Y...)^2))
}
# For each of B = 999 permutations, we permute the data within each block (dose level),
# compute the SST of the permuted table, and compare it with the SST of the original data.
twowayPermTest = function(ToothGrowth, B = 999)
{
a11 = subset(ToothGrowth,(supp == "VC") & (dose ==0.5))$len
a12 = subset(ToothGrowth,(supp == "VC") & (dose ==1))$len
a13 = subset(ToothGrowth,(supp == "VC") & (dose ==2))$len
a21 = subset(ToothGrowth,(supp == "OJ") & (dose ==0.5))$len
a22 = subset(ToothGrowth,(supp == "OJ") & (dose ==1))$len
a23 = subset(ToothGrowth,(supp == "OJ") & (dose ==2))$len
two.way.table = data.frame(a11,a12,a13,a21,a22,a23)
v = c()
for(i in 1:10)
{
for(j in 1:6)
{
v = c(v,two.way.table[i,j])
}
}
# store the data from ToothGrowth in a 3x2x10 array
two.way.table.3D = array(v,c(3,2,10))
colnames(two.way.table.3D) = c("VC","OJ")
rownames(two.way.table.3D) = c(0.5,1,2)
Y... = mean(two.way.table.3D)
# calculating SST on the original given data
d = SST(two.way.table.3D, Y...)
#
# Permuting across all treatements within each block
d.perm = c()
for(j in 1:B)
{
for(i in 1:3)
{
temp = c(two.way.table.3D[i,1,],two.way.table.3D[i,2,])
temp = sample(temp)
two.way.table.3D[i,1,] = temp[1:10]
two.way.table.3D[i,2,] = temp[11:20]
}
d.perm[j] = SST(two.way.table.3D,Y...)
}
p.val = (length(which(d.perm >= d))+1)/(B+1)
return(p.val)
}
print(twowayPermTest(ToothGrowth,999))
# pvalues range from 0.01 - 0.03
#Q2
load("alon.RData")
alon = as.data.frame(alon)
colnames(alon) = c(seq(1:2000),"y")
p.val.vec = c()
for(i in 1:2000)
{
p.val.vec[i] = as.numeric(t.test(alon[,i] ~ y,data = alon)$p.value)
}
p.val.vec = as.vector(p.val.vec)
#sorting the vector containing p values
temporary = p.val.vec[order(p.val.vec)]
# number of hypotheses rejected using the Bonferroni correction
length(which(p.adjust(temporary,"bon") <= 0.05))
# number of hypotheses rejected using the Holm correction
length(which(p.adjust(temporary,"holm") <= 0.05))
# number of hypotheses rejected using the Hochberg correction
length(which(p.adjust(temporary,"hoch")<=0.05))
# number of hypotheses rejected using the FDR (Benjamini-Hochberg) correction
length(which(p.adjust(temporary,"BH")<=0.05))
# From the above runs, we see that the FDR (BH) correction is the least conservative,
# rejecting 190 hypotheses, whereas Bonferroni, Holm and Hochberg each reject only 11 of the 2000.
#Q3
B = 50 # low B value used to reduce runtime
t.test.stat = c()
t.test.stat.perm = c()
#vector that holds the p-value for each of the 2000 genes.
p.value = 0
for(i in 1:2000)
{
t.test.stat[i] = t.test(alon[,i] ~ y , data = alon)$statistic
}
for(i in 1:B)
{
# sampling the subjects 'y'
alon$y = sample(alon$y)
for(j in 1:2000)
{
t.test.stat.perm[j] = t.test(alon[,j] ~ y,data = alon)$statistic
}
p.value = p.value + ifelse((abs(t.test.stat.perm) >= abs(t.test.stat)),1,0)/(B+1)
}
print(p.value) |
05dbcc03744c5f696c565a3a23013663a05a62dc | 808f796821392a6ce4dd2a243aac01cce1513e0e | /src/plotFlightLine.R | c13602d3708600dd725d5d1abc7bdc436aeecc47 | [] | no_license | hdugan/WrightValley_AEM | f7c7b9ba72c7fe3a609cd9acade05cb705fc7de3 | 6eb83f7d761ee75d77124808409a4749bf0e4f10 | refs/heads/main | 2023-04-15T03:06:19.803795 | 2022-08-04T15:31:58 | 2022-08-04T15:31:58 | 475,973,442 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,740 | r | plotFlightLine.R | #dvdpLocation is UTMX, UTMY, depth. ex) c(416673, 1391082, 85.73)
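# required packages (assumed to be attached by the caller): dplyr, tidyr, purrr,
# zoo (na.approx), sp, raster (pointDistance), ggplot2, metR (geom_contour_fill),
# MetBrewer (met.brewer)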
plotFlightLine <- function(lineNo, aemDF, dvdpLocation = NULL, dvdpLocation2 = NULL) {
# Derive depths of each layer
depths = aemDF |> filter(LINE_NO == lineNo) |>
dplyr::select(DEP_TOP_1:DEP_TOP_30) |>
pivot_longer(cols = DEP_TOP_1:DEP_TOP_30, names_to = "bin", values_to = "depths") |>
mutate(bin = parse_number(bin)) |>
group_by(bin) |>
summarise_all(first)
# Get depth of DOI
doi = aemDF |> filter(LINE_NO == lineNo) |>
dplyr::select(UTMX, UTMY, DOI_CONSERVATIVE)
# Get distance between points
flightDist = aemDF |> filter(LINE_NO == lineNo) |>
dplyr::select(UTMX, UTMY) |>
mutate(row = row_number())
# Make all lines plot west to east
WE = -1
if(flightDist |> slice(1) |> pull(UTMX) > flightDist |> slice(n()) |> pull(UTMX)) {
flightDist = flightDist |> map_df(rev)
WE = 1 # This comes into play later for blackout boxes
}
flightDist = flightDist |>
mutate(across(.fns = lag, .names = '{col}_next')) |>
rowwise() |>
mutate(dist = pointDistance(c(UTMX, UTMY),c(UTMX_next, UTMY_next), method = 'Euclidean', lonlat = F)[1]) |>
mutate(dist = if_else(is.na(dist), 0, dist)) |>
ungroup() |>
mutate(distcum = cumsum(dist)) |>
arrange(row)
# add in thickness and DOIs and flightDist
a = aemDF |> filter(LINE_NO == lineNo) |>
dplyr::select(UTMX, UTMY, ELEVATION, RHO_I_1:RHO_I_30) |>
pivot_longer(cols = RHO_I_1:RHO_I_30, names_to = "bin", values_to = "rho") |>
mutate(bin = parse_number(bin)) |>
left_join(depths) |>
left_join(doi) |>
left_join(flightDist) |>
mutate(depth.elev = round(ELEVATION - depths), doi.elev = round(ELEVATION - DOI_CONSERVATIVE))
# No 30 bin for 2011 flight lines
if (any(is.na(a |> filter(bin ==30) |> pull(rho)))) {
a = a |> filter(bin != 30)
}
mindoi = min(a$depth.elev)
# b = data.frame(new.depths = seq(min(a$depth.elev), max(a$depth.elev), by = 1))
b = expand_grid(distcum = unique(a$distcum), depth.elev = seq(min(a$depth.elev, na.rm = T), max(a$depth.elev, na.rm = T), by = 1)) |>
left_join(a) |>
group_by(distcum) %>%
mutate(rhoInterp = na.approx(rho, na.rm=FALSE, method = 'linear')) |>
mutate(rhoInterp = if_else(rhoInterp < 1, 1, rhoInterp)) |>
mutate(rhoInterp = if_else(rhoInterp > 1e4, 1e4, rhoInterp)) |>
mutate(rhoBackground = if_else(!is.na(rhoInterp), 1e4, NA_real_)) |>
ungroup()
# Get bathymetry
# test extracting depths over line
testline = a |> filter(bin == 1) |> dplyr::select(UTMX,UTMY)
coordinates(testline) <- ~UTMX+UTMY
raster::crs(testline) = raster::crs(DEM3)
testline.depths <- raster::extract(DEM3, # raster layer
testline, # SPDF with centroids for buffer
method = 'simple',
buffer = 5, # buffer size, units depend on CRS
fun = median, # what to value to extract
df = FALSE) # return a dataframe?
testline.df = a |> filter(bin == 1) |>
dplyr::select(UTMX,UTMY, ELEVATION, distcum) |>
mutate(bath.depth = testline.depths) |>
mutate(bathy = ELEVATION - bath.depth)
# Extract DVDP borehole location for plot
if (!is.null(dvdpLocation)) {
dvdpLine = a |> filter(bin == 1) |>
dplyr::select(UTMX,UTMY, ELEVATION, distcum) |>
mutate(UTMXd = dvdpLocation[1], UTMYd = dvdpLocation[2]) |>
mutate(UTMxDIFF = abs(UTMX - UTMXd), UTMyDIFF = abs(UTMY - UTMYd)) |>
filter(UTMxDIFF == min(UTMxDIFF))
}
if (!is.null(dvdpLocation2)) {
dvdpLine2 = a |> filter(bin == 1) |>
dplyr::select(UTMX,UTMY, ELEVATION, distcum) |>
mutate(UTMXd = dvdpLocation2[1], UTMYd = dvdpLocation2[2]) |>
mutate(UTMxDIFF = abs(UTMX - UTMXd), UTMyDIFF = abs(UTMY - UTMYd)) |>
filter(UTMxDIFF == min(UTMxDIFF))
}
# CONTINUOUS SCALE - Interpolated rho
# Get black out zones
rr = b |>
filter(bin == 1) |>
mutate(index = row_number()) |>
mutate(test = dist>100) %>%
filter(test == TRUE) |>
pull(index)
blackout = b |>
filter(bin == 1) |>
mutate(index = row_number()) |>
filter(index %in% rr | index %in% (rr+WE))
# Plot resistivity
options(scipen = 1)
mybreaks = seq(0,4)
mylabels = 10^mybreaks
# Dataframe for DOI polygon
b.ribbon = b |>
filter(!is.na(DOI_CONSERVATIVE)) |>
filter(bin == 30) |>
mutate(doi.elev = if_else(doi.elev < depth.elev, depth.elev, doi.elev))
#Plots using contours
p2 = ggplot(b) +
geom_contour_fill(aes(x = distcum, y = depth.elev, z = log10(rhoBackground))) + # Interpolation can leave gaps... this fills in with max limit of 1e4
geom_contour_fill(bins = 20, aes(x = distcum, y = depth.elev, z = log10(rhoInterp))) +
scale_fill_gradientn(name = 'rho (Ω-m)', colours = rev(met.brewer("Hiroshige", n=100)),
limits = log10(c(1, 1e4)),
breaks = mybreaks, labels = mylabels,
oob = scales::squish) +
geom_ribbon(data = b.ribbon,
aes(x = distcum, ymax = doi.elev, ymin = mindoi), alpha = 0.8) + # DOI ribbon
geom_path(data = testline.df, aes(x = distcum, y = bathy)) + # lake bathymetry
xlab('Distance W → E (m)') + ylab('Elevation (m)') +
theme_minimal(base_size = 8)
if (!is.null(dvdpLocation)) {
p2 = p2 + geom_linerange(data = dvdpLine, aes(x = distcum, ymin = ELEVATION - dvdpLocation[3], ymax = ELEVATION), color = 'black', size = 1) # DVDP borehole
}
if (!is.null(dvdpLocation2)) {
p2 = p2 + geom_linerange(data = dvdpLine2, aes(x = distcum, ymin = ELEVATION - dvdpLocation2[3], ymax = ELEVATION), color = 'black', size = 1) # DVDP borehole
}
if(nrow(blackout) == 2) {
p2 = p2 + geom_rect(data = blackout, aes(xmin = distcum[1], xmax = distcum[2], ymin = mindoi, ymax = ELEVATION[1])) # blackout
}
if(nrow(blackout) > 2) {
p2 = p2 + geom_rect(data = blackout, aes(xmin = distcum[1], xmax = distcum[2], ymin = mindoi, ymax = ELEVATION[1])) # blackout
p2 = p2 + geom_rect(data = blackout, aes(xmin = distcum[3], xmax = distcum[4], ymin = mindoi, ymax = ELEVATION[3])) # blackout
p2 = p2 + geom_rect(data = blackout, aes(xmin = distcum[5], xmax = distcum[6], ymin = mindoi, ymax = ELEVATION[5])) # blackout
p2 = p2 + geom_rect(data = blackout, aes(xmin = distcum[7], xmax = distcum[8], ymin = mindoi, ymax = ELEVATION[7])) # blackout
p2 = p2 + geom_rect(data = blackout, aes(xmin = distcum[9], xmax = distcum[10], ymin = mindoi, ymax = ELEVATION[9])) # blackout
}
return(p2)
}
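# minimal usage sketch (hypothetical line number and borehole location; assumes
# `aem` is an AEM data.table with the columns used above and DEM3 is in scope):
# p <- plotFlightLine(lineNo = 101300, aemDF = aem,
#                     dvdpLocation = c(416673, 1391082, 85.73))
# print(p)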
|
4a24b742e5d454d131d0a57f8f434ff38416b3f1 | 39d7f1d2d81c2a9fc37df54cccc8571f7a1d31e0 | /EM/R_codes/c_em_algorithm_generic.R | 89011d5b58571043b69db3f126b2899df14506c1 | [] | no_license | historical-record-linking/matching-codes | 4a989b65f12791a206c24e8646a1f37881bf44ec | 31aaf4a85056b3f58aa71342611ba2faead66306 | refs/heads/master | 2022-11-21T18:32:15.941226 | 2020-07-08T23:15:26 | 2020-07-08T23:15:26 | 258,260,033 | 16 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,136 | r | c_em_algorithm_generic.R |
rm(list=ls())
dropbox <- "C:/Users/acald/Desktop/test_em_data/"
# set directories
EMdistances <- paste(dropbox,"data/new_codes/em_santi_small2/EMdistances/",sep = "")
EMmatches <- paste(dropbox,"data/new_codes/em_santi_small2/EMmatches/",sep = "")
# Select threshold for Algorithm and maximum number of iterations
stop_at_param<-0.00001
iter<-3000
#Load packages
library(stringdist)
library(foreign)
library(readstata13)
library(plyr)       # load plyr before dplyr so that dplyr's verbs are not masked
library(dplyr)
library(doParallel) # doParallel attaches foreach as a dependency
library(foreach)
registerDoParallel(cores=2)
options(scipen=999)
#Import appended data on summary of distances (created by jaro_winkler_names.R)
data<-vector()
data <- read.table(paste(EMdistances,"summary.csv",sep=""), header = T)
data <- data.frame(data)
Dist_FN<-data$Dist_FN
Dist_LN<-data$Dist_LN
Age_Dist<-data$Age_Dist
# *EM Algorithm*
## PRIOR Distributions: Initial value for proportion of matches
n_obs<-sum(data$Count)
p_M_0<-data$N[1]/n_obs
p_U_0<-1-p_M_0
###String distribution: multinomial
###Set number of categorical variables (k) for names and ages
#### 4 groups defined below by the dic_strdist functions
k_N<-4
k_A<-6
i_N<-1:k_N
i_A<-1:k_A
#Priors are set equal to 1/k for the unmatched and to 1/k + [(k-1)/2 - (i-1)]/(k^2) for the matched
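# e.g. with k = 4 the matched prior works out to (0.34375, 0.28125, 0.21875, 0.15625):
# strictly decreasing in distance and already summing to 1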
###First name priors (for the unmatched, assume the empirical distance frequencies carry no further information; for the matched, impose a decreasing likelihood ratio):
theta_strdist_FN_U_0<-(aggregate(data$Count, by=list(Dist_FN=data$Dist_FN), FUN=sum)/n_obs)[,2]
theta_strdist_FN_M_0<-(1/k_N+((k_N-1)/2-(i_N-1))/k_N^2)
for (i in c(k_N:2)){
if (theta_strdist_FN_M_0[i-1]/theta_strdist_FN_M_0[i]<theta_strdist_FN_U_0[i-1]/theta_strdist_FN_U_0[i]){
theta_strdist_FN_M_0[i-1]<-theta_strdist_FN_M_0[i]*theta_strdist_FN_U_0[i-1]/theta_strdist_FN_U_0[i]
}
}
theta_strdist_FN_M_0<-theta_strdist_FN_M_0/sum(theta_strdist_FN_M_0)
###Last name priors (my prior: Pr(Distance|U) is approximately the empirical frequency of each distance)
theta_strdist_LN_U_0<-(aggregate(data$Count, by=list(Dist_LN=data$Dist_LN), FUN=sum)/n_obs)[,2]
theta_strdist_LN_M_0<-(1/k_N+((k_N-1)/2-(i_N-1))/k_N^2)
for (i in c(k_N:2)){
if (theta_strdist_LN_M_0[i-1]/theta_strdist_LN_M_0[i]<theta_strdist_LN_U_0[i-1]/theta_strdist_LN_U_0[i]){
theta_strdist_LN_M_0[i-1]<-theta_strdist_LN_M_0[i]*theta_strdist_LN_U_0[i-1]/theta_strdist_LN_U_0[i]
}
}
theta_strdist_LN_M_0<-theta_strdist_LN_M_0/sum(theta_strdist_LN_M_0)
###Age distribution: multinomial over 0,1,2,3,4,5 absolute age difference
theta_agedist_U_0<-(aggregate(data$Count, by=list(Age_Dist=data$Age_Dist), FUN=sum)/n_obs)[,2]
theta_agedist_M_0<-(1/k_A+((k_A-1)/2-(i_A-1))/k_A^2)
for (i in c(k_A:2)){
if (theta_agedist_M_0[i-1]/theta_agedist_M_0[i]<theta_agedist_U_0[i-1]/theta_agedist_U_0[i]){
theta_agedist_M_0[i-1]<-theta_agedist_M_0[i]*theta_agedist_U_0[i-1]/theta_agedist_U_0[i]
}
}
theta_agedist_M_0<-theta_agedist_M_0/sum(theta_agedist_M_0)
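# sanity check: the prior likelihood ratios Pr(dist|M)/Pr(dist|U) printed below should decrease with distance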
print(theta_agedist_M_0/theta_agedist_U_0)
print(theta_strdist_FN_M_0/theta_strdist_FN_U_0)
print(theta_strdist_LN_M_0/theta_strdist_LN_U_0)
#Match enriched sample
p_age_M<-theta_agedist_M_0[data[,1]+1]
p_age_U<-theta_agedist_U_0[data[,1]+1]
p_str_FN_M<-theta_strdist_FN_M_0[data[,2]]
p_str_FN_U<-theta_strdist_FN_U_0[data[,2]]
p_str_LN_M<-theta_strdist_LN_M_0[data[,3]]
p_str_LN_U<-theta_strdist_LN_U_0[data[,3]]
m<-(p_str_FN_M*p_str_LN_M*p_age_M*p_M_0)/(p_M_0)
u<-(p_str_FN_U*p_str_LN_U*p_age_U*p_U_0)/(p_U_0)
data<-data[order(-log(m/u)),]
weight<-log(m/u)
weight<-weight[order(-weight)]
data<-cbind(data,c(1:length(data$Count)))
data$Count<-data$Count/(data[,6])
n_obs<-sum(data$Count)
p_M_0<-data$N[1]/n_obs
p_U_0<-1-p_M_0
## Start loop
t<-1
error<-10
error_v<-vector()
while(error>stop_at_param & t<iter){
#E-step: Pr(distance | current parameters) and the posterior match probability w
p_age_M<-theta_agedist_M_0[data[,1]+1]
p_age_U<-theta_agedist_U_0[data[,1]+1]
p_str_FN_M<-theta_strdist_FN_M_0[data[,2]]
p_str_FN_U<-theta_strdist_FN_U_0[data[,2]]
p_str_LN_M<-theta_strdist_LN_M_0[data[,3]]
p_str_LN_U<-theta_strdist_LN_U_0[data[,3]]
w<-(p_str_FN_M*p_str_LN_M*p_age_M*p_M_0)/((p_str_FN_M*p_str_LN_M*p_age_M*p_M_0)+(p_str_FN_U*p_str_LN_U*p_age_U*p_U_0))
p_M_1<-weighted.mean(w,data[,4])
p_U_1<-1-p_M_1
theta_agedist_M_1<-vector()
theta_agedist_U_1<-vector()
theta_strdist_FN_M_1<-vector()
theta_strdist_FN_U_1<-vector()
theta_strdist_LN_M_1<-vector()
theta_strdist_LN_U_1<-vector()
#M-step: update the multinomial parameters using the posterior weights w
for (i in 1:k_A){
theta_agedist_M_1[i]<-(w[which(data[,1]==i-1)]%*%data[which(data[,1]==i-1),4])/(w%*%data[,4])
theta_agedist_U_1[i]<-((1-w[which(data[,1]==i-1)])%*%data[which(data[,1]==i-1),4])/((1-w)%*%data[,4])
}
for (i in 1:k_N){
theta_strdist_FN_M_1[i]<-(w[which(data[,2]==i)]%*%data[which(data[,2]==i),4])/(w%*%data[,4])
theta_strdist_FN_U_1[i]<-((1-w[which(data[,2]==i)])%*%data[which(data[,2]==i),4])/((1-w)%*%data[,4])
theta_strdist_LN_M_1[i]<-(w[which(data[,3]==i)]%*%data[which(data[,3]==i),4])/(w%*%data[,4])
theta_strdist_LN_U_1[i]<-((1-w[which(data[,3]==i)])%*%data[which(data[,3]==i),4])/((1-w)%*%data[,4])
}
#Convergence check: maximum absolute change across all parameters
t<-t+1
error1<-max(abs(rbind(theta_agedist_M_1,theta_agedist_U_1)-rbind(theta_agedist_M_0,theta_agedist_U_0)))
error2<-max(abs(rbind(theta_strdist_FN_M_1,theta_strdist_FN_U_1,theta_strdist_LN_M_1,theta_strdist_LN_U_1)-rbind(theta_strdist_FN_M_0,theta_strdist_FN_U_0,theta_strdist_LN_M_0,theta_strdist_LN_U_0)))
error3<-abs(p_M_1-p_M_0)
error<-max(error1,error2,error3)
error_v[t]<-error
#Update values
theta_agedist_M_0<-theta_agedist_M_1
theta_agedist_U_0<-theta_agedist_U_1
theta_strdist_FN_M_0<-theta_strdist_FN_M_1
theta_strdist_FN_U_0<-theta_strdist_FN_U_1
theta_strdist_LN_M_0<-theta_strdist_LN_M_1
theta_strdist_LN_U_0<-theta_strdist_LN_U_1
p_M_0<-p_M_1
p_U_0<-p_U_1
print(t)
}
#Create final w:
w_final<-(p_str_FN_M*p_str_LN_M*p_age_M*p_M_0)/((p_str_FN_M*p_str_LN_M*p_age_M*p_M_0)+(p_str_FN_U*p_str_LN_U*p_age_U*p_U_0))
w_final<-round(w_final, 8)
#Save parameters
parameters<-c(theta_agedist_M_0,theta_agedist_U_0,theta_strdist_FN_M_0,theta_strdist_FN_U_0,theta_strdist_LN_M_0,theta_strdist_LN_U_0,p_M_0)
names<-c("AM0","AM1","AM2","AM3","AM4","AM5","AU0","AU1","AU2","AU3","AU4","AU5","FM0","FM1","FM2","FM3","FU0","FU1","FU2","FU3","LM0","LM1","LM2","LM3","LU0","LU1","LU2","LU3","P")
parameters<-cbind(names, parameters)
file1<-paste(EMmatches,"EM_Estimates_parameters.csv",sep="")
write.table(parameters,file1, row.names=F)
#Save estimates of probabilities (w)
probs_export<-cbind(data, w_final)
colnames(probs_export) <- c("Age_Dist", "strdist_FN_index", "strdist_LN_index", "counts", "N", "w_final")
file2<-paste(EMmatches,"EM_Estimates_probabilities.csv",sep="")
write.table(probs_export,file2, row.names=F)
|
0dedd99043c8ea48bf311280366ae359d6bb4841 | bdd8d4b527d36aa0c69d127aa6c071b70dc3fffb | /Admin/testando - fields of study.R | 7f90e47fe53ea76373569052f3ff3a550e94b385 | [] | no_license | antrologos/harmonizeIBGE | 2abc3fa53106c026d29e11f83bae8e642049ab86 | 8c6053b54b434eddab84f6e32f43424f22d06170 | refs/heads/master | 2022-03-13T11:16:06.906636 | 2022-02-28T17:25:21 | 2022-02-28T17:25:21 | 141,569,637 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,495 | r | testando - fields of study.R | rm(list=ls());gc();Sys.sleep(.5);gc()
options(scipen=999)
library(harmonizeIBGE)
library(Hmisc)
library(descr)
library(fst)
#======================================================================================================
setwd("E:/Dropbox-Ro/Dropbox/Rogerio/Bancos_Dados/Censos")
variaveis <- fread("E:/Google Drive/RCodes/PacotesR/harmonizeIBGE/Admin/variaveis_CENSOS.csv")
anos <- c(1960, 1970, 1980, 1991, 2000, 2010)
themes_to_open <- c("identification", "demographics", "education")
n = 30000000
read_harmonize_and_save = F
if(read_harmonize_and_save == T){
for(i in 1:6){
ano = variaveis$year[i]
print(paste("===================================================================================", ano))
vars_to_open <- harmonizeIBGE:::list_originalVariables_to_drop(ano, themes = themes_to_open) %>%
unlist() %>%
c(., toupper(.), tolower(.)) %>%
unique()
vars_to_drop <- harmonizeIBGE:::list_originalVariables_to_drop(ano, themes = c("identification", "demographics")) %>%
unlist() %>%
c(tolower(.)) %>%
unique()
if(ano == 1970){
vars_to_open <- c(vars_to_open, "CEM005")
vars_to_drop <- c(vars_to_drop, "CEM005")
}
assign(x = paste0("c_",ano),
value = fread(paste0("Censo ", ano, "/", variaveis$file_person[i]),
select = vars_to_open,
nrows = n) %>%
prepare_to_harmonize(type = "census", year = ano, state_var_name = ifelse(ano == 1970, "CEM005", ""))
)
Sys.sleep(.5);gc()
assign(x = paste0("c_",ano),
value = get(paste0("c_",ano)) %>%
harmonize_themes(themes = c("identification", "demographics")) %>%
filter(age >= 17) %>%
select(-vars_to_drop))
gc();Sys.sleep(.5);gc()
assign(x = paste0("c_",ano),
value = get(paste0("c_",ano)) %>%
harmonize_themes(themes = "education") %>%
filter(!is.na(educationAttainment)) %>%
setDT())
gc();Sys.sleep(.5);gc()
}
setwd("e:/censos_tmp")
for(ano in anos){
print(ano)
  write_fst(x = get(paste0("c_",ano)), path = paste0("censo_",ano,"_fieldsOfStudy.csv")) # fst binary format, despite the .csv extension
gc()
}
}else{
setwd("e:/censos_tmp")
for(ano in anos){
print(ano)
assign(x = paste0("c_",ano),
value = read_fst(path = paste0("censo_",ano,"_fieldsOfStudy.csv"),as.data.table = T) %>%
select(-idhh, -idperson, -famStatus, -nonrelative, -levelattnd, -literacy) %>%
prepare_to_harmonize(type = "census", year = ano, state_var_name = "CEM005")
)
gc();Sys.sleep(.2);gc()
}
}
c_1991[, wgtperson := wgtperson/(10^8)]
#======================================================================================================
#labels_isced <- readxl::read_xlsx(crosswalk_location, sheet = "Fields_Codes_labels") %>%
# select(isced_code_level3, isced_label_level3_en) %>%
# rename(isced = isced_code_level3,
# label = isced_label_level3_en) %>%
# setDT(key = "isced")
ano = 1960 # scratch value from interactive testing; overwritten by the loop below
freq_isced_aggreg = NULL
for(ano in anos){
print(ano)
assign(x = paste0("c_",ano),
value = get(paste0("c_",ano)) %>%
build_education_fieldsOfStudy(aggregated = T)
)
gc();Sys.sleep(.3);gc()
table = get(paste0("c_",ano))[, freq(label_fieldsOfStudy, w = wgtperson)]
freq_isced_aggreg_i = tibble(ano = ano,
isced = attr(table, "dimnames")[[1]],
freq_abs = round(table[,1],digits = 0),
freq_rel = round(table[,3], digits =3)) %>%
filter(complete.cases(.))
freq_isced_aggreg <- bind_rows(freq_isced_aggreg, freq_isced_aggreg_i)
gc();Sys.sleep(.1);gc()
}
freq_isced = NULL
for(ano in c(1980,1991,2000,2010)){
print(ano)
assign(x = paste0("c_",ano),
value = get(paste0("c_",ano)) %>%
build_education_fieldsOfStudy(aggregated = F)
)
gc();Sys.sleep(.3);gc()
table = get(paste0("c_",ano))[, freq(label_fieldsOfStudy, w = wgtperson)]
freq_isced_i = tibble(ano = ano,
isced = attr(table, "dimnames")[[1]],
freq_abs = round(table[,1],digits = 0),
freq_rel = round(table[,3], digits =3)) %>%
filter(complete.cases(.))
freq_isced <- bind_rows(freq_isced, freq_isced_i)
gc();Sys.sleep(.1);gc()
}
#c_1980[v525 == 85, fieldsOfStudy := 999]
#c_1980[, freq(fieldsOfStudy)]
freq_abs_isced_wide_aggreg <- freq_isced_aggreg %>%
select(-freq_rel) %>%
filter(!(isced=="Total")) %>%
spread(key = ano, value = freq_abs) %>%
setDT()
freq_rel_isced_wide_aggreg <- freq_isced_aggreg %>%
select(-freq_abs) %>%
mutate(freq_rel = round(freq_rel, 3)) %>%
filter(!(isced=="Total")) %>%
spread(key = ano, value = freq_rel) %>%
setDT()
freq_abs_isced_wide <- freq_isced %>%
select(-freq_rel) %>%
filter(!(isced=="Total")) %>%
spread(key = ano, value = freq_abs) %>%
setDT()
freq_rel_isced_wide <- freq_isced %>%
select(-freq_abs) %>%
filter(!(isced=="Total")) %>%
mutate(freq_rel = round(freq_rel, 3)) %>%
spread(key = ano, value = freq_rel) %>%
setDT()
|
5ace1e957a32ec7f8ce702bcefd13bc3a46af9c3 | b4a58ba2dbffed266dc06f6dcb32f22797271f2e | /04.heatmap.R | 7ed517177dfd0895e1a617c8823366557784d54d | [] | no_license | SamYangBio/R | 063476d8cb29e1f00d63152a45d4ab78d34067a2 | 7c756d2c74071801f46c33646c7ba19104aef987 | refs/heads/master | 2020-03-24T23:26:01.455474 | 2018-10-14T10:33:13 | 2018-10-14T10:33:13 | 143,134,931 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 761 | r | 04.heatmap.R | args <- commandArgs(TRUE)
input <- args[1]
out <- args[2]
library('gplots')
a = read.table(input, sep = "\t", header = T, check.names = F)
lie = ncol(a)
hang = nrow(a)
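# scale the PDF height with the number of rows, clamped to the range [7, 200] inches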
high_pre = hang/10
high = round(high_pre, digits = 0)
high = high + 10
high = ifelse(high < 7, 7, ifelse(high>200, 200, high))
x = a[,2:lie]
y = as.matrix(x)
y = log10(y+1)
rownames(y) = a[,1]
scale = ifelse(ncol(y)>2, "row", "none")
pdf(file = out,height=high)
par(oma = c(3,3,3,5))
heatmap.2(y,
Colv=NA,dendrogram=('row'), # make x axis clusterting
col=colorRampPalette(c("navy","white","firebrick3")),
trace = "none",
cexCol=1,
srtCol=45,
scale = scale,
cexRow = 0.6,
lhei=c(10,100)
)#,labRow=F)
dev.off()  # close the PDF device so the file is flushed and written completely
|
247bec4c0d7037087dfebaca256a9699fac30649 | d2061a237532f631fde4eb2651093bed8593c403 | /man/Figure3_5b.Rd | e92d24fd6bac00379580438e859f8e2458671c58 | [] | no_license | cran/sur | 23fb12f03ea816e80e0b2fd0a1491f2ca81218dd | 612cc5e2391a019ed157f90958d31f40e9466560 | refs/heads/master | 2020-12-22T19:13:21.454433 | 2020-08-25T21:30:02 | 2020-08-25T21:30:02 | 236,903,605 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 437 | rd | Figure3_5b.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-figures.R
\docType{data}
\name{Figure3_5b}
\alias{Figure3_5b}
\title{Figure 3.5(B) Data}
\format{A data frame with 75 rows and 1 variable:
\describe{
\item{DistnB}{numeric score from a symmetric distribution}
}}
\usage{
Figure3_5b
}
\description{
This dataset contains simulated scores used to generate Figure 3.5(B) of Chapter 3.
}
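\examples{
## quick looks at the simulated scores (assumes the package providing
## Figure3_5b is attached)
str(Figure3_5b)
hist(Figure3_5b$DistnB)
}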
\keyword{datasets}
|
4c1eb42eb0fac1178462f410ad6b45d2d65a9bed | 67222f69dd1a5b5ced1d28df833a303924dbde35 | /2. Algorithms on Datasets/Supervised Machine Learning Techniques/Decision Tree/Company_Data/Company_Decision+Tree.R | dfd991d189ae2a2d3b632e7d4a1b72a2f44cb5f0 | [] | no_license | mandarmakhi/DataScience-R-code | 4f75906507e303fb9b438b99a5eab0a74bcc77f6 | 8c1728b306e53668b1814283da9936503e0554b9 | refs/heads/master | 2023-01-19T04:55:11.171455 | 2020-11-28T07:59:55 | 2020-11-28T07:59:55 | 263,417,867 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,263 | r | Company_Decision+Tree.R | #Decision Tree
#A cloth manufacturing company is interested to know about the segment or attributes causes high sale.
#install.packages("caret")
#install.packages("C50")
library(C50)
library(factoextra)
library(caret)
library(gmodels)
#Lets Import the Dataset
company <- read.csv("C:/Users/Mandar/Desktop/data/assignments/decision tree/Company_Data/Company_Data.csv")
View(company)
attach(company)
head(company)
summary(company) #Gives the Summary of the Dataset
str(company) #Gives the Structure of the Dataset
#Convert the character columns to factors inside the data frame itself;
#assigning to the attached copies would leave `company` unchanged
company$ShelveLoc <- as.factor(company$ShelveLoc)
company$Urban <- as.factor(company$Urban)
company$US <- as.factor(company$US)
High <- ifelse(Sales > 8,"Yes","No")
High <- as.factor(High)
#Lets Combine it With the Main Dataset
company_new <- cbind(company[,-1], High) #Here we Exclude the Sales Column as we have Derived a responsive Variable High using it
#Lets Create the Training and Testing sets
indatapartition <- createDataPartition(company_new$High, p=.70, list = F) #This will Hold 70% of the whole dataset
training <- company_new[indatapartition,]
testing <- company_new[-indatapartition,]
#Lets Fit the Model for Entire Data Now
tree_model <- C5.0(High~. , data =company_new)
summary(tree_model) #Here we can See there was an Error of 6.5%
tree_model
#Lets Plot the Tree
plot(tree_model)
#Lets Build the Model for Training Set and Then Predict he Values for Testing set
t_model <- C5.0(High~. , data = training, method ="class")
summary(t_model) #Here we can see that there is an Error rate of 8.5%
t_model
pred <- predict.C5.0(t_model, newdata = testing)
pred
a<- table(testing$High, pred)
Accuracy <- sum(diag(a))/sum(a)
Accuracy
CrossTable(testing$High, pred)
#So here we can see that there are Misclassifications, we can improve the models using Bagging and Boosting techniques
#We use For loop for bagging in order to make multiple models
acc<- c()
for (i in 1:50) #This will fit 50 bagged models
{
  print(i)
  #Build a model on a bootstrap sample of the training data (bagging);
  #without resampling, every iteration would refit the identical tree
  boot <- training[sample(nrow(training), replace = TRUE), ]
  fittree <- C5.0(High~. , data = boot, method = "class", trials = 10) #Trials is a Boosting Parameter
  pred2<- predict.C5.0(fittree,testing[,-11])
  ab<- table(testing$High, pred2)
  #To save the Accuracy of the models
  acc<- c(acc,sum(diag(ab))/sum(ab))
}
}
summary(acc)
summary(fittree)
plot(fittree)
|
2d2fac878ca51a454eb0d917e7b9081b962bd167 | c8a8ad16a9633de5bf1bd2917ae63702ae58d4ac | /R/allele_divtables-class.R | 1829b19b86b06fb84fecfae1b02ae9168f84828b | [] | no_license | douglasgscofield/dispersalDiversity | 5487b5cc8ce31e9fc64e5398afa71db9a12a6ac6 | 3c826115e6fca06b801346452e2659ec5142a715 | refs/heads/main | 2023-04-14T08:17:59.717926 | 2021-03-23T16:44:16 | 2021-03-23T16:44:16 | 4,307,834 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,852 | r | allele_divtables-class.R | #' @include divtable-class.R
NULL
#' List of divtables holding allele diversity data
#'
#' An object of class \code{allele_divtables} is a list of
#' \code{\link{divtable}} objects, each representing sites-by-allele counts
#' data for a single genetic locus. Row and column names are
#' the site names and individual alleles, respectively.
#' This is the basic data object for analysis of genetic data using the
#' \code{\link{dispersalDiversity}} package. It is accepted by the
#' function \code{\link{diversity}}, which generates descriptive statistics,
#' and the functions \code{\link{alphaDiversityTest}},
#' \code{\link{gammaContrastTest}} and others that test for differences
#' in the structure of genetic diversity.
#'
#' Assembling an \code{allele_divtables} by hand is labourious. More
#' typically, genotype data will be in a class \code{\link{genalex}} object
#' and then converted to \code{\link{allele_divtables}} using
#' \code{\link{createAlleleTables}}. The \code{\link{createAlleleTables}}
#' and \code{\link{as.allele_divtables}} functions will attempt to convert
#' non-\code{\link{genalex}} objects using to \code{\link{genalex}} format
#' using \code{\link[readGenalex]{as.genalex}}, but this may not be successful.
#'
#' @examples
#'
#' ## One possible way to plot an \code{allele_divtables} object, plotting
#' ## each locus as a separate divtable in a two-column format
#'
#' data(Qagr_pericarp_genotypes)
#' pal <- createAlleleTables(Qagr_pericarp_genotypes)
#' par(mfrow = c(round(length(pal) / 2), 2))
#' lapply(names(pal), function(n) plot(pal[[n]], main = n, l2 = NULL, las = 2))
#'
#' @name allele_divtables-class
#'
#' @aliases allele_divtables
#'
NULL
#' Generate an allele_divtables object from a class genalex object
#'
#' S3 method to convert an object of class \code{genalex} to an object of
#' class \code{\link{allele_divtables}}. a list of \code{\link{divtable}}
#' objects representing sites-by-allele counts. This is an S3 generic
#' so that other methods might be written to convert other genetic
#' formats.
#'
#' If \code{x} is not of class \code{genalex}, an attempt is made to convert
#' it to class \code{genalex} using \code{\link[readGenalex]{as.genalex}}
#' from the
#' \href{http://cran.r-project.org/web/packages/readGenalex/index.html}{readGenalex}
#' package. An error will be produced if \code{x} is of a class or format
#' that cannot be converted to class \code{genalex}.
#'
#' Another option for converting genotypes to \code{\link{allele_divtables}}
#' objects is to convert to one of the formats recognised by
#' \code{\link[readGenalex]{as.genalex}}.
#'
#' Although missing alleles may be common in genotypic data, there is no
#' provision in \code{\link{diversity}} and other functions in this package
#' for missing data. Missing alleles are recognised and excluded if they
#' match one of the values in \code{exclude}.  The number of missing alleles
#' recognised is reported if \code{quiet = FALSE}.
#'
#' @note \code{as.allele_divtables} is a synonym, except when \code{x} is of
#' class \code{list}: if every element of \code{x} is of class
#' \code{\link{divtable}}, then the class of \code{x} is changed to
#' \code{allele_divtables}.  If \code{x} is of class
#' \code{allele_divtables} it is returned unchanged.
#'
#' @param x Object of class \code{genalex} holding genotypes to be converted,
#' or of a class and format that can be converted to \code{genalex} using
#' \code{\link[readGenalex]{as.genalex}}
#'
#' @param exclude Values in \code{x} that indicate missing alleles, these are
#' excluded from the \code{divtable} entries for each locus
#'
#' @param quiet If \code{TRUE}, report the number of missing alleles excluded
#'
#' @return Object of class \code{\link{allele_divtables}}
#'
#' @examples
#'
#' ## Use genotype data from readGenalex package, already loaded
#' data(Qagr_pericarp_genotypes)
#' pal <- createAlleleTables(Qagr_pericarp_genotypes)
#' str(pal)
#'
#' ## The divtable for the first locus
#' pal[[1]]
#'
#' ## allele_divtables removes and can report missing data
#' data(Qagr_adult_genotypes)
#' aal <- createAlleleTables(Qagr_adult_genotypes, quiet = FALSE)
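#'
#' ## a plain list of divtable objects can also be upgraded in place
#' ## (a sketch; reuses the per-locus tables from pal above)
#' adt <- as.allele_divtables(unclass(pal))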
#'
#' @export createAlleleTables as.allele_divtables as.allele_divtables.default as.allele_divtables.genalex as.allele_divtables.list as.allele_divtables.allele_divtables
#'
#' @aliases createAlleleTables as.allele_divtables as.allele_divtables.default as.allele_divtables.genalex as.allele_divtables.list as.allele_divtables.allele_divtables
#'
#' @name createAlleleTables
#'
NULL
createAlleleTables <- function(x, ...) UseMethod("createAlleleTables")
#' @rdname createAlleleTables
#'
#' @export
#'
createAlleleTables.default <- function(x, ...)
{
if (inherits(x, 'data.frame') || inherits(x, 'loci')) {
x <- as.genalex(x)
return(createAlleleTables.genalex(x))
} else {
stop("Cannot convert to class 'allele_divtables', perhaps ",
deparse(substitute(x)), " can be converted to class 'genalex'?",
" See '?readGenalex::as.genalex'.")
}
}
#' @rdname createAlleleTables
#'
#' @export
#'
createAlleleTables.allele_divtables <- function(x, ...)
x
#' @rdname createAlleleTables
#'
#' @export
#'
createAlleleTables.genalex <- function(x, exclude = c(NA, "0"),
quiet = TRUE, ...)
{
ploidy <- attr(x, "ploidy")
lc <- attr(x, "locus.columns")
ln <- attr(x, "locus.names")
population <- attr(x, "pop.title")
ans <- list()
ex <- list()
for (il in 1:length(lc)) {
alleles <- as.vector(unlist(x[, lc[il]:(lc[il] + ploidy - 1)]))
ex[[ ln[il] ]] <- sum(alleles %in% exclude)
pop <- rep(x[[population]], ploidy)
ans[[ ln[il] ]] <- as.divtable(table(pop, alleles, exclude = exclude))
}
if (sum(unlist(ex)) && ! quiet)
cat(sprintf("Excluding %d entries based on 'exclude = c(%s)'\n",
sum(unlist(ex)), paste(collapse = ", ", exclude)))
structure(ans, class = c('allele_divtables', 'list'))
}
#----------------------------------
#
# synonyms, documented and exported in createAlleleTables above
as.allele_divtables <- function(x, ...) UseMethod("as.allele_divtables")
as.allele_divtables.default <- function(x, ...)
createAlleleTables.default(x, ...)
as.allele_divtables.genalex <- function(x, ...)
createAlleleTables.genalex(x, ...)
# These are not synonyms but are still documented in createAlleleTables above
#
as.allele_divtables.list <- function(x, ...)
{
if (all(sapply(x, inherits, 'divtable')))
        structure(x, class = c('allele_divtables', 'list'))
else stop(deparse(substitute(x)),
" cannot be converted to class allele_divtables,",
" all members must be class 'divtable'")
}
as.allele_divtables.allele_divtables <- function(x, ...)
x
|
fa09b7ad778847af8f23ec06c4004c67a6c9932c | 059a3965261ee3ce2b703231778b66e500ed769a | /CartoDB/man/cartodb.row.insert.Rd | 9e050665f5ec1c3868d815c894b884ee535ebb79 | [] | no_license | alexsingleton/cartodb-r | 8a43c14e5af6bf18fa8fe4177817a0a5bcb248ba | ea893c4b37edee1220fc279d50e09fb1c8c8de48 | refs/heads/master | 2020-06-09T12:47:01.919613 | 2016-12-09T13:35:34 | 2016-12-09T13:35:34 | 76,037,782 | 2 | 0 | null | 2016-12-09T13:29:41 | 2016-12-09T13:29:41 | null | UTF-8 | R | false | false | 1,129 | rd | cartodb.row.insert.Rd | \name{cartodb.row.insert}
\alias{cartodb.row.insert}
\title{
Insert a single record into CartoDB
}
\description{
Insert a single record into CartoDB
}
\usage{
cartodb.row.insert(name=NULL,columns=NULL,values=NULL, quoteChars=TRUE)
}
\arguments{
\item{name}{
The name of a table you have in CartoDB
}
\item{columns}{
A list of the columns you are providing values for
}
\item{values}{
A list of values for your columns
}
\item{quoteChars}{
If \code{TRUE}, any value in \code{values} whose class is \code{character} will be automatically quoted in the generated SQL
}
}
\author{
Andrew Hill <[email protected]>
}
\examples{
## Insert a new row into CartoDB and save the cartodb_id of the newly created row
cartodb_id <- cartodb.row.insert(name="r_test",columns=list("country","latitude","longitude"),values=list("United States",40.714,74.006))
}
|
776c25a422350e10616e8cc7b5ee9e57605989eb | fe267079c286e35b4989240c407d69e83679f3f9 | /man/prepare.Rd | 9f2482362ac45a12268af1224e17738bba86209c | [] | no_license | cran/clustTool | f9c1bb306242520332f235aa532928049c67da44 | d238b796bcfd6926794a4e378ce0012a55eafd21 | refs/heads/master | 2016-09-05T09:32:29.184558 | 2010-08-16T00:00:00 | 2010-08-16T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,387 | rd | prepare.Rd | \name{prepare}
\alias{prepare}
\title{ Function for transformation and standardisation }
\description{
This function can be used for transformation and standardisation of the data.
}
\usage{
prepare(x, scaling = "classical", transformation = "logarithm", powers = "none")
}
\arguments{
\item{x}{ data frame or matrix }
\item{scaling}{ Scaling of the data.
Possible values are: \dQuote{classical}, \dQuote{robust}, \dQuote{none} }
\item{transformation}{ Transformation of the data.
Possible values are: \dQuote{logarithm}, \dQuote{boxcox}, \dQuote{bcOpt}, \dQuote{logratio},\dQuote{logcentered},\dQuote{iso},\dQuote{none} }
\item{powers}{ Powers for Box-Cox transformation for each variable (if \dQuote{boxcox} is chosen) }
}
\details{
\bold{Transformation}:
\dQuote{logarithm} replaces the values of x with the natural logarithm by using function \sQuote{log}.
\dQuote{boxcox} applies a Box-Cox transformation to each variable. Powers must be specified.
\dQuote{bcOpt} applies a Box-Cox transformation to each variable. Powers are calculated with function \sQuote{box.cox.powers}.
\dQuote{none} is also possible.
Transformation before clustering: Cluster analysis in general does not need normally distributed data. However, it is advisable that heavily skewed data are first transformed to a more symmetric distribution. If a good cluster structure exists for a variable we can expect a distribution which has two or more modes. A transformation to more symmetry will preserve the modes but remove large skewness.
\bold{Standardisation}:
\dQuote{classical} applies a \emph{z}-transformation to each variable by using function \sQuote{scale}.
\dQuote{robust} applies a robustified \emph{z}-transformation based on median and MAD.
\dQuote{none} is also possible.
Standardisation before clustering: Standardisation is needed if the variables show a striking difference in the amount of variability.
}
\value{
Transformed and standardised data.
}
\author{ Matthias Templ }
\seealso{ \code{\link{scale}}, \code{\link[car]{box.cox.powers}} }
\examples{
require(mvoutlier)
data(humus)
x <- humus[,4:40]
xNew <- prepare(x, scaling="classical", transformation="logarithm")
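## a sketch of the robust alternative on the same subset
xRob <- prepare(x, scaling="robust", transformation="none")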
}
\keyword{ manip }
|
443d6f2f7ee000a470d256dcb826509db41a19de | 13cdc54d90f2ba332f558eaebd799de1edfe4a17 | /scripts/visualizations.R | 48dde87aa02f04c7fb3b92fd8302e6d007970ea1 | ["CC0-1.0"] | permissive | UACC-renedherrera/UAZCC_deliverables | adecdaf55e811ea6ef1cfda82a243aac01cce1513 | dcadbb69d121776c7538c7217ecd8876f5098bc0 | refs/heads/master | 2023-03-08T21:51:32.340103 | 2021-02-27T06:55:16 | 2021-02-27T06:55:16 | 282,557,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,298 | r | visualizations.R | #### set up ####
# packages
library(here)
library(tidyverse)
library(ggthemes)
# color palette
blues_8 <- c("#f7fbff",
"#deebf7",
"#c6dbef",
"#9ecae1",
"#6baed6",
"#4292c6",
"#2171b5",
"#084594")
blues_3 <- c("#deebf7",
"#9ecae1",
"#3182bd")
mixed_8 <- c("#deebf7",
"#9ecae1",
"#3182bd",
"#edf8e9",
"#bae4b3",
"#74c476",
"#31a354",
"#006d2c")
#### read data ####
df <- read_rds("data/tidy/data_for_visualizations.rds")
distinct(df, category)
str(df)
#### race ####
table_race <- df %>%
filter(category == "Race") %>%
group_by(area)
table_race$attribute <- as.factor(table_race$attribute)
table_race$attribute <- ordered(table_race$attribute, levels = c("Non-Hispanic White", "Hispanic", "American Indian", "Non-Hispanic Black"))
table_race %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "stack", color = "black", alpha = 0.666) +
labs(
title = "Proportion of each Race and Ethnicity Category in Catchment Geographies",
subtitle = "",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("demographics_race.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
#### demographics ####
# age ----
table_demographics <- df %>%
filter(category == "Demographics",
area != "Catchment")
unique(table_demographics$area)
table_demographics$area <- ordered(table_demographics$area, levels = c("USA", "AZ", "Cochise", "Pima", "Pinal", "Santa Cruz", "Yuma"))
unique(table_demographics$attribute)
table_demographics_age <- table_demographics %>%
filter(attribute == "Median Age")
unique(table_demographics_age$attribute)
table_demographics_age$attribute <- as.factor(table_demographics_age$attribute)
table_demographics_age %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value)) +
geom_bar(stat = "identity", fill = "#0C234B") +
geom_label(aes(label = value)) +
labs(
title = "Median Age in Catchment Geographies",
subtitle = "",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("demographics_age.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# sex ----
table_demographics <- df %>%
filter(category == "Demographics")
unique(table_demographics$attribute)
table_demographics_sex <- table_demographics %>%
filter(attribute == "Female")
unique(table_demographics_sex$attribute)
table_demographics_sex$attribute <- as.factor(table_demographics_sex$attribute)
table_demographics_sex %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value)) +
geom_bar(stat = "identity", fill = "#0C234B") +
geom_label(aes(label = round(value, digits = 2))) +
labs(
title = "Female Proportion in Catchment Geographies",
subtitle = "",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("demographics_sex.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# education attainment ----
table_demographics <- df %>%
filter(category == "Demographics")
unique(table_demographics$attribute)
table_demographics_edu <- table_demographics %>%
filter(attribute == "High School Graduation" | attribute == "Some College" | attribute == "College Graduate")
unique(table_demographics_edu$attribute)
table_demographics_edu$attribute <- as.factor(table_demographics_edu$attribute)
table_demographics_edu$attribute <- ordered(table_demographics_edu$attribute, levels = c("High School Graduation", "Some College", "College Graduate"))
table_demographics_edu %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "dodge", color = "black", alpha = 0.666) +
labs(
title = "Proportion of Educational Attainment in Catchment Geographies",
subtitle = "Proportion of population with high school diploma or college",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("demographics_edu.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# income ----
table_demographics <- df %>%
filter(category == "Demographics")
table_demographics$area <- ordered(table_demographics$area, levels = c("USA", "AZ", "Catchment", "Cochise", "Pima", "Pinal", "Santa Cruz", "Yuma"))
unique(table_demographics$area)
unique(table_demographics$attribute)
table_demographics_income <- table_demographics %>%
filter(attribute == "Median Family income" | attribute == "Mean family income")
unique(table_demographics_income$attribute)
table_demographics_income$attribute <- as.factor(table_demographics_income$attribute)
table_demographics_income %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value)) +
geom_bar(stat = "identity", position = "dodge", color = "black", fill = "#0C234B") +
geom_label(aes(label = value)) +
facet_wrap("attribute") +
labs(
title = "Mean and Median Income in Catchment Geographies",
subtitle = "in US dollars",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("demographics_income.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# poverty ----
table_demographics <- df %>%
filter(category == "Demographics")
unique(table_demographics$attribute)
table_demographics_poverty <- table_demographics %>%
filter(attribute == "Food Insecurity" | attribute == "Households Below Poverty Level" | attribute == "Unemployment" | attribute == "Uninsured")
unique(table_demographics_poverty$attribute)
table_demographics_poverty$attribute <- as.factor(table_demographics_poverty$attribute)
table_demographics_poverty %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "dodge", color = "black", alpha = .666) +
labs(
title = "Indicators of Poverty in Catchment Geographies",
subtitle = "",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates;
U.S. Bureau of Labor Statistics May 2020;
Map the Meal Gap 2020"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("demographics_poverty.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
#### Health Behavior Risk Factors ####
# physical activity ----
table_risk <- df %>%
filter(category == "Health Behavior Risk Factor",
area != "Catchment")
unique(table_risk$area)
table_risk$area <- ordered(table_risk$area, levels = c("USA", "AZ", "Cochise", "Pima", "Pinal", "Santa Cruz", "Yuma"))
unique(table_risk$attribute)
table_risk_pa <- table_risk %>%
filter(attribute == "No Leisure-Time Physical Activity" | attribute == "Adult Obesity" | attribute == "Diabetes")
unique(table_risk_pa$attribute)
table_risk_pa$attribute <- as.factor(table_risk_pa$attribute)
table_risk_pa$attribute <- ordered(table_risk_pa$attribute,
levels = c("No Leisure-Time Physical Activity", "Adult Obesity", "Diabetes"))
table_risk_pa %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "dodge", color = "black", alpha = .666) +
geom_label(aes(label = value)) +
labs(
title = "Health Behavior and Risk Factors in Catchment Geographies",
subtitle = "Rates of Physical Activity, Obesity, and Diabetes",
y = "",
x = "",
caption = "Source: 2018 BRFSS Survey Data;
US Diabetes Surveillance System"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("risk_pa.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# alcohol and smoking ----
table_risk <- df %>%
filter(category == "Health Behavior Risk Factor",
area != "Catchment")
unique(table_risk$area)
table_risk$area <- ordered(table_risk$area, levels = c("USA", "AZ", "Cochise", "Pima", "Pinal", "Santa Cruz", "Yuma"))
unique(table_risk$attribute)
table_risk_behavior <- table_risk %>%
filter(attribute == "Excessive drinking (BRFSS)" | attribute == "Adult Smoking")
unique(table_risk_behavior$attribute)
table_risk_behavior$attribute <- as.factor(table_risk_behavior$attribute)
table_risk_behavior %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "dodge", color = "black", alpha = .666) +
labs(
title = "Health Behavior and Risk Factors in Catchment Geographies",
subtitle = "Rates of Alcohol and Smoking Use",
y = "",
x = "",
caption = "Source: 2017 & 2018 Behavioral Risk Factor Surveillance System"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("risk_smoking.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# vaccination ----
table_risk <- df %>%
filter(category == "Health Behavior Risk Factor",
area != "Catchment")
unique(table_risk$area)
table_risk$area <- ordered(table_risk$area, levels = c("USA", "AZ", "Cochise", "Pima", "Pinal", "Santa Cruz", "Yuma"))
unique(table_risk$attribute)
table_risk_vac <- table_risk %>%
filter(attribute == "HPV vaccination (age range 13-17) 3+ doses")
unique(table_risk_vac$attribute)
table_risk_vac %>%
filter(race == "All") %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "dodge", color = "black", alpha = .666) +
labs(
title = "Health Behavior and Risk Factors in Catchment Geographies",
subtitle = "HPV Vaccination",
y = "",
x = "",
caption = "Source: 2018 National Immunization Survey;
2017 Immunizations for Adolescents Completion Rates, AHCCCS
"
) +
theme_clean() +
theme(legend.position = "") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("risk_vaccination.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
#### Screening ####
table_screening <- df %>%
filter(category == "Screening",
area != "Catchment",
attribute != "Cervical Cancer Screening") %>%
group_by(area)
unique(table_screening$area)
table_screening$area <- ordered(table_screening$area, levels = c("USA", "AZ", "Cochise", "Pima", "Pinal", "Santa Cruz", "Yuma"))
unique(table_screening$attribute)
table_screening$attribute <- as.factor(table_screening$attribute)
table_screening %>%
arrange(desc(attribute)) %>%
ggplot(mapping = aes(x = area, y = value, fill = attribute)) +
geom_bar(stat = "identity", position = "dodge", color = "black", alpha = 0.666) +
labs(
title = "Cancer Screening Rates in Catchment Geographies",
subtitle = "All races and sexes combined",
y = "",
x = "",
caption = "Source: Directly Estimated 2018 BRFSS Data;
2008-2010 County Level Modeled Estimate Combining BRFSS & NHIS"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_fill_brewer(palette = "Accent")
# save plot to file
ggsave("risk_screening.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# demographic disparities ----
unique(df$attribute)
# all 8 geographic areas
df %>%
filter(attribute == "College Graduate" |
attribute == "Rural" |
attribute == "Hispanic" |
attribute == "Households Below Poverty Level" |
attribute == "Food Insecurity" |
attribute == "Unemployment",
race == "All") %>%
ggplot(mapping = aes(x = attribute, y = value, fill = area)) +
geom_bar(color = "black", stat = "identity", position = "dodge", alpha = .5) +
labs(
title = "",
subtitle = "",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates;
Map the Meal Gap 2020"
) +
theme_clean() +
theme(legend.position = "bottom") +
scale_y_continuous(labels = scales::percent_format()) +
scale_fill_manual(values = mixed_8)
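# `mixed_8` is assumed to be an 8-colour vector defined earlier in the
# script (e.g. something like RColorBrewer::brewer.pal(8, "Set2")).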
# save plot to file
ggsave("demographic_disparities_01_complete.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
# only USA, AZ, Catchment
df %>%
filter(attribute == "College Graduate" |
attribute == "Rural" |
attribute == "Hispanic" |
attribute == "Households Below Poverty Level" |
attribute == "Food Insecurity" |
attribute == "Unemployment",
race == "All",
area == "USA" |
area == "AZ" |
area == "Catchment") %>%
ggplot(mapping = aes(x = attribute, y = value, fill = area)) +
geom_bar(color = "black", stat = "identity", position = "dodge", alpha = .5) +
labs(
title = "",
subtitle = "",
y = "",
x = "",
caption = "Source: U.S. Census Bureau, 2014-2018 American Community Survey 5-Year Estimates;
Map the Meal Gap 2020;
May 2020 U.S. Bureau of Labor Statistics") +
theme_clean() +
theme(legend.position = "bottom") +
scale_y_continuous(labels = scales::percent_format()) +
scale_fill_manual(values = blues_3)
# save plot to file
ggsave("demographic_disparities_01_us_az_catchment.svg",
width = 20,
height = 11.25,
device = svg,
path = "figures/graphics/",
scale = .5
)
#### IF ####
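# Reading the expected tables below (inferred from the expectations
# themselves): node 0 is the function header, positive ids are code blocks in
# source order, and negative ids are exit/merge points ("end", "return",
# "stop"); edges leaving an "if" node carry "y"/"n" labels.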
# simple if call without else and empty body
test_that("flow_data works with simple if and empty body",{
fun <- function(x) {
if(x) {}
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "if", "standard", "end", "return"),
code_str = c("fun(x)", "if (x)", "", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "y", "", "n", ""),
arrow = c("->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
# simple if call without else and a symbol in body
test_that("flow_data works with simple if",{
fun <- function(x) {
if(x) foo
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -1, 3),
block_type = c("header", "if", "standard", "end", "return"),
code_str = c("fun(x)", "if (x)", "foo", "", ""),
label = c("", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, -1),
to = c(1, 2, -1, -1, 3),
edge_label = c("", "y", "", "n", ""),
arrow = c("->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
# simple if else call
test_that("flow_data works with simple if else",{
fun <- function(x) {
if(x) foo else bar
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, 3, -1, 4),
block_type = c("header", "if", "standard", "standard", "end", "return"),
code_str = c("fun(x)", "if (x)", "foo", "bar", "", ""),
label = c("", "", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, 3, -1),
to = c(1, 2, -1, 3, -1, 4),
edge_label = c("", "y", "", "n", "", ""),
arrow = c("->", "->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
# simple if else call without else and a symbol in body
# simple if else call without else and a call in body
# simple if else call without else and 2 calls in body
# if else call returning on the left
test_that("flow_data works returning on the yes branch",{
fun <- function(x) {
if(x) return(foo) else bar
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -2, 3, -1, 4),
block_type = c("header", "if", "standard", "return", "standard", "end", "return"),
code_str = c("fun(x)", "if (x)", "return(foo)", "", "bar", "", ""),
label = c("", "", "", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, 3, -1),
to = c(1, 2, -2, 3, -1, 4),
edge_label = c("", "y", "", "n", "", ""),
arrow = c("->", "->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
# if else call stopping on the right
test_that("flow_data works stopping on the no branch",{
fun <- function(x) {
if(x) foo else stop(bar)
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, 3, -3, -1, 4),
block_type = c("header", "if", "standard", "standard", "stop", "end", "return"),
code_str = c("fun(x)", "if (x)", "foo", "stop(bar)", "", "", ""),
label = c("", "", "", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, 3, -1),
to = c(1, 2, -1, 3, -3, 4),
edge_label = c("", "y", "", "n", "", ""),
arrow = c("->", "->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
# if else call stopping on the left AND returning on the right
test_that("flow_data works stopping on the yes branch and returning on the right branch",{
fun <- function(x) {
if(x) stop(foo) else return(bar)
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, -2, 3, -3, 4),
block_type = c("header", "if", "standard", "stop", "standard", "return", "return"),
code_str = c("fun(x)", "if (x)", "stop(foo)", "", "return(bar)", "", ""),
label = c("", "", "", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 1, 3),
to = c(1, 2, -2, 3, -3),
edge_label = c("", "y", "", "n", ""),
arrow = c("->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
# simple if call with a nested if else call
test_that("flow_data works with nested if calls",{
fun <- function(x) {
if(x) if(y) foo else bar
}
data <- flow_data(fun)
# flow_data(fun)
# dput2(data$nodes[1:4])
# dput2(data$edges)
expect_equal(
data$nodes[1:4],
data.frame(
id = c(0, 1, 2, 3, 4, -2, -1, 5),
block_type = c("header", "if", "if", "standard", "standard", "end", "end", "return"
),
code_str = c("fun(x)", "if (x)", "if (y)", "foo", "bar", "", "", ""),
label = c("", "", "", "", "", "", "", ""),
stringsAsFactors = FALSE))
expect_equal(
data$edges,
data.frame(
from = c(0, 1, 2, 3, 2, 4, -2, 1, -1),
to = c(1, 2, 3, -2, 4, -2, -1, -1, 5),
edge_label = c("", "y", "y", "", "n", "", "", "n", ""),
arrow = c("->", "->", "->", "->", "->", "->", "->", "->", "->"),
stringsAsFactors = FALSE))
})
ratio.plot.ade <-
function( M, vnames=NULL, sectext=NULL, main=NULL,xlab=NULL, ylab=NULL, legenlab=NULL, rlab=NULL, col=NULL, tcol=NULL, bgcol=NULL, lcol=NULL, r=NULL, v=c(0,1), lty=c(1,2), xticks=18, hlines=TRUE, legends=TRUE, logaxe=FALSE, wall=0){
if(any(par('mfg')!=c(1,1,1,1)) & any(par('mai') < c(1.02, 0.82, 0.82, 0.42))){
maidiff<-rep(0, 4)
norm<-c(1.02, 0.82, 0.82, 0.42)
maidiff[par('mai')<norm]<- norm[par('mai')<norm] - par('mai')[par('mai')<norm]
par(mai=par('mai')+maidiff)
}
oldpar<-par(no.readonly =TRUE)
oldpar<-oldpar[-which(names(oldpar)%in%c('usr', 'plt', 'pin', 'fin', 'fig', 'mfg', 'mfcol', 'mfrow', 'omd', 'omi', 'oma'))]
on.exit(par(oldpar))
if(!is.list(M)){
ML<-NULL
ML<-as.list(ML)
ML[[1]]<-M
M<-ML
}
if(is.null(vnames)) legends=FALSE
n <- dim(as.matrix(M[[1]]))[1]
if(!is.null(vnames)) vnames<-as.character(as.vector(vnames[1:n]))
if(is.null(vnames)) vnames<-''
N<-length(M)
if(length(legenlab)<N) legenlab<- c(legenlab, rep('?', N-length(legenlab)))
################################################################################
# Colors
if(length(col)<N) col<-NULL
if(is.null(tcol) & wall==0) tcol<-1
if(is.null(tcol) & wall!=0) tcol<-rgb(0.1,0.1,0.25)
if(is.null(bgcol) & wall==0) bgcol<-1
if(is.null(bgcol) & wall!=0) bgcol<-'#DBE0E8'
if(is.null(lcol) & (wall==0 | wall==2| wall==5)) lcol<-bgcol
if(is.null(lcol) & (wall==1 | wall==4)) lcol<-rgb(1,1,1)
if(is.null(lcol) & (wall==3)) lcol<-a.coladd.ade(bgcol, -50)
if(is.null(col) & N==1) col <- tcol
if(is.null(col) & N>1) col <- a.getcol.ade(N)
rcol=col
col2<-a.coladd.ade(col, 175)
fcol= a.coladd.ade(bgcol, -35)
fcol2=a.coladd.ade(bgcol, -100)
fcol3=bgcol
bgcol2<-a.coladd.ade(bgcol, -50)
#
################################################################################
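# a.coladd.ade() appears to lighten (positive offset) or darken (negative
# offset) a colour; an internal epade helper, described here from its usage.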
if(is.list(M)){
for(k in 1:length(M)) {
M[[k]]<- apply(as.matrix(M[[k]]), c(1,2), as.numeric)
if(logaxe) M[[k]]<-log(M[[k]])
}
xmin <- min(unlist(M), na.rm=TRUE)
xmax <- max(unlist(M), na.rm=TRUE)
}
if(is.matrix(M)){
xmin <- min(M, na.rm=TRUE)
xmax <- max(M, na.rm=TRUE)
}
if(is.data.frame(M)){
M<-as.matrix(M)
xmin <- min(M, na.rm=TRUE)
xmax <- max(M, na.rm=TRUE)
}
if(logaxe) v=log(v[v!=0])
xrange <- xmax - xmin
if(!is.null(r)){
r<-(r*2.5)
print(xmin)
print(xmax)
xlimw <- c(xmin , (xmax+(xrange*r)))
}
if(is.null(r)){
vlength <-max(nchar(c(vnames, rlab, sectext)))
if(wall==4) r= vlength*0.025 + (sqrt((vlength^4))*0.0004)+0.18
if(wall!=4) r= vlength*0.025 + (sqrt((vlength^4))*0.0002)+0.18
xlimw <- c(xmin , (xmax+(xrange*r)))
}
schift <- (xrange)/6
if(N==1) legends<-FALSE
if(legends) ylimw <- c(0.5, (n+(sqrt(n)/2.25)))
if(!legends & is.null(rlab)) ylimw <- c(0.5, (n+0.5))
if(!legends & !is.null(rlab)) ylimw <- c(0.5, (n+0.75))
par(lend='square')
tud <- (diff(ylimw)/20)/n^0.5
################################################################################
################################################################################
#Walltype 0
if(wall==0){
par(col.axis=tcol)
# Plot
plot(0, 0, type='p', pch='', bg=col, main=main, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
abline(v=lastline, col=bgcol, lwd=1)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = bgcol, lty = 1, lwd = 1)
if(hlines & N>1) abline(h=(0:n)+0.5, col=bgcol, lwd=1)
if(legends) lagendram<-legend("topleft", legenlab, pch=c(22,22), col=col, pt.bg=col, horiz=TRUE, bg=rgb(1,1,1, 0), box.col=bgcol, text.col=tcol)
if(!legends) abline(v=v, lty=lty, col=lcol)
if(legends) segments(v, par('usr')[3] , v, par('usr')[4]-lagendram$rect$h, lty=lty, col=lcol)
for(k in 1:N) {
segments(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], col = col[k], lty = 1, lwd = 3)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=col[k], bg=col[k])
}
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=tcol)
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=bgcol, lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=bgcol)
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, cex = 1.1, col=tcol)
mtext(xlab, side = 1, at = lastline, adj = 0, padj=1.5, font=1, col=tcol, cex=par('cex.lab'))
mtext(ylab, line=1.5, side = 2, font=1, col=tcol, cex=par('cex.lab'))
box(col=bgcol)
}
################################################################################
################################################################################
################################################################################
################################################################################
#Walltype 1
if(wall==1){
par(col.axis=tcol)
# Plot
plot(0, 0, type='p', pch='', bg=col, main=main, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
polygon( c(par('usr')[c(1,1)], lastline, lastline), par('usr')[c(3,4,4,3)], col=bgcol, border=FALSE)
abline(v=v, lty=lty, col=lcol)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = rgb(1,1,1), lty = 1, lwd = 1)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,lastline, (0:n)+0.5, col = rgb(1,1,1), lty = 1, lwd = 1)
if(hlines & N>1) segments(lastline, (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = bgcol, lty = 1, lwd = 1)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
for(k in 1:N) {
segments(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], col = a.coladd.ade(col[k], -75), lty = 1, lwd = 3)
segments(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], col = col[k], lty = 1, lwd = 1)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=a.coladd.ade(col[k], -75), bg=col[k])
}
if(legends) legend("topleft", legenlab, fill=col, border=a.coladd.ade(col, -75), horiz=TRUE, bg=bgcol, box.col=rgb(1,1,1), box.lwd=2, text.col=tcol)
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=tcol)
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=tcol, lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=tcol)
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, cex = 1.1, col=tcol)
mtext(xlab, side = 1, at = lastline, adj = 0, padj=1.5, font=1, col=tcol, cex=par('cex.lab'))
mtext(ylab, line=1.5, side = 2, font=1, col=tcol, cex=par('cex.lab'))
box(col=rgb(1,1,1), lwd=1)
}
################################################################################
################################################################################
################################################################################
################################################################################
#Walltype 2
if(wall==2){
par(col.axis=tcol)
# Plot
plot(0, 0, type='p', pch='', bg=col, main=main, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = bgcol, lty = 1, lwd = 1)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,lastline, (0:n)+0.5, col = bgcol, lty = 1, lwd = 1)
if(hlines & N>1) segments(lastline, (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = bgcol, lty = 1, lwd = 1)
abline(v=v, lty=lty, col=lcol)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
for(k in 1:N) {
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = a.coladd.ade(col[k], -75), lty = 1, lwd = 3)
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = col[k], lty = 1, lwd = 1)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=a.coladd.ade(col[k], -75), bg=col[k])
}
abline(v=lastline, col=a.coladd.ade(bgcol, -75), lwd=1)
if(legends) legend("topleft", legenlab, fill=col, border=a.coladd.ade(col, -75), horiz=TRUE, bg=rgb(1,1,1), box.col=a.coladd.ade(bgcol, -75), box.lwd=1, text.col=tcol, text.width=max(strwidth(legenlab,font = 2)))
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=tcol)
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=a.coladd.ade(bgcol, -75), lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=a.coladd.ade(bgcol, -75))
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, cex = 1.1, col=tcol)
mtext(xlab, side = 1, at = lastline, adj = 0, padj=1.5, font=1, col=tcol, cex=par('cex.lab'))
mtext(ylab, line=1.5, side = 2, font=1, col=tcol, cex=par('cex.lab'))
box(col=a.coladd.ade(bgcol, -75))
}
################################################################################
################################################################################
################################################################################
################################################################################
#Walltype 3
if(wall==3){
par(col.axis=tcol)
# Plot
plot(0, 0, type='p', pch='', bg=col, main=main, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
polygon( c(par('usr')[c(1,1)], lastline, lastline), par('usr')[c(3,4,4,3)], col=bgcol, border=FALSE)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = rgb(1,1,1), lty = 1, lwd = 1)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,lastline, (0:n)+0.5, col = a.coladd.ade(bgcol, -50), lty = 1, lwd = 1)
if(hlines & N>1) segments(lastline, (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = a.coladd.ade(bgcol, -50), lty = 1, lwd = 1)
abline(v=v, lty=lty, col=lcol)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
for(k in 1:N) {
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = a.coladd.ade(col[k], -75), lty = 1, lwd = 3)
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = col[k], lty = 1, lwd = 1)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=a.coladd.ade(col[k], -75), bg=col[k])
}
abline(v=lastline, col=a.coladd.ade(bgcol, -75), lwd=1)
if(legends) legend("topleft", legenlab, fill=col, border=a.coladd.ade(col, -75), horiz=TRUE, bg=rgb(1,1,1), box.col=a.coladd.ade(bgcol, -75), box.lwd=1, text.col=tcol)
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=tcol)
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=a.coladd.ade(bgcol, -75), lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=a.coladd.ade(bgcol, -75))
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, cex = 1.1, col=tcol)
mtext(xlab, side = 1, at = lastline, adj = 0, padj=1.5, font=1, col=tcol, cex=par('cex.lab'))
mtext(ylab, line=1.5, side = 2, font=1, col=tcol, cex=par('cex.lab'))
box(col=a.coladd.ade(bgcol, -75))
}
################################################################################
################################################################################
################################################################################
################################################################################
#Walltype 4
if(wall==4){
par(col.axis=tcol)
par(font=2)
# Plot
plot(0, 0, type='p', pch='', bg=col, main=NULL, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
polygon( c(par('usr')[c(1,1)], lastline, lastline), par('usr')[c(3,4,4,3)], col=bgcol, border=FALSE)
polygon( c(lastline, lastline, par('usr')[c(2,2)]), par('usr')[c(3,4,4,3)], col=tcol, border=rgb(1,1,1))
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = rgb(1,1,1), lty = 1, lwd = 1)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,lastline, (0:n)+0.5, col = rgb(1,1,1), lty = 1, lwd = 1)
if(hlines & N>1) segments(lastline, (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = rgb(1,1,1), lty = 1, lwd = 1)
par(xpd=TRUE)
dx<-7/par('din')[1]
dy<-7/par('din')[2]
xr<-(diff(par('usr')[1:2])/11)*dx
yr<-(diff(par('usr')[3:4])/10)*dy
polygon(a.glc(side=c(2,2,4,4), line=c(0,0,0,0)), a.glc(side=3, line=c(0, 2.75, 2.75, 0)), col=tcol, border=rgb(1,1,1))
if(ylab!='' & ylab!=' ') polygon( a.glc(side=2, line=c(2, 2, 0, 0)), a.glc(side=c(1, 3, 3, 1), line=0), col=bgcol, border=rgb(1,1,1))
text(a.glc(side=0), a.glc(side=3, line=1), labels=main, cex = 1.25, font=2, col=rgb(1,1,1), adj=c(0.5,0))
text(a.glc(side=2, line=0.75), a.glc(side=5), labels=ylab, cex = 1.1, font=2, col=tcol, adj=c(0.5,0), srt=90)
par(xpd=FALSE)
abline(v=v, lty=lty, col=lcol)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
for(k in 1:N) {
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = a.coladd.ade(col[k], -75), lty = 1, lwd = 3)
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = col[k], lty = 1, lwd = 1)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=a.coladd.ade(col[k], -75), bg=col[k])
}
if(legends) legend("topleft", legenlab, fill=col, border=rgb(1,1,1), horiz=TRUE, bg=tcol, box.col=rgb(1,1,1), box.lwd=1, text.col=rgb(1,1,1), text.width=max(strwidth(legenlab,font = 2)))
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=rgb(1,1,1))
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=rgb(1,1,1))
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=rgb(1,1,1))
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=tcol, lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=tcol)
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, col=rgb(1,1,1))
mtext(xlab, side = 1, at = lastline, adj = 0, padj=1.5, font=1, col=tcol)
box(col=rgb(1,1,1))
}
################################################################################
################################################################################
################################################################################
################################################################################
#Walltype 5
if(wall==5){
par(col.axis=tcol)
par(col.lab=tcol)
par(col.main=tcol)
newmai<-rep(0, 4)
oldmai<-par('mai')
if(oldmai[2]>0.80 & oldmai[2]<=0.82) newmai[2]<- 0.8-oldmai[3]
if(oldmai[3]>0.75 & oldmai[3]<=0.82) newmai[3]<- 0.75-oldmai[3]
if(oldmai[4]>0.25 & oldmai[4]<=0.42) newmai[4]<- 0.25-oldmai[4]
par(mai=(oldmai+newmai))
# Plot
plot(0, 0, type='p', pch='', bg=col, main=NULL, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
par(xpd=TRUE)
dx<-7/par('din')[1]
dy<-7/par('din')[2]
xr<-(diff(par('usr')[1:2])/10)*dx
yr<-(diff(par('usr')[3:4])/10)*dy
polygon(a.glc(side=2, line=c(3.25, 3.25, 0, 0)), a.glc(side=3, line=c(0.6, 3, 3, 0.6)), col=bgcol, border=tcol)
polygon(a.glc(side=c(2,2,4,4), line=c(0,0,0,0)), a.glc(side=3, line=c(0.6, 3, 3, 0.6)), col=rgb(1,1,1,0), border=tcol)
polygon(a.glc(side=4, line=c(0, 0 ,0.6, 0.6)), a.glc(side=3, line=c(0.6, 3, 3, 0.6)), col=bgcol, border=tcol)
polygon(a.glc(side=2, line=c(3.25, 3.25 ,2.65, 2.65)), a.glc(side=c(1,3,3,1), line=c(2.6, 0.6, 0.6, 2.6)), col=bgcol, border=tcol)
polygon(a.glc(side=4, line=c(0, 0 ,0.6, 0.6)), a.glc(side=c(1, 3, 3, 1), line=0), col=bgcol, border=tcol)
polygon(a.glc(side=2, line=c(3.25, 3.25, 0, 0)), a.glc(side=1, line=c(2.6, 4.5, 4.5, 2.6)), col=bgcol, border=tcol)
polygon(a.glc(side=c(2, 2, 4, 4), line=0), a.glc(side=1, line=c(2.6, 4.5, 4.5, 2.6)), col=rgb(1,1,1,0), border=tcol)
polygon(a.glc(side=4, line=c(0, 0, 0.6, 0.6)), a.glc(side=1, line=c(2.6, 4.5, 4.5, 2.6)), col=bgcol, border=tcol)
text(a.glc(side=0), a.glc(side=3, line=1.5), labels=main, cex = 1.25, font=2, col=tcol, adj=c(0.5,0))
text(a.glc(side=0), a.glc(side=1, line=3.75), labels=xlab, cex = 1.1, font=2, col=tcol, adj=c(0.5,0))
text(a.glc(side=2, line=1.5), a.glc(side=5), labels=ylab, cex = 1.1, font=2, col=tcol, adj=c(0.5,0), srt=90)
par(xpd=FALSE)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = bgcol, lty = 1, lwd = 1)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,lastline, (0:n)+0.5, col = bgcol, lty = 1, lwd = 1)
if(hlines & N>1) segments(lastline, (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = bgcol, lty = 1, lwd = 1)
abline(v=v, lty=lty, col=lcol)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
for(k in 1:N) {
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = a.coladd.ade(col[k], -75), lty = 1, lwd = 3)
arrows(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], length = 0.0275, angle = 90, code = 3, col = col[k], lty = 1, lwd = 1)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=a.coladd.ade(col[k], -75), bg=col[k])
}
abline(v=lastline, col=tcol, lwd=1)
if(legends) legend("topleft", legenlab, fill=col, border=a.coladd.ade(col, -75), horiz=TRUE, bg=rgb(1,1,1), box.col=tcol, box.lwd=1, text.col=tcol)
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=tcol)
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=tcol, lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=tcol)
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, cex = 1.1, col=tcol)
box(col=tcol)
}
################################################################################
################################################################################
################################################################################
################################################################################
#Walltype 6
if(wall==6){
par(col.axis=tcol)
# Plot
plot(0, 0, type='p', pch='', bg=col, main=main, cex=1, xlim=xlimw, ylim=ylimw, ylab='', xlab='', axes = FALSE, col.main=tcol, col=rgb(1,1,1,0))
if(length(xticks)==1) ticksade<-pretty(c(xmin, xmax), n = xticks, min.n = n %/% 3)
if(length(xticks)>1 ) ticksade<-xticks
onetick<-ticksade[length(ticksade)]-ticksade[length(ticksade)-1]
lastline<-xmax+(0.15*xrange)
polygon( c(par('usr')[c(1,1)], lastline, lastline), par('usr')[c(3,4,4,3)], col=bgcol, border=NA)
abline(v=v, lty=lty, col=lcol)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = a.coladd.ade(bgcol, -35), lty = 1, lwd = 3)
if(hlines & N==1) segments(par('usr')[1], 1:n ,lastline, 1:n, col = rgb(1,1,1), lty = 1, lwd = 1)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = a.coladd.ade(bgcol, -35), lty = 1, lwd = 3)
if(hlines & N>1) segments(par('usr')[1], (0:n)+0.5 ,par('usr')[2], (0:n)+0.5, col = rgb(1,1,1), lty = 1, lwd = 1)
ys<-seq(n,1)
yz<-seq(0.5, -0.5, length.out=(N+2))
if(N==1) yz<-c(0, 0)
for(k in 1:N) {
segments(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], col = a.coladd.ade(col[k], -75), lty = 1, lwd = 3)
segments(M[[k]][ ,2], ys+yz[k+1] ,M[[k]][ ,3] , ys+yz[k+1], col = col[k], lty = 1, lwd = 1)
points(M[[k]][ ,1], ys+yz[k+1], type='p', pch=22, cex=1.1, col=a.coladd.ade(col[k], -75), bg=col[k])
}
if(legends) legend("topleft", legenlab, fill=col, border=a.coladd.ade(col, -75), horiz=TRUE, bg=bgcol, box.col=rgb(1,1,1), box.lwd=3, text.col=tcol)
if(legends) legend("topleft", legenlab, fill=col, border=a.coladd.ade(col, -75), horiz=TRUE, bg=rgb(1,1,1,0), box.col=a.coladd.ade(bgcol, -35), box.lwd=1, text.col=tcol)
if(is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1), labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)+tud, labels =vnames, pos=4 , col=tcol)
if(!is.null(sectext)) text( rep(lastline+diff(par('usr')[c(1, 2)])/75 , n) ,seq(n, 1)-tud, labels =sectext, pos=4 , col=tcol)
if(logaxe & length(xticks)==1) logis<- ticksade<-pretty(c(exp(xmin), exp(xmax)), n = xticks, min.n = n %/% 3)
if(logaxe & length(xticks)> 1) logis<- xticks
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=a.coladd.ade(bgcol, -35), lwd.ticks=3)
if(logaxe) axis(1, at=log(logis), labels=logis, col=bgcol, col.ticks=rgb(1,1,1), lwd.ticks=1)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=a.coladd.ade(bgcol, -35), lwd.ticks=3)
if(!logaxe) axis(1, at=ticksade, col=bgcol, col.ticks=rgb(1,1,1), lwd.ticks=1)
text( lastline+diff(par('usr')[c(1, 2)])/75 , n+0.5+tud*1.5, labels = rlab, pos=4 , font=2, cex = 1.1, col=tcol)
mtext(xlab, side = 1, at = lastline, adj = 0, padj=1.5, font=1, col=tcol, cex=par('cex.lab'))
mtext(ylab, line=1.5, side = 2, font=1, col=tcol, cex=par('cex.lab'))
box(col=rgb(1,1,1), lwd=3)
box(col=a.coladd.ade(bgcol, -35), lwd=1)
}
################################################################################
################################################################################
}
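# Minimal usage sketch (not part of the original source): three ratios with
# 95% CIs, a reference line at 1 and a log-scaled axis; the expected column
# order is (estimate, lower, upper).
# M <- matrix(c(1.20, 0.95, 1.52,
#               0.80, 0.61, 1.05,
#               1.45, 1.10, 1.91), ncol = 3, byrow = TRUE)
# ratio.plot.ade(M, vnames = c("A", "B", "C"), v = 1, logaxe = TRUE)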
\name{graph.ran.mean}
\alias{graph.ran.mean}
\title{Graph Random Group versus Actual Group distributions}
\description{Uses random group resampling (RGR) to create a distribution
of pseudo group means. Pseudo group means are then contrasted with
actual group means to provide a visualization of the group-level properties
of the data. It is, in essense, a way of visualizing the ICC1 or an F-Value
from an ANOVA model.}
\usage{
graph.ran.mean(x, grpid, nreps, limits, graph=TRUE, bootci=FALSE)
}
\arguments{
\item{x}{The vector representing the construct of interest.}
\item{grpid}{A vector identifying the groups associated with x.}
\item{nreps}{A number representing the number of random groups to generate.
Because groups are created with the exact size
characteristics of the actual groups, the total number of pseudo groups created
may be calculated as nreps * Number Actual Groups. The value chosen for nreps only
affects the smoothness of the pseudo group line -- values greater than 25 should
provide sufficiently smooth lines. Values of 1000 should be used if the bootci
option is TRUE although only 25 are used in the example to reduce computation time.}
\item{limits}{Controls the upper and lower limits of the y-axis on the plot.
The default is to set the limits at the 10th and 90th percentiles of the raw data.
This option only affects how the data is plotted.}
\item{graph}{Controls whether or not a plot is returned. If graph=FALSE,
the program returns a data frame with two columns. The first column
contains the sorted means from the actual groups, and the second column contains
the sorted means from the pseudo groups. This can be useful for plotting results
in other programs.}
\item{bootci}{Determines whether approximate 95 percent confidence interval estimates
are calculated and plotted. If bootci is TRUE, the nreps option should be 1000 or more.}
}
\value{Produces either a plot (graph=TRUE) or a data.frame (graph=FALSE)}
\references{Bliese, P. D., & Halverson, R. R. (2002). Using random group resampling in
multilevel research. Leadership Quarterly, 13, 53-68.}
\author{ Paul Bliese
\email{[email protected]}}
\seealso{
\code{\link{ICC1}}
\code{\link{mix.data}}
}
\examples{
data(bh1996)
# with the bootci=TRUE option, nreps should be 1000 or more. The value
# of 25 is used in the example to reduce computation time
with(bh1996,graph.ran.mean(HRS,GRP,limits=c(8,16),nreps=25, bootci=TRUE))
GRAPH.DAT<-graph.ran.mean(bh1996$HRS,bh1996$GRP,limits=c(8,16),nreps=25,
graph=FALSE)
}
\keyword{dplot}
household_power_consumption_dates <- read.csv("~/Coursera/Exploratory Data Analysis/CourseProject1/household_power_consumption.txt",
sep=";",na.strings = "?",skip=66636,nrows=2880)
names(household_power_consumption_dates) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage",
"Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
household_power_consumption_dates$Date<-as.Date(household_power_consumption_dates$Date,format="%d/%m/%Y")
dt<-paste(household_power_consumption_dates$Date,household_power_consumption_dates$Time)
household_power_consumption_dates$datetime<-strptime(dt,format="%Y-%m-%d %H:%M:%S")
png("~/Coursera/Exploratory Data Analysis/CourseProject1/plot1.png",480,480)
hist(household_power_consumption_dates$Global_active_power,main="Global Active Power",
xlab="Global Active Power (kilowatts)",col="red",breaks=12)
dev.off()
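# Notes (ours, not from the original): skip = 66636 / nrows = 2880 jump
# straight to the rows covering 2007-02-01 and 2007-02-02; since header = TRUE
# is the read.csv() default, the first post-skip row is consumed as column
# names, hence the manual renaming above. A slower but sturdier alternative:
# full <- read.csv("household_power_consumption.txt", sep = ";",
#                  header = TRUE, na.strings = "?")
# feb <- full[full$Date %in% c("1/2/2007", "2/2/2007"), ]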
library(shiny)
ui <- fluidPage(
#includeScript('www/enter_bind.js'),
singleton(tags$head(tags$script(src = "enter_bind.js"))),
tags$input(type='command', id='command1', class='reactnb-command',
autocomplete='off', autocorrect='off'
),
tags$br(),
# tags$input(type='command', id='command2', class='reactnb-command',
# autocomplete='off', autocorrect='off'
# ),
verbatimTextOutput("o1"),
verbatimTextOutput("o2"),
verbatimTextOutput("o3")
)
server <- function(input, output) {
output$o1 <- renderPrint({
input$command1
})
output$o2 <- renderPrint({
input$command2
})
output$o3 <- renderPrint({
print(reactiveValuesToList(input))
})
}
runApp(shinyApp(ui, server), launch.browser = TRUE)
# it looks like `input$...` works on element id. However,
# in the example app, the input is of type `shiny-bound-input`.
# I believe this happens with a call to `Shiny.inputBindings.register(...)`
# functions
# example 1:
minha_funcao <- function(x, y, operacao = "soma"){
if (operacao == "soma"){
return(x + y)
}
if (operacao == "subtracao"){
return(x - y)
}
if (operacao == "multiplicacao"){
return(x * y)
}
if (operacao == "divisao"){
return(x / y)
}
}
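# A more compact version using switch() (sketch, equivalent behaviour):
# minha_funcao <- function(x, y, operacao = "soma") {
#   switch(operacao,
#          soma          = x + y,
#          subtracao     = x - y,
#          multiplicacao = x * y,
#          divisao       = x / y)
# }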
minha_funcao(2, 2)
minha_funcao(2, 2, "soma")
minha_funcao(2, 2, "subtracao")
minha_funcao(2, 2, "multiplicacao")
minha_funcao(2, 2, "divisao")
# example 2:
maiores <- function(x, limite){
# indices is a logical vector
indices <- x > limite
x[indices]
}
a <- 1:50
maiores(a, 25)
tabPanel('Trans', value = 'tab_trans', icon = icon('database'),
navlistPanel(id = 'navlist_trans',
well = FALSE,
widths = c(2, 10),
source('ui/ui_transform2.R', local = TRUE)[[1]]
)
)
setwd("C:/Users/Daiane/Desktop/COURSERA/Exploratory data analysis/semana4")
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(dplyr)
# Q: Compare emissions from motor vehicle sources in Baltimore City with
# emissions from motor vehicle sources in Los Angeles County, California
# (fips == "06037"). Which city has seen greater changes over time in motor
# vehicle emissions?
baltimore_NEI = NEI[NEI$fips=="24510" & NEI$type=="ON-ROAD", ]
la_NEI = NEI[NEI$fips=="06037" & NEI$type=="ON-ROAD", ]
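# fips "24510" is Baltimore City and "06037" is Los Angeles County;
# type "ON-ROAD" restricts the data to motor vehicle sources.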
emissionBaltimore = aggregate(Emissions ~ year, baltimore_NEI, sum)
emissionLa = aggregate(Emissions ~ year, la_NEI, sum)
png("plot6.png")
rng = range(emissionBaltimore$Emissions, emissionLa$Emissions)
plot(x = emissionBaltimore$year , y = emissionBaltimore$Emissions,
type = "p", pch = 16, col = "blue",
ylab = "PM2.5 Emission", xlab = "Year", ylim = rng,
main = "Motor vehicle PM2.5 Emission in LA & Baltimore from 1999 to 2008")
lines(x =emissionBaltimore$year, y = emissionBaltimore$Emissions, col = "blue")
points(x = emissionLa$year, y = emissionLa$Emissions, pch = 16, col = "red")
lines(x = emissionLa$year, y = emissionLa$Emissions, col = "red")
legend("right", legend = c("LA", "Baltimore"), pch = 20, lty=1, col = c("red", "blue"), title = "City")
dev.off()
# Author: Oliver Engist
# Corresponding dataset: https://www.kaggle.com/c/house-prices-advanced-regression-techniques
# Description: Run this script to produce a cleaned up testing and training dataset of the original
# House Price datasets.
#--------------------------------------------------------------------------------------------------
# Read in the data:
H.train <- read.csv('~/Dropbox/PORTFOLIO/Regression/train.csv',sep=',',header = T)
H.test <- read.csv('~/Dropbox/PORTFOLIO/Regression/test.csv',sep=',',header = T)
#Function to take a data vector and replace all NA with another factor ("None" in our case).
#If the vector is numeric, a message appears and the original vector is returned.
replaceNAWithNewFactor <- function(dataVector,newFactor){
if(class(dataVector)!="numeric"){
data <- as.character(dataVector)
na.idx <- is.na(data)
data[na.idx] <- newFactor
return(as.factor(data))
}else{
print("numeric variable, no NAs replaced")
return(dataVector)
}
}
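# Caveat (our note): integer columns have class "integer", not "numeric", so
# this test would convert them to factors as well; !is.numeric(dataVector)
# would treat integer columns as numeric too.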
# Create a vector with all the variable names that should be cleaned up:
columnsToClean <- data.frame("Fence","MiscFeature","FireplaceQu","PoolQC","^Bsmt","^Garage","^Mas")
# The ^ symbol anchors the pattern to the start of the name, so all columns
# whose names begin with "Bsmt", "Garage" or "Mas" are matched.
# Function to apply the grep function to the vector of column names.
applyGrepToHousing <- function(pattern){
#returns a vector of indices of the columns that have this pattern in the title.
return(grep(pattern,colnames(H.train)))
}
# Apply the new function on the entire vector of patterns we want to consider
# Creates a vector of all column indices that have NAs and need to be cleaned.
col.idx <- unlist(apply(columnsToClean,2,applyGrepToHousing))
# Take this vector of indices and use the replaceNAWithFactor function.
# Testing and training dataset separately.
H.train[,col.idx] <- mapply(replaceNAWithNewFactor,H.train[,col.idx],"none")
H.test[,col.idx] <- mapply(replaceNAWithNewFactor,H.test[,col.idx],"none")
# Replace all missing LotFrontage NAs with the square root of the LotArea
LotFront.na <- is.na(H.train$LotFrontage)
H.train$LotFrontage[LotFront.na] <- sqrt(H.train$LotArea[LotFront.na])
#Replace the single missing Electricity entry with the most frequent one:
electr.na <- is.na(H.train$Electrical)
Mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
H.train$Electrical[electr.na] <- Mode(H.train$Electrical)
#Resulting numbers of NA per variable:
apply(apply(H.train,2,is.na),2,sum)
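# Equivalent one-liner: colSums(is.na(H.train))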
# Write data to a new file:
write.table(H.train, file = "~/Dropbox/PORTFOLIO/Regression/train_clean.csv", row.names = FALSE,sep=',')
write.table(H.test, file="~/Dropbox/PORTFOLIO/Regression/test_clean.csv", row.names=FALSE,sep=",")
|
te=c(27, 20, 7, 4, 1,1)
ye=c("European", "Indian", "Chinese", "West Asian", "Korean", "Japanese" )
df=data.frame(cbind(ye,te))
colnames(df)=c("origin", "count")
df$count=as.numeric(as.character(df$count))
df$percentage= round(df$count/sum(te)*100,1)
df$pop=c(197.3, 3.18, 3.79, 10.5, 1.7, 1.3 )
totalpop=sum(df$pop[c(2,3,5,6)])
df$biomed.pop=c(.58,.34*1.8/totalpop,.34*5.2/totalpop, .025, .34*1.67/totalpop, .34*1.3/totalpop)*69000
df$cheaters.ppm=df$count/df$pop
df$cheaters.p1000=df$count/df$biomed.pop*1000
#tabulate
knitr::kable(df[,c(1,2,3,5,7)],format = "pandoc")
xtable::xtable(df[,c(1,2,3,5,7)])
#probability of Indian fraud
pbinom(q=20,size = 4236, prob = 60/69000,lower.tail = F)
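# lower.tail = FALSE gives P(X > 20), i.e. P(X >= 21), for
# X ~ Binomial(n = 4236, p = 60/69000)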
#how many times more likely are Indians to commit fraud
4.7219854/((60-20)/(69000-4236)*1000)
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/EarningsPerformance.R
\name{EarningsPerformance}
\alias{EarningsPerformance}
\title{Earnings Performance.}
\usage{
EarningsPerformance(ticker, searchStart = Sys.Date() - 365,
searchEnd = Sys.Date(), timePeriod = 7)
}
\arguments{
\item{ticker}{Yahoo Finance target ticker}
\item{searchStart}{Beginning of date range to search in}
\item{searchEnd}{End of date range to search in}
\item{timePeriod}{Period of time before and after earnings report}
}
\value{
data frame with earnings results
}
\description{
Earnings Performance.
}
\examples{
EarningsPerformance("TSLA", 2, 2015)
}
\keyword{earnings}
\keyword{performance}
# Lascar file duplicate/problem identification by session
# set path
path <- "~/Dropbox/Ghana_exposure_data_SHARED (1)/Main_study_exposure_assessment"
################# Run script from here to the end
## get Lascar files
files<-list.files(path,recursive=T,pattern="^(CU_CO|CU_C0|CO_USB|COL_USB|CU-CO|CU-C0|CO-USB|COL-USB)", full.names=F)
length(files) #4894
# make a data frame of the files
Lascar_data1 <- data.frame(file = files)
# grab mother and child id info
id_pattern <- "BM....."
Lascar_data1$mstudyid <- regmatches(Lascar_data1$file, regexpr(id_pattern, Lascar_data1$file))
child_pattern <- "BM....C"
Lascar_data1$cstudyid <- regexpr(child_pattern, Lascar_data1$file)
Lascar_data1$cstudyid <- ifelse(Lascar_data1$cstudyid == -1, NA, substr(x = Lascar_data1$file, start = Lascar_data1$cstudyid, stop = Lascar_data1$cstudyid + 6))
# identify those files with ids matching neither mother nor child
Lascar_data1$problem_id <- regmatches(Lascar_data1$file, gregexpr(id_pattern, Lascar_data1$file))
Lascar_data1$problem_id <- ifelse(sapply(Lascar_data1$problem_id, unique, "[[") == Lascar_data1$mstudyid, NA, sapply(Lascar_data1$problem_id, unique, "[["))
Lascar_data1$problem_id <- ifelse(substr(Lascar_data1$problem_id, 15, 21) %in% Lascar_data1$cstudyid & nchar(Lascar_data1$problem_id) < 24, NA, Lascar_data1$problem_id)
# can't parse further
# grab session info
session_pattern <- "s_.."
Lascar_data1$session <- regmatches(Lascar_data1$file, regexpr(session_pattern, Lascar_data1$file))
Lascar_data1$session2 <- regmatches(Lascar_data1$file, gregexpr(session_pattern, Lascar_data1$file))
Lascar_data1$session2 <- ifelse(sapply(Lascar_data1$session2, unique, "[[") == Lascar_data1$session, NA, sapply(Lascar_data1$session2, unique, "[["))
# identify those paths that contain more than 1 session and there is no child
Lascar_data1$problem_session <- ifelse(nchar(as.character(Lascar_data1$session2)) > 2 & is.na(Lascar_data1$cstudyid), Lascar_data1$session2, NA)
# get rid of files that are actual duplicates (where 2 monitors were deployed simultaneously)
dupfiles_Lascar <- Lascar_data1[grep("dup", Lascar_data1$file),1] #24
Lascar_data1 <- Lascar_data1[!Lascar_data1$file %in% dupfiles_Lascar,] # row numbers get added here, not sure how to avoid
nrow(Lascar_data1) # 4888
# search for duplicated files
# separate the file name from the full path
Lascar_data1$filename<-basename(as.character(Lascar_data1$file))
# id the duplicates and send to new data frame
dups <- Lascar_data1$filename[which(duplicated(Lascar_data1$filename))]
Lascar_data1$duplicated <- ifelse(Lascar_data1$filename %in% dups, TRUE, FALSE)
sum(Lascar_data1$duplicated) #78
# Lascar_duplicates <- Lascar_data1[Lascar_data1$filename %in% dups,]
# Lascar_duplicates <- Lascar_duplicates[order(Lascar_duplicates$filename), c("mstudyid", "cstudyid", "problem_id", "problem_session", "filename", "file")]
Naming_problems <- Lascar_data1[!is.na(Lascar_data1$problem_id) | !is.na(Lascar_data1$problem_session) | Lascar_data1$duplicated == TRUE, c("mstudyid", "cstudyid", "problem_id", "problem_session", "duplicated", "filename", "file")]
Naming_problems$problem_id <- as.character(Naming_problems$problem_id)
Naming_problems$problem_session <- as.character(Naming_problems$problem_session)
Naming_problems <- Naming_problems[order(Naming_problems$mstudyid),]
# save as .csv
write.csv(Naming_problems, file = paste0("Naming_problems_", format(Sys.Date(), format = "%Y%b%d"), ".csv"), row.names = FALSE)
############################
-----------------------------------------------------------------------------------------------
## read CSV
####################################
raws_score = read.csv(file = "Data/RAF2016_v2.csv", header=TRUE, sep=",")
head(raws_score)
-----------------------------------------------------------------------------------------------
## joining table
####################################
members.15.over65.transformed4 = left_join(members.15.over65.transformed3,
members.15.over65, "EMPI")
-----------------------------------------------------------------------------------------------
## Function: bin and aggregate
####################################
fn_bin_vector = function(vec, bin){
vec = sort(vec, decreasing = TRUE)
#str(vec)
bin.sum = rep(0, bin)
idx = 1
for (b in 1:bin){
cumsum = 0
#flag = FALSE
for (i in idx:length(vec)){
cumsum = cumsum + vec[i]
#print(idx)
#print(b)
if (i/length(vec) > (b/bin)){
break
}
}
idx = i+1
bin.sum[b] = cumsum
}
return(bin.sum)
}
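# Note: the local variable `cumsum` shadows base::cumsum() inside the loop;
# a name like `running_total` would avoid confusion.
# Example call (sketch): fn_bin_vector(runif(100), bin = 10)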
-----------------------------------------------------------------------------------------------
## Function to calculate the distance between 2 geocodes
####################################
```{r}
get_geo_distance = function(lon1, lat1, lon2, lat2, units = "km") {
distance_Haversine = distm(c(lon1, lat1), c(lon2, lat2), fun = distHaversine)
if (units == "km") {
distance = distance_Haversine / 1000.0
}
else if (units == "miles") {
distance = distance_Haversine / 1609.344
}
#else if (units == "meters"){
# distance = distance_Haversine / 1000000.0
#}
else {
distance = distance_Haversine
# This will return in meter as same way as distHaversine function.
}
distance
}
#distance between one grid
grid_length = get_geo_distance(-113, 42, -112.5, 41.5, "km")
print("Grid length in km: ")
print(grid_length[1])
#QA Script to test the function
#test = get_geo_distance(members$Longitude[1], members$Latitude[1],
# centers$lon[1], centers$lat[1], 'meters')
#test[1]
```
## Calculate distance of each member to each clinic
```{r}
for (i in 1:length(centers$name.cleaned)){
  name = centers$name.cleaned[i]
  # add a placeholder column called "name" (renamed after the inner loop)
  members.15.over65 <- cbind(members.15.over65, name)
  members.15.over65$name = 0
print(centers$Name[i])
for (j in 1:length(members.15.over65$Latitude)){
temp = get_geo_distance(centers$lon[i], centers$lat[i],
members.15.over65$Longitude[j],
members.15.over65$Latitude[j],
"meters")
members.15.over65$name[j] = temp[1]
}
colnames(members.15.over65)[(names(members.15.over65)) == "name"] = paste(centers$name.cleaned[i],
"distance",sep=".")
}
colnames(members.15.over65)
# QA Script
#centers$lon[1]
#centers$lat[1]
#members.15$Latitude[1]
#members.15$Longitude[1]
#print(get_geo_distance(centers$lon[1], centers$lat[1],
# members.15$Longitude[1],
# members.15$Latitude[1],
# "meters"))
```
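-----------------------------------------------------------------------------------------------
## Vectorized alternative to the distance loop (sketch)
####################################
```{r}
# A minimal sketch, not part of the original script: geosphere::distm() accepts
# whole coordinate matrices, so the member-by-clinic distance matrix can be
# built in one call (assuming the same column names as above).
dist_m <- distm(members.15.over65[, c("Longitude", "Latitude")],
                centers[, c("lon", "lat")],
                fun = distHaversine)
colnames(dist_m) <- paste(centers$name.cleaned, "distance", sep = ".")
```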
|
2693581b1eb0c25941c54841e16c2e8d55f33a8d | e3cc14879310ef6b79a31e5e391867339e49d27e | /Shiny/server.R | cbc8259552044511e0fb9752622a92ee55a5a237 | [] | no_license | Angel-RC/Twitter | 481b679346b790d14e501f280a59da8cc4d02ff4 | a8ae7be20b16057696eab2b4b76b6973b01b9e14 | refs/heads/master | 2020-03-22T07:28:17.239204 | 2018-07-04T08:42:35 | 2018-07-04T08:42:35 | 130,845,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,595 | r | server.R |
# Packages used below (these may instead be loaded in global.R / ui.R)
library(shiny)
library(shinydashboard)  # valueBox() / renderValueBox()
library(dplyr)
library(ggplot2)

shinyServer(function(input, output, session) {
  # Data ----
# ·······························································································
startTime <- as.numeric(Sys.time())
weatherData <- reactive({
invalidateLater(5000, session)
N <- min((as.numeric(Sys.time()) - startTime)/5 + 30, 1000)
read.csv("Data/weatherdata.csv") %>%
slice(1:N) %>%
      mutate(Dates = as.POSIXct(Dates)) %>%
slice((n()-30):n())
})
  # Plots ----
# ·······························································································
output$temp <- renderPlot({
weatherData<- weatherData()
qplot(Dates, Temperature, data = weatherData, geom = "line")
})
output$rain <- renderPlot({
weatherData<- weatherData()
    ggplot(data = weatherData, aes(Dates, Rainfall)) +
geom_bar(stat = "identity")
})
output$maxTemp <- renderValueBox({
weatherData <- weatherData()
maxTemp <- max(weatherData$Temperature, na.rm = TRUE)
valueBox(maxTemp,
subtitle = "Maximum Temperature (celsius)",
icon = icon("arrow-up"),
color = "light-blue")
})
output$minTemp <- renderValueBox({
weatherData <- weatherData()
minTemp <- min(weatherData$Temperature, na.rm = TRUE)
valueBox(minTemp,
subtitle = "Minimum Temperature (celsius)",
icon = icon("arrow-down"),
color = "light-blue")
})
output$averageRainfall <- renderValueBox({
weatherData <- weatherData()
meanRain <- round(mean(weatherData$Rainfall, na.rm = TRUE),1)
valueBox(meanRain,
subtitle = "Average Monthly rainfall (mm)",
icon = icon("cloud"),
color = "orange")
})
  # Tables to display
output$tb1 <- DT::renderDataTable({show_tabla(historico.cuentas)})
output$tb2 <- DT::renderDataTable(show_tabla(historico.tweets))
output$tb3 <- DT::renderDataTable(show_tabla(historico.menciones))
output$tb4 <- DT::renderDataTable(show_tabla(historico.seguidores))
}
)
|
80f8fbe38fec49112f9122081f0888843874f012 | 2dc29399693050696b7dec30b90bcfb00b17cae3 | /r-scripts/Chapter-11.R | 4b1e6c17c1f0b2e55aa6d6cb943cdebb1fefdb76 | [] | no_license | luis8185/comparing-groups | 1b74f64ebbc1363583635b2b4e55c4e67f5b8041 | 57731d07f3307c0161d4077d99b0b124a4a33c37 | refs/heads/master | 2021-12-22T08:59:39.707618 | 2017-09-13T23:00:51 | 2017-09-13T23:00:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,199 | r | Chapter-11.R | ###################################################
### Comparing Groups: Randomization and Bootstrap Methods Using R
### Andrew S. Zieffler, Jeffrey Harring, and Jeffrey D. Long
### December 05, 2010
### Chapter 11: Planned Contrasts
###################################################
###################################################
### Command Snippet 11.1: Read in and Examine the Diet.csv Data
###################################################
diet <- read.table(file = file.choose(), header = TRUE, sep = ",", row.names = "ID")
head(diet)
tail(diet)
str(diet)
summary(diet)
###################################################
### Command Snippet 11.2: Examine Research Question 1
###################################################
diet1 <- subset(x = diet, subset = Diet == "Atkins" | Diet == "Ornish")
plot(density(diet1$WeightChange[diet1$Diet == "Ornish"]), main = " ", xlab = "12-Month Weight Change", xlim = c(-80, 40), bty = "l")
lines(density(diet1$WeightChange[diet1$Diet == "Atkins"]), lty = "dashed")
legend(x = -75, y = 0.025, legend = c("Atkins", "Ornish"), lty = c("dashed", "solid"), bty = "n")
tapply(X = diet1$WeightChange, INDEX = diet1$Diet, FUN = mean)
tapply(X = diet1$WeightChange, INDEX = diet1$Diet, FUN = sd)
###################################################
### Command Snippet 11.3: Examine Research Question 2
###################################################
diet2 <- diet
levels(diet2$Diet) <- c("Atkins", "Others", "Others", "Others")
plot(density(diet2$WeightChange[diet2$Diet == "Others"]), main = " ", xlab = "12-Month Weight Change", xlim = c(-80, 40), bty = "l")
lines(density(diet2$WeightChange[diet2$Diet == "Atkins"]), lty = "dashed")
legend(x = -75, y = 0.030, legend = c("Atkins", "Others"), lty = c("dashed", "solid"), bty = "n")
tapply(X = diet2$WeightChange, INDEX = diet2$Diet, FUN = mean)
tapply(X = diet2$WeightChange, INDEX = diet2$Diet, FUN = sd)
###################################################
### Command Snippet 11.4: Examine Research Question 3
###################################################
diet3 <- diet
levels(diet3$Diet) <- c("CR", "BM","BM","CR")
plot(x = density(x = diet3$WeightChange[diet3$Diet == "BM"]), main = " ", xlab = "12-Month Weight Change", xlim = c(-80, 45), bty = "l")
lines(density(diet3$WeightChange[diet3$Diet == "CR"]), lty = "dashed")
legend(x = -75, y = 0.030, legend = c("Carbohydrate Restrictive", "Behavior Modification"), lty = c("dashed", "solid"), bty = "n")
tapply(X = diet3$WeightChange, INDEX = diet3$Diet, FUN = mean)
tapply(X = diet3$WeightChange, INDEX = diet3$Diet, FUN = sd)
###################################################
### Command Snippet 11.5: Compute Estimate of Contrast 1
###################################################
levels(diet$Diet)
con1 <- c(1, 0, -1, 0)
tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con1
sum(tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con1)
###################################################
### Command Snippet 11.6: Compute Estimate of Contrast 2 and Contrast 3
###################################################
con2 <- c(3,-1,-1,-1)
con3 <- c(1,-1,-1,1)
sum(tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con2)
sum(tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con3)
###################################################
### Command Snippet 11.7: Randomization Test for Contrast 1
###################################################
set.seed(100)
permuted <- replicate(n = 4999, expr = sample(diet$WeightChange))
contrast.1 <- function(data) {
(1) * mean(data[1:60]) +
(0) * mean(data[61:120]) +
(-1) * mean(data[121:180]) +
(0) * mean(data[181:240]);
}
contrast.1(diet$WeightChange)
perm.contrasts.1 <- apply(X = permuted, MARGIN = 2, FUN = contrast.1)
plot(density(perm.contrasts.1))
mean(perm.contrasts.1)
sd(perm.contrasts.1)
length(perm.contrasts.1[abs(perm.contrasts.1) >= 8.6])
(4 + 1) / (4999 + 1)
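# The Monte Carlo p-value is (b + 1) / (B + 1): b = 4 permuted contrasts were at
# least as extreme as the observed value of 8.6 out of B = 4999 permutations,
# and the +1 in each term counts the observed arrangement itself.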
###################################################
### Command Snippet 11.8: Randomization Test for Contrast 2
###################################################
contrast.2 <- function(data) {
(3) * mean(data[1:60]) +
(-1) * mean(data[61:120]) +
(-1) * mean(data[121:180]) +
(-1) * mean(data[181:240]);
}
contrast.2(diet$WeightChange)
perm.contrasts.2 <- apply(X = permuted, MARGIN = 2, FUN = contrast.2)
plot(density(perm.contrasts.2))
mean(perm.contrasts.2)
sd(perm.contrasts.2)
length(perm.contrasts.2[abs(perm.contrasts.2) >= 24.7])
(0 + 1) / (4999 + 1)
###################################################
### Command Snippet 11.9: Randomization Test for Contrast 3
###################################################
# define contrast 3 (weights from con3) before using it
contrast.3 <- function(data) {
	(1) * mean(data[1:60]) + 
	(-1) * mean(data[61:120]) + 
	(-1) * mean(data[121:180]) + 
	(1) * mean(data[181:240]);
	}
contrast.3(diet$WeightChange)
perm.contrasts.3 <- apply(X = permuted, MARGIN = 2, FUN = contrast.3)
plot(density(perm.contrasts.3))
mean(perm.contrasts.3)
sd(perm.contrasts.3)
length(perm.contrasts.3[abs(perm.contrasts.3) >= 6.3])
(477 + 1) / (4999 + 1)
###################################################
### Command Snippet 11.10: Computation of Total Sum of Squares
###################################################
var(diet$WeightChange) * (length(diet$WeightChange) - 1)
###################################################
### Command Snippet 11.11: Computation of Sum of Squares for Contrast 1
###################################################
psi <- sum(tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con1)
numerator <- psi ^ 2
numerator
denominator <- sum(con1 ^ 2 / table(diet$Diet))
denominator
numerator / denominator
###################################################
### Command Snippet 11.12: Computation of Sum of Squares for Contrast 2 and 3
###################################################
numerator <- sum(tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con2) ^ 2
denominator <- sum(con2 ^ 2 / table(diet$Diet))
numerator / denominator
18317 / 49960
numerator <- sum(tapply(X = diet$WeightChange, INDEX = diet$Diet, FUN = mean) * con3) ^ 2
denominator <- sum(con3 ^ 2 / table(diet$Diet))
numerator / denominator
590 / 49960
###################################################
### Command Snippet 11.13: Nonparametric Bootstrap Interval for Eta-Squared (Contrast 1)
###################################################
eta.squared <- function(data, indices) {
d <- data[indices,]
num <- sum(tapply(X = d$WeightChange, INDEX = d$Diet, FUN = mean) * con1) ^ 2
den <- sum(con1 ^ 2 / table(d$Diet))
SS.con <- num / den
SS.tot <- var(d$WeightChange) * 239
SS.con / SS.tot
}
eta.squared(diet, indices = 1:nrow(diet))
library(boot)
nonpar.boot <- boot(data = diet, statistic = eta.squared, R = 4999, strata = diet$Diet)
plot(density(nonpar.boot$t), xlab = "Bootstrapped Eta-Squared", main = " ")
nonpar.boot
boot.ci(boot.out = nonpar.boot, type = "bca")
###################################################
### Command Snippet 11.14: Check Orthogonality
###################################################
sum(con1 * con2)
sum(con1 * con3)
sum(con2 * con3)
###################################################
### Command Snippet 11.15: Read in and Examine the WordRecall.csv Data, Compute and Plot the Conditional Distributions
###################################################
word <- read.table(file = file.choose(), header = TRUE, sep = ",", row.names = "ID")
head(word)
tail(word)
str(word)
summary(word)
plot(x = word$Study, y = word$Recall, xlab = "Time Spent Studying", ylab = "Number of Words Recalled", xlim=c(0.4, 3.4), ylim = c(0, 30), bty = "l", pch = 20)
boxplot(word$Recall[word$Study == 1], word$Recall[word$Study == 2], word$Recall[word$Study == 3], at = c(1:3), add = TRUE, axes = FALSE, boxwex = 0.2, col = rgb(red = 0.2, green = 0.2, blue = 0.2, alpha = 0.3))
tapply(X = word$Recall, INDEX = word$Study, FUN = mean)
tapply(X = word$Recall, INDEX = word$Study, FUN = sd)
table(word$Study)
###################################################
### Command Snippet 11.16: Compute the Polynomial Contrasts
###################################################
poly.cont <- contr.poly(3)
poly.cont
con.linear <- poly.cont[ ,1]
con.linear
con.quad <- poly.cont[ ,2]
con.quad
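# A likely next step (sketch, not from the book): attach the polynomial
# contrasts to the Study factor and test the linear and quadratic trends.
word$Study.f <- factor(word$Study)
contrasts(word$Study.f) <- contr.poly(3)
summary(lm(Recall ~ Study.f, data = word))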
|
67783af7d9551699952dedbc2f17451eef649964 | c4f324c98487791c39f550645743e2b5dad3b52a | /man/make_bed.Rd | 541af206012711959e601ad2c34ecf4713dcfede | [] | no_license | russelnelson/GeneArchEst | 06219a1d222087b13f662739f60ebe513aa2bc1f | 0d126aae50f9b68ee2c36ea6c6d9ba16e7c41a9c | refs/heads/master | 2023-03-13T00:48:25.816747 | 2021-03-04T23:08:15 | 2021-03-04T23:08:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,646 | rd | make_bed.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{make_bed}
\alias{make_bed}
\title{Make bed files from genotype/phenotype data}
\usage{
make_bed(
x,
meta,
phenos,
plink_path = "/usr/bin/plink.exe",
return_objects = F,
missing_genotypes = -9
)
}
\arguments{
\item{x}{matrix or object coercable to a matrix. Genotypes, in phased format (two columns per individual).
Genotypes should be in single number format (0 or 1), such as produced by \code{\link{process_ms}}.}
\item{meta}{data.frame of object coercable to data.frame. Metadata for snp data, where column 1 is the chromsome
and column 2 is the position on the chromosome in bp,such as produced by \code{\link{process_ms}}.}
\item{phenos}{character vector. A vector containing the phenotypes for each individual,
sorted identically to individuals in x.}
\item{return_objects}{logical, default FALSE, If TRUE, returns a list with ped, bed, map, and bim data. Otherwise just saves
files.}
\item{missing_genotypes}{numeric, default -9. Encoding for missing alleles/genotypes.}
}
\value{
Either NULL or a list containing ped, bed, map, and bim formatted data.
}
\description{
From input genotypes, metadata, and phenotypes, creates a bed, ped, map, and bim file
for use in PLINK, GCTA, or other programs. Files of each type will be written to the current
working directory with the "data" prefix. Note that file named "data.sh" will be written which
must be ran via bash command line to produce the .bed binary file.
}
\references{
Source code pulled from the snpR package by the same author for use here.
}
\author{
William Hemstrom
}
|
887c7689199589b41bd032258e902e5a30f6862c | 0d1ef2cb3818bcadd602a0f46cbfec7ed55a9aef | /h2o_3.10.4.4/h2o/man/h2o.tanh.Rd | 928a5e0b9b5b63c56d6e3f820c142548be2c2207 | [] | no_license | JoeyChiese/gitKraken_test | dd802c5927053f85afba2dac8cea91f25640deb7 | 3d4de0c93a281de648ae51923f29caa71f7a3342 | refs/heads/master | 2021-01-19T22:26:29.553132 | 2017-04-20T03:31:39 | 2017-04-20T03:31:39 | 88,815,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 317 | rd | h2o.tanh.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{h2o.tanh}
\alias{h2o.tanh}
\title{Compute the hyperbolic tangent of x}
\usage{
h2o.tanh(x)
}
\arguments{
\item{x}{An H2OFrame object.}
}
\description{
Compute the hyperbolic tangent of x
}
\seealso{
\code{\link[base]{tanh}} for the base R implementation.
}
|
aa6e8b6bc95c3f03d34d237c0725bc21c443fde0 | 4c3ff90922b2fa72e82e7ab9a3ba8e7c8ad51113 | /code/rnaseq_code/relative_coverage_plots.R | 5d9526c01755d4a8740f0abd17a95b1a03b8ea81 | [
"MIT"
] | permissive | felixgrunberger/pyrococcus_reannotation | cdb6390aa160c879599ddcc92986ed8491ae3af2 | d3fb45473bb0c92a2edf99ab770ac8f32e69e55c | refs/heads/master | 2020-04-29T18:04:05.504125 | 2019-10-22T08:18:24 | 2019-10-22T08:18:24 | 176,313,200 | 0 | 0 | MIT | 2019-10-22T08:18:02 | 2019-03-18T15:16:47 | R | UTF-8 | R | false | false | 11,590 | r | relative_coverage_plots.R | # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# Average read coverage of 3 data sets in relative position to 4 TSS
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# load libraries
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
library(CoverageView)
library(data.table)
library(tidyverse)
library(here)
library(stringr)
library(viridis)
library(Biobase)
library(ggthemes)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# theme for plotting
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
theme_Publication <- function(base_size=14) {
(theme_foundation(base_size=base_size, base_family="Helvetica")
+ theme(plot.title = element_text(face = "bold",
size = rel(1.2), hjust = 0.5),
text = element_text(),
panel.background = element_rect(colour = NA),
plot.background = element_rect(colour = NA),
panel.border = element_rect(colour = NA),
axis.title = element_text(face = "bold",size = rel(1)),
axis.title.y = element_text(angle=90,vjust =2),
axis.title.x = element_text(vjust = -0.2),
axis.text = element_text(),
axis.line = element_line(colour="black"),
axis.ticks = element_line(),
panel.grid.major = element_line(colour="#f0f0f0"),
panel.grid.minor = element_blank(),
legend.key = element_rect(colour = NA),
legend.position = "bottom",
legend.direction = "horizontal",
legend.key.size= unit(0.2, "cm"),
legend.spacing = unit(0, "cm"),
legend.title = element_text(face="italic"),
plot.margin=unit(c(10,5,5,5),"mm"),
strip.background=element_rect(colour="#f0f0f0",fill="#f0f0f0"),
strip.text = element_text(face="bold")
))
}
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# load data
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# >> strand specific wig files (input for ANNOgesic) were converted using wigToBigWig (https://anaconda.org/bioconda/ucsc-wigtobigwig)
# >> load TSS and write to bed file for each category
get_tss_bed <- function(what_type, strand){
return(fread(input = here("data/annogesic_data/CP023154_TSS.gff"),skip = 1) %>%
as.data.table() %>%
dplyr::filter(V7 == strand) %>%
mutate(type = str_split(V9, ";",n = 7, simplify = T)[,3]) %>%
dplyr::filter(type == paste("type=",what_type, sep="")) %>%
dplyr::mutate(chr = V1,
start = V4,
end = V5,
name = type) %>%
dplyr::select(chr,start,end,name))
}
# >> write positions to table
write.table(get_tss_bed("Primary", "+"), file = here("data/annogesic_data/CP023154_TSS_Primary_p.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Secondary","+"), file = here("data/annogesic_data/CP023154_TSS_Secondary_p.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Internal", "+"), file = here("data/annogesic_data/CP023154_TSS_Internal_p.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Antisense", "+"),file = here("data/annogesic_data/CP023154_TSS_Antisense_p.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Primary", "-"), file = here("data/annogesic_data/CP023154_TSS_Primary_m.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Secondary","-"), file = here("data/annogesic_data/CP023154_TSS_Secondary_m.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Internal", "-"), file = here("data/annogesic_data/CP023154_TSS_Internal_m.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
write.table(get_tss_bed("Antisense", "-"),file = here("data/annogesic_data/CP023154_TSS_Antisense_m.gff.bed"),row.names = F, col.names = F, quote = F, sep = "\t")
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# calculate average read density for each TSS category
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
tss_coverage <- function(seq_type, tss_type, filtered = F){
# load bw file for each sequencing type
inputfile_f <- paste(here("data/annogesic_data/input/"),seq_type,"_in_CP023154_plus.bw", sep = "")
inputfile_r <- paste(here("data/annogesic_data/input/"),seq_type,"_in_CP023154_minus.bw", sep = "")
# use coverageBigWig to calculate coverage
trm_f <- CoverageBigWigFile(inputfile_f)
trm_r <- CoverageBigWigFile(inputfile_r)
# make a strand specific count matrix
pfu_cov_f_sense <- t(cov.matrix(trm_f,coordfile=paste(here("data/annogesic_data/CP023154_TSS_"),tss_type,"_p.gff.bed",sep = ""),extend=400,num_cores=2, bin_width=10))
pfu_cov_f_anti <- t(cov.matrix(trm_r,coordfile=paste(here("data/annogesic_data/CP023154_TSS_"),tss_type,"_p.gff.bed",sep = ""),extend=400,num_cores=2, bin_width=10))
pfu_cov_r_anti <- t(-cov.matrix(trm_f,coordfile=paste(here("data/annogesic_data/CP023154_TSS_"),tss_type,"_m.gff.bed",sep = ""),extend=400,num_cores=2, bin_width=10))
pfu_cov_r_sense <- t(-cov.matrix(trm_r,coordfile=paste(here("data/annogesic_data/CP023154_TSS_"),tss_type,"_m.gff.bed",sep = ""),extend=400,num_cores=2, bin_width=10))
# combine values for both strands
pfu_cov_sense <- rbind(pfu_cov_f_sense, pfu_cov_r_sense[,c(ncol(pfu_cov_r_sense):1)])
pfu_cov_anti <- rbind(pfu_cov_f_anti, pfu_cov_r_anti[,c(ncol(pfu_cov_r_anti):1)])
# scale values by dividing each value/sum(values)
freq_sense <- scale(t(pfu_cov_sense), center = FALSE,
scale = colSums(t(pfu_cov_sense)))
freq_anti <- scale(t(pfu_cov_anti), center = FALSE,
scale = colSums(t(pfu_cov_anti)))
# counts were calculated in a window from -400 to +400 to a TSS in a 10 bp window
line <- seq(from = -400+5, to = 400-5, by = 10)
sense <- rowMeans(freq_sense)
anti <- -rowMeans(freq_anti)
data_TLE <- matrix(ncol = 4, nrow = 400/10*2)
data_TLE[,1] <- line
data_TLE[,2] <- sense
data_TLE[,3] <- anti
data_TLE[,4] <- seq_type
data_TLE <- as.data.table(data_TLE)
colnames(data_TLE) <- c("number", "SENSE", "ANTISENSE", "TYPE")
return(data_TLE)
}
# >> calculate coverage & format for ggploting, primary
frag_cov_p <- tss_coverage(seq_type = "FRAG", tss_type = "Primary")
tex_cov_p <- tss_coverage(seq_type = "plus_TEX", tss_type = "Primary")
notex_cov_p <- tss_coverage(seq_type = "minus_TEX", tss_type = "Primary")
# >> calculate coverage & format for ggploting, secondary
frag_cov_s <- tss_coverage(seq_type = "FRAG", tss_type = "Secondary")
tex_cov_s <- tss_coverage(seq_type = "plus_TEX", tss_type = "Secondary")
notex_cov_s <- tss_coverage(seq_type = "minus_TEX", tss_type = "Secondary")
# >> calculate coverage & format for ggploting, internal
frag_cov_i <- tss_coverage(seq_type = "FRAG", tss_type = "Internal")
tex_cov_i <- tss_coverage(seq_type = "plus_TEX", tss_type = "Internal")
notex_cov_i <- tss_coverage(seq_type = "minus_TEX", tss_type = "Internal")
# >> calculate coverage & format for ggploting, antisense
frag_cov_a <- tss_coverage(seq_type = "FRAG", tss_type = "Antisense")
tex_cov_a <- tss_coverage(seq_type = "plus_TEX", tss_type = "Antisense")
notex_cov_a <- tss_coverage(seq_type = "minus_TEX", tss_type = "Antisense")
# >> combine three sequencing data sets for each frame
cov_p <- rbind(frag_cov_p, tex_cov_p, notex_cov_p) %>%
mutate(number = as.numeric(number),
SENSE = as.numeric(SENSE),
ANTISENSE = as.numeric(ANTISENSE))
cov_s <- rbind(frag_cov_s, tex_cov_s, notex_cov_s) %>%
mutate(number = as.numeric(number),
SENSE = as.numeric(SENSE),
ANTISENSE = as.numeric(ANTISENSE))
cov_i <- rbind(frag_cov_i, tex_cov_i, notex_cov_i) %>%
mutate(number = as.numeric(number),
SENSE = as.numeric(SENSE),
ANTISENSE = as.numeric(ANTISENSE))
cov_a <- rbind(frag_cov_a, tex_cov_a, notex_cov_a) %>%
mutate(number = as.numeric(number),
SENSE = as.numeric(SENSE),
ANTISENSE = as.numeric(ANTISENSE))
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# plot coverage files
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# >> primary
pdf(here("figures/rnaseq_figures/coverage_3primary.pdf"),
width = 3.5, height = 3.5, paper = "special",onefile=FALSE)
ggplot() +
stat_smooth(data = cov_p, aes(x = number, y= SENSE, fill = TYPE), geom = "area", method = "loess", span = 0.07) +
stat_smooth(data = cov_p, aes(x = number, y= SENSE, color = TYPE, linetype= TYPE), geom = "line", method = "loess", span = 0.07, size = 1) +
theme_Publication() +
scale_linetype_manual(values=c("solid","twodash", "dotted")) +
scale_fill_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 0.3) +
scale_color_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 1) +
labs(x = "Position to pTSS [nt]", y = "relative \ncoverage") +
geom_vline(xintercept = c(0), linetype = "longdash", alpha = 0.5)
dev.off()
# >> secondary
pdf(here("figures/rnaseq_figures/coverage_3sec.pdf"),
width = 3.5, height = 3.5, paper = "special",onefile=FALSE)
ggplot() +
stat_smooth(data = cov_s, aes(x = number, y= SENSE, fill = TYPE), geom = "area", method = "loess", span = 0.07) +
stat_smooth(data = cov_s, aes(x = number, y= SENSE, color = TYPE, linetype= TYPE), geom = "line", method = "loess", span = 0.07, size = 1) +
theme_Publication() +
scale_linetype_manual(values=c("solid","twodash", "dotted")) +
scale_fill_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 0.3) +
scale_color_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 1) +
labs(x = "Position to sTSS [nt]", y = "relative \ncoverage") +
geom_vline(xintercept = c(0), linetype = "longdash", alpha = 0.5)
dev.off()
# >> internal
pdf(here("figures/rnaseq_figures/coverage_3internal.pdf"),
width = 3.5, height = 3.5, paper = "special",onefile=FALSE)
ggplot() +
stat_smooth(data = cov_i, aes(x = number, y= SENSE, fill = TYPE), geom = "area", method = "loess", span = 0.07) +
stat_smooth(data = cov_i, aes(x = number, y= SENSE, color = TYPE, linetype= TYPE), geom = "line", method = "loess", span = 0.07, size = 1) +
theme_Publication() +
scale_linetype_manual(values=c("solid","twodash", "dotted")) +
scale_fill_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 0.3) +
scale_color_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 1) +
labs(x = "Position to iTSS [nt]", y = "relative \ncoverage") +
geom_vline(xintercept = c(0), linetype = "longdash", alpha = 0.5)
dev.off()
# >> antisense
pdf(here("figures/rnaseq_figures/coverage_3antisense.pdf"),
width = 3.5, height = 3.5, paper = "special",onefile=FALSE)
ggplot() +
stat_smooth(data = cov_a, aes(x = number, y= SENSE, fill = TYPE), geom = "area", method = "loess", span = 0.07) +
stat_smooth(data = cov_a, aes(x = number, y= SENSE, color = TYPE, linetype= TYPE), geom = "line", method = "loess", span = 0.07, size = 1) +
theme_Publication() +
scale_linetype_manual(values=c("solid","twodash", "dotted")) +
scale_fill_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 0.3) +
scale_color_viridis(discrete = T, begin = 0.2, end = 0.6, alpha = 1) +
labs(x = "Position to aTSS [nt]", y = "relative \ncoverage") +
geom_vline(xintercept = c(0), linetype = "longdash", alpha = 0.5)
dev.off()
|
773b8a9e28edcee6102a17ae68541b611e4af33b | 400a462eff41e452bce0a7c387b996de717b523a | /olderCode/tests/bestFileAndNumComponentsForEachAnalyte.R | c8f1db25d35f812da54472bcd3da210be2021233 | [] | no_license | BirgandLab/Brittany | 6740de2ad254874860d4f1eaa8cd09cf241f2e7d | 25c2658b1742f9460087d99ae3008f6f679f6564 | refs/heads/master | 2020-05-18T00:27:54.544829 | 2015-04-03T17:54:00 | 2015-04-03T17:54:00 | 20,567,831 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,508 | r | bestFileAndNumComponentsForEachAnalyte.R | library(oce) #used for despiking
library(hydroGOF) #forgot what I used this for
library(pls) #Load the pls package
#******Specify file paths and names
inPath<-"C:/Users/FBLab/Documents/GitHub/Brittany/inputFiles/" #Specify folder where data is located
outPath<-"C:/Users/FBLab/Documents/GitHub/Brittany/output/"
fitPath<-"C:/Users/FBLab/Downloads/FITEVAL2_win/FITEVAL2_win/" #fiteval_out.txt"
#source<-
fitEval<-paste(fitPath,"fiteval",sep="")
fitFile<-paste(fitPath,"PLSR.in",sep="")
fitFileOut<-paste(fitPath,"PLSR_out.txt",sep="")
filename<-c("OriginalBrittany.csv" ,"Brittany1stDerative.csv","TubidityCompensatedBrittany.csv","TurbidityCompensated1stDerivativeBrittany.csv")
#store names for the lab analytes
Chem<-c("CL", "NO2", "NNO2","NO3","NNO3","SO4","DOC","DIC","UV254", "PPO43","Ptot", "MES",
"NNH4", "Ntot", "NTotFilt", "Silica", "Turbidity");
bestFitnComps<-c(16,10,12,4,5,20,5,16,5,16,5,5,6,4,3,11,10)
bestFitFiles<-c(2,2,2,2,3,4,2,1,1,3,3,3,3,2,2,2,2)
fitQuality<-matrix(nrow=17,ncol=14)
colnames(fitQuality)<-c("calibrationVG","calibrationG","calibratinA","calibrationB","calibFitVG","calibFitG","calibFitA","calibFitB","allDataVG","allDataG","allDataA","allDataB","i","j")
Stats<-matrix(nrow=17,ncol=17)
colnames(Stats)<-c("n","r2","rmse","nrmse","slope","n","r2","rmse","nrmse","slope","n","r2","rmse","nrmse","slope","i","j")
#read the data specified by the vector filename
counter<-1
Components<-matrix(nrow=70,ncol=4)
#load data for checking number of components on
for (chem in 1:17){
Comps<-bestFitnComps[chem]
fn<-bestFitFiles[chem]
#all chem analytes are listed in different columns
#1 CL 2 NO2 3 NNO2 4 NO3 5 NNO3 6 SO4
#7 DOC 8 DIC 9 UV254 10 PPO43 11 Ptot 12 MES
#13 NNH4 14 Ntot 15 NTotFilt 16 Silic 17 Turb
# jpeg(file=paste(outPath,".",Chem[chem],"best.jpg",sep=""))
myData<-loadDataFile(inPath,filename[fn])
#data are returned in a list myData$fingerPrints
# myData$realTime
# myData$ChemData
#pull out the column of chem data of interest
ChemConc<-as.matrix(myData$ChemData[,chem]) #take out the only one we care about
fp<-cbind(myData$fingerPrints,myData$realTime,as.matrix(ChemConc)) #bind the two matrices together to determine complete cases
fp<-fp[complete.cases(fp[,2:dim(fp)[2]]),] #just keep the fingerprints that have analytical data
ChemConc<-as.matrix(fp[,dim(fp)[2]]) #pull chem back out
fp<-fp[,-dim(fp)[2]] #pull off the fingerprint
realTime<-as.matrix(fp[,dim(fp)[2]]) #pull real time off (now the last column)
fp<-fp[,-dim(fp)[2]]
#pop it off the end of the dataframe
  # re-estimate the number of components from the data
  # (this overrides the tabulated bestFitnComps value above)
  Comps <- numberOfComponentsToUse(fp, ChemConc)
#Calculate Nash-Sutcliffe based goodness of fit
fitFile<-paste(fitPath,Chem[chem],"2","PLSR.in",sep="")
fitFileOut<-paste(fitPath,Chem[chem],"2","PLSR_out.txt",sep="")
Output<-PLSRFitAndTest(fp,ChemConc,realTime,Comps,fitEval,fitFile,fitFileOut,0)
fitQuality[chem,1:6]<-c(Output$fitQuality,Comps,chem)
Stats[chem,1:7]<-c(Output$Stats,Comps,chem)
#dev.off()
} #for each chemical |
066f29f667c33ba6248eeacdad279a2f982ee12a | 5bc791cca37dfb30d845fdf2beaaff775e1739e5 | /OReilly learning R/Chapter 5 Lists and Data Frames.R | fe0f2202c1ce41d86ff5791994c8fee7fdcce357 | [] | no_license | AScheuss/Rlearning | 061e46d269f9f8bf8a350f70ed07207326640918 | 403b36c36ef2dd1f11f4b9496f5a9ad9389ca8f7 | refs/heads/master | 2021-01-01T17:28:03.748341 | 2017-10-09T09:49:53 | 2017-10-09T09:49:53 | 22,141,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,240 | r | Chapter 5 Lists and Data Frames.R | #### Chapter 5 Lists and Data Frames
## Lists and data frames let us combine different types
### Lists (Vectors with different types)
# Creating lists
(a_list <- list(9.4,'total', c('cool','wicked','nice'),c(3,45,34),asin,month.abb))
# Naming works as with vectors
names(a_list) <- c('a number', 'a string', 'nice strings', 'some integers', 'arcsin', 'months')
a_list
(some_other_list <- list(
"a string" = 'something similar',
"an integer" = 9,
"some numbers" = c(4,32.3,5)))
(list_of_lists <- list(list(c(4,3),'na so was'), 9, list(5,4,'cool')))
## Atomic and Recursive variables
# Because we can construct list of lists we say that list is a recursive variable
# The contrary expression is 'atomic'
# Every variable is either atomic or recursive, never both
# One can test this with the 'is.recursive' or 'is.atomic' functions
is.recursive(some_other_list)
is.atomic(some_other_list)
is.recursive(c(3,6,5))
is.atomic(c(3,6,5))
# length, dim, ncol/NCOL and nrow/NROW behave as with vectors (in particular, dim returns NULL for lists)
length(a_list)
length(list_of_lists)
dim(a_list)
ncol(a_list)
NCOL(a_list)
nrow(a_list)
NROW(a_list)
# Arithmetic on lists does not work (obviously as the elements are of different types...)
# One has to access the elements of the list itself
a_list[[1]] + some_other_list[[2]]
# Performing an operation on every element of a list requires looping (e.g. with lapply)
# Indexing works like with vectors
some_other_list[c(TRUE, FALSE, TRUE)]
a_list[1:3]
list_of_lists[c(-1,-3)]
a_list['a string']
# Access content of the list with double [[]]
a_list[['a string']]
# is.list works in the obvious way
is.list(a_list[3])
is.list(a_list[[3]])
# Use $-signs to access named elements of a list (similar to vectors)
a_list$'nice strings'
names(list_of_lists) <- c('one','two','three')
is.list(list_of_lists$three)
is.list(list_of_lists$two)
# Access nested elements
list_of_lists[['one']][1]
list_of_lists[['one']][[1]]
list_of_lists[[c('one',1)]] #however this looks nasty...
# Be careful when accessing a nonexistent element:
list_of_lists$four
list_of_lists[4]
list_of_lists[['four']]
list_of_lists[[4]] # unlike $ and [ above, this throws an error: subscript out of bounds
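# A safer pattern (illustrative, not from the book): check for the name first
if ('four' %in% names(list_of_lists)) list_of_lists[['four']] else NA
# purrr::pluck(list_of_lists, 'four') likewise returns NULL instead of erroring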
## Converting Between Vectors and Lists
(a_vector <- c(4,3,2,4,5,9))
(a_list_from_a_vector <- as.list(a_vector))
is.list(a_list_from_a_vector)
# Converting from a list to a vector works with as.numeric, as.character etc.
# However, only if every element is of that type
as.numeric(list_of_lists)
as.numeric(a_list_from_a_vector)
# lists are very useful for storing data of the same type, but with a nonrectangular shape
# in this case as.-functions don't work
# use 'unlist'
(prime_factors <- list(
'two' = 2,
'three' = 3,
'four' = c(2,2),
'five' = 5,
'six' = c(2,3),
'seven' = 7,
'eight' = c(2,2,2),
'nine' = c(3,3)))
as.numeric(prime_factors)
unlist(prime_factors)
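# note that unlist() repeats the list names with numeric suffixes
# (four1, four2, six1, ...) so each value stays identifiable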
# Concatenating lists works as with vectors with c-function
c(list('one'=1, 'two'=2),list(2))
# One can also add other types or vectors (implicitly the as.list function is used)
c(list(1,2), c(3,4,5),'stir it up', some_other_list$'some numbers')
# One can also use cbind and rbind, but these result in some strange objects and should therefore be used carefully
### NULL |
e2c78fac72cfca94c990fc0e44d28ecdbc97fe49 | 4419dcaad86d41cca6ad026a6a6c72e408fa62eb | /R/data.R | 10a84bcca40dfc709d8345d43660260ce52e0fa3 | [
"MIT"
] | permissive | poissonconsulting/mcmcr | c122a92676e7b1228eedb7edaebe43df823fdeb8 | ca88071369472483e7d73914493b99fd9bda9bd5 | refs/heads/main | 2023-06-24T09:43:04.640793 | 2023-06-13T00:19:29 | 2023-06-13T00:19:29 | 70,411,531 | 15 | 3 | NOASSERTION | 2022-06-21T03:07:27 | 2016-10-09T15:18:09 | HTML | UTF-8 | R | false | false | 142 | r | data.R | #' An example mcmcr object
#'
#' An example [mcmcr-object()]
#' derived from [coda::line()].
#'
#' @examples
#' mcmcr_example
"mcmcr_example"
|
3ab166299f0149642a88d9d8b0fda646a53289a7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/knitr/examples/knit2html.Rd.R | 430e043884436461815cdeb2426b3e67b6993bee | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 339 | r | knit2html.Rd.R | library(knitr)
### Name: knit2html
### Title: Convert markdown to HTML using knit() and markdownToHTML()
### Aliases: knit2html
### ** Examples
# a minimal example
writeLines(c("# hello markdown", "```{r hello-random, echo=TRUE}", "rnorm(5)", "```"),
"test.Rmd")
knit2html("test.Rmd")
if (interactive()) browseURL("test.html")
|
2be47663e190061163a0e0322347842a9669d5a5 | 3a412887ad6ee81f6da922bcf84f9bba946eeb9f | /snake_collision.R | 48f94b8cf662b7293cdaf95e78a48e770ba2ec75 | [
"MIT"
] | permissive | katerobsau/SketchSnake | 9953f7d9c79d5e8c93d0882f95b43bc8a618c58b | 14adaaad901657900dea8125a71d4a3c6ea60d6f | refs/heads/main | 2023-04-14T22:52:29.444931 | 2021-04-08T02:44:50 | 2021-04-08T02:44:50 | 349,600,864 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,989 | r | snake_collision.R | #! load_script(src = "https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/p5.js")
bkgrd_x = 400
bkgrd_y = 300
setup <- function() {
frameRate(10)
createCanvas(bkgrd_x, bkgrd_y)
}
draw <- function(){
#Draw the background colour
background(0, 0, 33)
# Snake details
snake_col = color('rgb(0,255,0)')
snake_wid = 10
start_snake_len = 15
# Initialise snake
if(frameCount < 2){
snake_stop = FALSE
snake_len = start_snake_len
snake_x = seq(0,start_snake_len)*snake_wid + bkgrd_x/2
snake_y = seq(0,start_snake_len)*0 + bkgrd_y/2
}
# Draw the snake
fill(snake_col)
for(i in 0:snake_len){
square(snake_x[i], snake_y[i], 10)
}
# Move our snake East
if(frameCount > 2 & snake_stop == FALSE){
# Plain right shift
if(keyCode == RIGHT_ARROW){
direction = "right"
old_snake_x = snake_x
old_snake_y = snake_y
for(i in 0:(snake_len - 1)){
snake_x[i] = old_snake_x[i+1]
snake_y[i] = old_snake_y[i+1]
}
new_x = old_snake_x[snake_len] + snake_wid
snake_x[snake_len] = new_x
if(new_x > bkgrd_x) snake_x[snake_len] = 0
}
# Plain left shift
if(keyCode == LEFT_ARROW){
old_snake_x = snake_x
old_snake_y = snake_y
# # if(direction == "right"){
# for(i in 1:snake_len){
# snake_x[i] = old_snake_x[i-1]
# snake_y[i] = old_snake_y[i-1]
# }
# new_x = old_snake_x[0] - snake_wid
# snake_x[0] = new_x
# if(new_x < 0) snake_x[0] = bkgrd_x
# # }
# if(direction != "right"){
for(i in 0:(snake_len - 1)){
snake_x[i] = old_snake_x[i+1]
snake_y[i] = old_snake_y[i+1]
}
new_x = old_snake_x[snake_len] - snake_wid
snake_x[snake_len] = new_x
if(new_x < 0) snake_x[snake_len] = bkgrd_x
# }
direction = "left"
}
# Downwards shift
if(keyCode == DOWN_ARROW){
direction = "down"
old_snake_x = snake_x
old_snake_y = snake_y
for(i in 0:(snake_len - 1)){
snake_x[i] = old_snake_x[i+1]
snake_y[i] = old_snake_y[i+1]
}
new_y = old_snake_y[snake_len] + snake_wid
snake_y[snake_len] = new_y
if(new_y > bkgrd_y) snake_y[snake_len] = 0
}
# Upwards shift
if(keyCode == UP_ARROW){
direction = "up"
old_snake_x = snake_x
old_snake_y = snake_y
for(i in 0:(snake_len - 1)){
snake_x[i] = old_snake_x[i+1]
snake_y[i] = old_snake_y[i+1]
}
new_y = old_snake_y[snake_len] - snake_wid
snake_y[snake_len] = new_y
if(new_y < 0) snake_y[snake_len] = bkgrd_y
}
for(i in 0:(snake_len - 1)){
dx = abs(snake_x[i] - snake_x[snake_len])
dy = abs(snake_y[i] - snake_y[snake_len])
if(dx < snake_wid & dy < snake_wid)
snake_stop = TRUE
}
}
}
# Clash with direction code and collision code
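# Possible fix (sketch, an assumption -- not implemented above): ignore any key
# press that points opposite to the current direction, e.g.
# if (keyCode == LEFT_ARROW & direction != "right") { ... move left ... }
# and likewise for the other arrows, so the head cannot reverse into the body
# and trigger an immediate collision.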
|
4f336d8f6e7a32dbfb60812f7560a558d3f517e0 | fc9ad71c519d3851ddbf97a071e1ba5c98590b77 | /Regresi-n/ejercicios.R | ddefa0002ffe703c76733fea81459cd3e8c82862 | [] | no_license | rgarciarui/Regresi-n | e7c2cb8c50089ec133074e5a4525d3c8fb15e5fe | d523a06988087b4173930198d3957764e77737c5 | refs/heads/master | 2020-09-16T08:51:16.928682 | 2018-06-12T00:20:08 | 2018-06-12T00:20:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,803 | r | ejercicios.R |
###### POINT 2: ROCKS ARTICLE
datos <-read.table(file="https://raw.githubusercontent.com/fhernanb/datos/master/rocas", header=T)
require(rgl)
attach(datos)
plot3d(x=hume, y=esfu, z=poro, lwd=2, col='pink',
xlab='Humedad', ylab='RCU', type='s',
zlab='Porosidad')
mod1 <- lm(esfu ~ hume + poro, data = datos)
summary(mod1)
## Response surface
Humedad <- seq(from=min(datos$hume), to=max(datos$hume), length.out=30)
Porosidad <- seq(from=min(datos$poro), to=max(datos$poro), length.out=30)
Rend <- function(hume, poro) {
res <- coef(mod1) * c(1, hume, poro)
sum(res)
}
Rend <- Vectorize(Rend)
Esfuerzo <- outer(Humedad, Porosidad, Rend)
persp(x=Humedad, y=Porosidad, z=Esfuerzo,
theta=40, phi=30, ticktype = "detailed", col='salmon1')
mod2 <- loess(esfu ~ hume + poro + hume*poro, data= datos,
degree = 1,span = 0.75)
summary(mod2)
hum <- with(datos, seq(min(hume), max(hume), len=10))
poros <- with(datos, seq(min(poro), max(poro), len=10))
newdata <- expand.grid(hume=hum, poro=poros)
fit.esfu <- matrix(predict(mod2, newdata), 10, 10)
persp(x=hum, y=poros, z=fit.esfu,
theta=45, phi=35, ticktype="detailed",
xlab="Humedad", ylab="Porosidad", zlab="Esfuerzo",
shade=0.2, col="lightblue", expand=0.7)
newdata <- data.frame(hume=0.4, poro=1.0)
predict(mod2, newdata)
names(mod1)
names(mod2)
x1 <- datos$esfu
x2 <- fitted.values(mod1)
cor(x1,x2)
y1 <- fitted(mod2)
cor(x1,y1)
## punto 3
#####PUNTO 4 SIMULACION
n <- 500
sig <- 3
x <- rpois(n=n, lambda = 5)
mu <- 36 - 12 *x + x^2
y <- rnorm(n=n, mean= mu, sd = sig)
plot(x,y)
mod1 <- lm(y~x)
summary(mod1)
par(mfrow=c(1,3))
plot(mod1, which = 1:3)
mod2 <- lm(y ~ x + I(x^2))
par(mfrow=c(1,3))
plot(mod2, which = 1:3)
plot(x,y)
fun <- function(x) sum(coef(mod2) * c(1, x, x^2))
fun <- Vectorize(fun)
curve(fun, from=0, to=15, col='blue', lwd=3, add=T)
##### POINT 5: REPLICATE THE RESPONSE-SURFACE OPTIMIZATION EXAMPLE
f <- function(x,y) 2*x*(y**2)+2*(x**2)*y+x*y
x<- seq(-0.5,0.5, len=200)
y<- seq(-0.5,0.5, len=200)
z <- outer(x,y,f)
persp(x,y,z, theta=-30,phi=15,ticktype="detailed")
image(x,y,z) ## to see the optimizers
# partial derivatives with respect to x and y (forward finite differences)
fx <- function(x,y,h=0.001) (f(x+h,y)-f(x,y))/h
fy <- function(x,y,h=0.001) (f(x,y+h)-f(x,y))/h
zfx <- outer(x,y,fx)
zfy <- outer(x,y,fy)
contour(x,y,zfx,level=0)
contour(x,y,zfy,level=0, add=T, col= "red")
x <- seq(-0.2,0,len=400)
y <- seq(-0.2,0,len=400)
z<- outer(x,y,f)
image(x,y,z)
contour(x,y,z,add=T)
## find the maxima algebraically
fbb<-function(x) f(x[1],x[2]) # wraps the bivariate function to take a single vector argument
optim(c(0.5,0.5), fbb ,control=list(fnscale=-1)) # fnscale = -1 makes optim maximize instead of minimize
fxb <- function(x) fx(x[1],x[2])
fyb <- function(x) fy(x[1],x[2])
sumssq <- function(x) fxb(x)**2+fyb(x)**2
optim(c(0.1,0.1),sumssq)
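# Analytic check (sketch, not in the original exercise): the exact partials are
# fx = y*(2*y + 4*x + 1) and fy = x*(4*y + 2*x + 1); setting both to zero gives
# the interior critical point (-1/6, -1/6).
c(fx(-1/6, -1/6), fy(-1/6, -1/6)) # both approximately 0 (finite-difference error)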
#####PUNTO 6
temp <- c(200, 250, 200, 250, 189.65, 260.35,
225, 225, 225, 225, 225, 225)
conc <- c(15, 15, 25, 25, 20, 20, 12.93, 27.07,
20, 20, 20, 20)
rend <- c(43, 78, 69, 73, 48, 76, 65, 74, 76, 79, 83, 81)
datos <- data.frame(temp, conc, rend)
mod <- lm(rend ~ temp + conc + I(temp^2) + I(conc^2) + temp * conc)
minus_rend <- function(x) {
temp <- x[1]
conc <- x[2]
new.data <- data.frame(temp=c(1, temp), conc=c(1, conc))
-predict(mod, new.data)[2]
}
inicio <- c(192, 15) # initial values for the search
names(inicio) <- c('Temperatura', 'Concentracion') # assigning names
res <- nlminb(start=inicio, objective=minus_rend,
              lower=c(189.65, 12.93), # variable minima
              upper=c(260.35, 27.07), # variable maxima
              control=list(trace=1))
res # to see the full contents of res
res$par # optimal values
-res$objective # value of the objective
# assignments reversed from the original (temp/conc/rend hold the data,
# so they must be copied into x1/x2/y, not the other way around)
x1 <- temp
x2 <- conc
y <- rend
minusll <- function(theta, x2, x1,y) {
media <- theta[1] + theta[2] *x1+theta[3]*x2+theta[4]*I(x1^2)+
theta[5]*I(x2^2)+theta[6]*x1*x2
  # define the mean
  desvi <- theta[7] # define the standard deviation
-sum(dnorm(x=y, mean=media, sd=desvi, log=TRUE))
}
res1 <- optim(par=c(0, 0, 0, 0, 0, 0, 1),  # start the sd parameter at 1, not 0
              fn=minusll, method='L-BFGS-B',
              lower=c(-Inf, -Inf, -Inf, -Inf, -Inf, -Inf, 0.001),
              upper=c(Inf, Inf, Inf, Inf, Inf, Inf, Inf), y=y, x1=x1, x2=x2)
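# Sanity check (sketch): with Gaussian errors, the ML estimates res1$par[1:6]
# should match the least-squares coefficients of
# lm(rend ~ temp + conc + I(temp^2) + I(conc^2) + temp*conc)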
#####PUNTO 7
load(url('https://www.dropbox.com/s/ud32tbptyvjsnp4/data.R?dl=1'))
lw1 <- loess(y ~ x,data=data)
plot(y ~ x, data=data,pch=19,cex=0.1)
lines(data$y,lw1$fitted,col="blue",lwd=3)
lw1 <- loess(y ~ x,data=data)
plot(y ~ x, data=data,pch=19,cex=0.1)
j <- order(data$x)
lines(data$x[j],lw1$fitted[j],col="red",lwd=3 )
##### PUNTO 8
help("loess")
#####PUNTO 11
require(MASS)
datos <- Cars93
attach(datos)
## a)
mod1 <- lm(MPG.city ~ Horsepower+Length)
mod2 <- lm(MPG.city ~ poly(Horsepower,
degree = 2, raw = FALSE)+ poly(Length, degree = 2,
raw = FALSE))
mod3 <- loess(MPG.city~ Horsepower+Length, data= datos)
## b)
par(mfrow= c(1,3))
plot(fitted.values(mod1), MPG.city, main = "Modelo 1", pch=10)
plot(fitted.values(mod2), MPG.city, main = "Modelo 2",pch=10)
plot(fitted.values(mod3), MPG.city, main = "Modelo 3",pch=10)
## d)
cor(MPG.city, fitted.values(mod1))
cor(MPG.city, fitted.values(mod2))
cor(MPG.city, fitted.values(mod3))
##f)
aux <- function(x, deg = 2){
  # degree is now an explicit argument (the original referenced an undefined `i`)
  modelo <- loess(MPG.city ~ Horsepower + Length, data = datos, span = x, degree = deg)
  predicciones <- predict(modelo, datos[, c(13, 19)])
  cor(MPG.city, predicciones)
}
aux <- Vectorize(aux)
span <- seq(from=0.1, to=0.9, by=0.01)
plot(x=span, y=aux(span), las=1, pch=20, ylab='Correlacion')
optimize(aux,c(min(span),max(span)), maximum=TRUE)
aux(0.1954733,2)
#####PUNTO 12
calcSSE <- function(x){
loessMod <- try(loess(MPG.city ~ Horsepower+ Length, data=datos,
span=x, degree = 2), silent=T)
res <- try(loessMod$residuals, silent=T)
if(class(res)!="try-error"){
if((sum(res, na.rm=T) > 0)){
sse <- sum(res^2)
}
}else{
sse <- 99999
}
return(sse)
}
optim(par=c(0.5), calcSSE, method="SANN")
##Punto 12
splin <- function(x){
modelo2 <- lm(MPG.city ~ bs(Horsepower,df=7)+bs(Length,df=x))
predicciones <- predict(modelo2,datos[,c(13,19)])
cor(MPG.city,predicciones)
}
splin <- Vectorize(splin)
spl <- seq(from=3, to=7, by=1)
plot(x=spl, y=splin(spl), las=1, pch=20, ylab='Correlacion')
optimize(splin,c(3,7), maximum=TRUE)
|
a7437d7bcdf135c57a01ac0f4247fb6dd1855616 | daaa779292260ca6373168bf5d30e3920001a63a | /R/query_user_features.R | 8f045dd6dc91e973a1ba2b608a5c1a26c4d39ae8 | [
"MIT"
] | permissive | meteomatics/R-connector-api | f075d2b84acffac052c4e74735d73ac24cef186c | f372db8bb736cea3b73b57f5369eb0cf39c2872a | refs/heads/master | 2023-03-10T16:15:05.825948 | 2023-01-26T16:07:08 | 2023-01-26T16:07:08 | 114,877,153 | 6 | 11 | null | 2023-01-26T16:07:10 | 2017-12-20T11:00:08 | R | UTF-8 | R | false | false | 2,031 | r | query_user_features.R | #' @title Query User Features
#'
#' @description
#' Provides information about your Meteomatics licensing options
#'
#' @param username A character vector containing the MM API username.
#' @param password A character vector containing the MM API password.
#'
#' @return A named character vector containing the licensing options.
#' @export
#'
#' @import httr
#' @import jsonlite
#'
#' @examples
#' username <- "r-community"
#' password <- "Utotugode673"
#' query_user_features(username, password)
# Def Query User Features Function
query_user_features <- function(username, password) {
.Deprecated(
msg = paste0(
"This function will be removed/renamed because it only provides ",
"info about the licensing options and not real user statistics. ",
"In addition, do not programmatically rely on user features since ",
"the returned keys can change over time due to internal changes."
)
)
# Call the httr::GET Function to get API Data
DEFAULT_API_BASE_URL <- constants("DEFAULT_API_BASE_URL")
response <-
httr::GET(sprintf(
paste0(DEFAULT_API_BASE_URL, "/user_stats_json"),
username,
password
), timeout = 310)
if (response$status_code != 200) {
# Error if the status_code of the query is not 200 -> print the Error Message of the API
exceptions_code <- exceptions(response$status_code)
stop(paste0(
exceptions_code,
": ",
httr::content(response, as = "text", encoding = "UTF-8")
))
}
# Extract user statistics from HTTP response
j <- jsonlite::fromJSON(httr::content(response, 'text'))
# Summarize desired Info in a Named Vector
res <- logical(3)
names(res) <-
c('area request option',
'historic request option',
'model select option')
res['area request option'] <-
j$`user statistics`$`area request option`
res['historic request option'] <-
j$`user statistics`$`historic request option`
res['model select option'] <-
j$`user statistics`$`model select option`
return(res)
}
|
430cbdc0ed15a09ef56767a244bdc9fa03b99ddf | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/4483_0/rinput.R | 212794632189f7fcb735c6c8a385d08e8151a6c4 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("4483_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4483_0_unrooted.txt") |
865a533d8ff7b091356107e33d56a91609c841ad | 3fdae0215a0bb7c66eddb9e4fe6f2093378c1e61 | /R/setExposureFromInput.r | a224ce42315a3c5c357ef36f08d787b63bbb526b | [] | no_license | Mapowney/resistance | 00c6276db839e2e4b7617fd38c442274198c7356 | e3da4c92b4d878b03344754c1112a640bbbaf85c | refs/heads/master | 2020-04-05T22:50:00.653943 | 2016-11-25T14:17:51 | 2016-11-25T14:17:51 | 61,367,744 | 0 | 0 | null | 2016-06-17T11:01:52 | 2016-06-17T11:01:52 | null | UTF-8 | R | false | false | 1,897 | r | setExposureFromInput.r | #' set exposure to insecticide from input vector
#'
#' fills an array of exposure values from input vector (i.e. where calculations have already been done)
#' see setExposure() to do the calculations
#'
#' @param input input vector
#' @param scen_num scenario number
#'
#' @examples
#' input <- setInputOneScenario()
#' expos <- setExposureFromInput( input )
#' @return array of exposure values for the different insecticides
#' @export
#'
setExposureFromInput <- function( input, scen_num )
{
# exposure to insecticides
# exposure array initialise with 0s
a <- createArray2( sex=c('m','f'), niche1=c('0','a','A'), niche2=c('0','b','B') )
## Exposure levels of males and females to each insecticide niche
# lower case = low concentration, upper case = high, 0 = absence
# males
a['m','0','0'] <- input[8,scen_num]
a['m','a','0'] <- input[9,scen_num]
a['m','A','0'] <- input[10,scen_num]
a['m','0','b'] <- input[11,scen_num]
a['m','0','B'] <- input[12,scen_num]
a['m','a','b'] <- input[13,scen_num]
a['m','A','B'] <- input[14,scen_num]
a['m','A','b'] <- input[15,scen_num]
a['m','a','B'] <- input[16,scen_num]
#allow rounding errors
if ( !isTRUE( all.equal(1, sum(a['m',,]) ))){
stop( paste("Error in male exposures: must total one: ", sum(a['m',,])) )
}
# females
a['f','0','0'] <- input[17,scen_num]
a['f','a','0'] <- input[18,scen_num]
a['f','A','0'] <- input[19,scen_num]
a['f','0','b'] <- input[20,scen_num]
a['f','0','B'] <- input[21,scen_num]
a['f','a','b'] <- input[22,scen_num]
a['f','A','B'] <- input[23,scen_num]
a['f','A','b'] <- input[24,scen_num]
a['f','a','B'] <- input[25,scen_num]
#allow rounding errors
if ( !isTRUE( all.equal(1, sum(a['f',,]) ))){
stop( paste("Error in female exposures: must total one: ", sum(a['f',,])) )
}
return(a)
} |
49d9c93e005db41257a31d3818659882c45077b6 | 24388c5aceeca5827720230f20deaa8c27be5606 | /man/x.Rd | 5d3faa9938d5406fa5133fe2ef0f8b985aa43e0a | [
"MIT"
] | permissive | zhangcal/CalvinZhangGIS3Lab7R | 995a2d88c124bbce056cd32e538a903d7491ec31 | 4c09f4f63763cb95cc86bc0c5b647b06d7768b22 | refs/heads/main | 2023-04-30T19:43:44.895441 | 2021-05-12T09:04:26 | 2021-05-12T09:04:26 | 366,654,242 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 389 | rd | x.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mtcars.R
\docType{data}
\name{x}
\alias{x}
\title{A list of 1000 randomly generated numbes}
\format{
Mtcars dataset from R
\describe{
\item{mpg}{numeric, Miles/(US) gallon}
\item{cyl}{numeric, Number of cylinders}
...
}
}
\usage{
x
}
\description{
A list of 1000 randomly generated numbes
}
\keyword{datasets}
|
1ba4e2d8a484d34aedae0d92c45c791c6c76f696 | 80e691dfc84372960d52ce874c6d922b0a144ff5 | /cachematrix.R | d92fdc5a8f51e1f59c956f72b5c2c851c0db4aad | [] | no_license | dhyvc/ProgrammingAssignment2 | 881b0d21e27c7616267ab2d12f472004a0aeca2d | e2e258b45f78e34686e8edafb02eccbced7b2914 | refs/heads/master | 2021-01-18T02:42:41.091394 | 2014-12-11T15:24:07 | 2014-12-11T15:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,270 | r | cachematrix.R | ## This file includes a pair of functions for calculating
## the inverse of a matrix, either by returning a previous
## cached value, or calculating it and caching it for
## future use.
## The first function creates a set of helper functions for
## supporting caching the calculation of the inverse of
## a matrix. These functions include:
##
## 1. Setting the matrix in the global environment
## 2. Getting the matrix from the global environment
## 3. Setting the value of the matrix inverse
## 4. Getting the value of the matrix inverse
##
## The function returns a list of the above functions
makeCacheMatrix <- function(x = matrix()) {
i <- NULL ## Initialize the inverse cache variable
set <- function(y) { ## This function is not used in our assignment
x <<- y ## Store the matrix in a global variable
i <<- NULL ## Store an empty matrix inverse cache global variable
}
get <- function() x ## Retrieve the matrix
setinv <- function(inv) i <<- inv ## Set the calculated matrix inverse
## in the global environment cache
getinv <- function() i ## Get the cached matrix inverse value
list(set = set, get = get, ## Return the list of helper functions.
setinv = setinv,
getinv = getinv)
}
## The second function provides the inverse matrix calculation, either
## by using a preexisting cached result, or a new calculation.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## Check if the matrix is identical to the cached matrix
## and a cached value has been calculated. If so, return it.
i<-x$getinv() ## Get the cached inverse matrix value
if (!is.null(i)){ ## Check if the value reflects a previous calculation
message("The inverse value has already been cached. Here it is:")
return (i)
}
## If there was no cached result, calculate
## the inverse matrix and return it.
else{
data<-x$get() ## Get the stored matrix
i<-solve(data) ## Calculate the inverse matrix value
x$setinv(i) ## Store the calculated value for later use
i ## Return the result
}
}
|
73be600dbb9064f7f28b53cb3426d959817e836d | ff6192122cbc1c6e5ada69f7f9e4a97f4b891d7e | /man/resolve_rest_format.Rd | 1c03cac40ff056781aa580d084e42c4b8ed1c088 | [
"MIT"
] | permissive | MoseleyBioinformaticsLab/jpredapir | a3b2d3b35058c0ff489390cc0cb4b12a72e4ec9f | 50fb1dcfab4729636df37512f387f7efaef56b7c | refs/heads/master | 2020-03-22T19:22:37.959403 | 2018-09-28T04:03:40 | 2018-09-28T04:03:40 | 140,523,772 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 572 | rd | resolve_rest_format.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{resolve_rest_format}
\alias{resolve_rest_format}
\title{Determine JPred REST interface submission format.}
\usage{
resolve_rest_format(mode, user_format)
}
\arguments{
\item{mode}{Submission mode, possible values: "single", "batch", "msa".}
\item{user_format}{Submission format, possible values: "raw", "fasta", "msf", "blc".}
}
\value{
Format for JPred REST interface.
}
\description{
Resolve format of submission to JPred REST interface based on provided mode and user format.
}
|
29394dd0dffe065e52f4b78ab6b88a2a4623d7b6 | 580684d0451310bfbefcd793fd2cc77ae87251fd | /Scraping_Bild.R | 9cc1d7583207bce313fa8d7cef5f9ee7ca5a958b | [] | no_license | Topf/webscraping_news | 0d7f92fc386f51545a1037541fea67abf109e274 | 5d23712679f6fa6a8c9b69d1f88042110c5def75 | refs/heads/master | 2022-10-11T17:59:17.044045 | 2020-06-09T19:14:01 | 2020-06-09T19:14:01 | 271,087,393 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,800 | r | Scraping_Bild.R | #install.packages("rvest")
#install.packages("tidyverse")
#install.packages("data.table")
#install.packages("plyr")
#install.packages("xlsx")
library(rvest)
library(tidyverse)
library(data.table)
library(plyr)
library(xlsx)
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
map_dfc(.x = c("h3", "li time"),
.f = function(x) {read_html("https://www.bild.de/suche.bild.html?type=article&query=corona&resultsStart=0&resultsPerPage=80") %>%
html_nodes(x) %>%
html_text()}) %>%
bind_cols(href = read_html("https://www.bild.de/suche.bild.html?type=article&query=corona&resultsStart=0&resultsPerPage=80") %>%
html_nodes("#innerWrapper > div.faux.clearfix > div > section > ol > li:nth-child(n) > div.hentry.landscape.search.t9l > a") %>%
html_attr("href")) %>%
setNames(nm = c("title", "time", "href")) -> temp
temp$href <- paste("https://www.bild.de", temp$href, sep="")
map_df(.x = temp$href[1:80],
.f = function(x){tibble(href = x,
text = read_html(x) %>%
html_nodes("#innerWrapper > main > div.faux > div > article > div.txt > p:nth-child(n)") %>%
html_text() %>%
list
)}) %>%
unnest(text) -> foo
foo
foo <- foo %>%
# recode empty strings "" by NAs
na_if("") %>%
# remove NAs
na.omit
foo <- setDT(foo)[, lapply(.SD, function(x) toString(na.omit(x))), by = url]
X2 <- ddply(foo, .(url), summarize,
Xc=paste(text,collapse=","))
final <- merge(temp, X2, by="url")
write_xlsx(vallah,"C:/Users/User/Desktop/drip2.xlsx")
|
77dd17a365a396fd29fe4027614586eec05c1a76 | b56eaa12d1d095388767fbe45fe1710a2426cbc4 | /class_2/class_2.R | 03aa72ae9f54d04bd11eafa4fab9ed214af8601b | [] | no_license | misrori/web_scraping | fdb91af923fe66e09d24b9adf95ee73758982282 | 0554c53f28002a82cf0ee9ff1cd6ed8b6e2ea08e | refs/heads/master | 2020-08-27T04:45:58.309951 | 2019-12-04T14:12:37 | 2019-12-04T14:12:37 | 217,248,015 | 6 | 16 | null | 2019-11-29T14:43:43 | 2019-10-24T08:19:55 | Jupyter Notebook | UTF-8 | R | false | false | 4,238 | r | class_2.R | # warm up task
#get the news, the summary, and the title into a dataframe
#https://www.economist.com/leaders/
library(rvest)
my_url <- 'https://www.economist.com/leaders/'
get_one_page <- function(my_url) {
t <- read_html(my_url)
my_titles <-
t %>%
html_nodes('.flytitle-and-title__title')%>%
html_text()
my_relative_link <-
t %>%
html_nodes('.teaser')%>%
html_nodes('a')%>%
html_attr('href')
my_links <- paste0('https://www.economist.com/', my_relative_link)
my_teaser <-
t %>%
html_nodes('.teaser__text')%>%
html_text()
#
return(data.frame('title'= my_titles, 'teaser'= my_teaser, 'link'= my_links))
#
}
res <- get_one_page('https://www.economist.com/leaders/')
#
# create named list
my_first_list <- list('first'='first_text', 'second'= 42, 'my_mtcars'= mtcars)
str(my_first_list)
my_first_list$first
my_first_list <- list('first'='first_text', 'second'= 42, 'my_mtcars'= mtcars,
"more_things" =list('my_letters'= letters, "my_hist"= hist(1:100) ))
#
getwd()
# game of thrones download
for (my_id in c(1:20)) {
print(my_id)
my_url <- paste0('https://deathtimeline.com/', my_id, '.jpg')
print(my_url)
my_saving_path <- paste0('gameofthrones/', my_id, '.jpg')
download.file(my_url, my_saving_path)
print(my_saving_path)
}
lapply(letters,function(my_id){
print(my_id)
})
for (my_del_id in 2:4) {
file.remove(paste0('/home/mihaly/R_codes/web_scraping/gameofthrones/',my_del_id,'.jpg'))
}
file.remove('/home/mihaly/R_codes/web_scraping/gameofthrones/1.jpg')
library(jsonlite) # fromJSON() below needs jsonlite
coins <- fromJSON('https://coinpaprika.com/ajax/coins/',simplifyDataFrame = F)
myurl<-paste0("https://partners.api.skyscanner.net/apiservices/browsequotes/v1.0/HU-sky/HUF/en-US/BUD-sky/Anywhere/","2020-02-01","/","2020-02-10","?apiKey=ah395258861593902161819075536914")
t <- fromJSON(myurl)
View(t$Quotes)
View(t$Places)
coins$ath$usd$updated_at
mtcars$gear
plot(my_first_list$more_things$my_hist)
nrow(my_first_list$my_mtcars)
sum(my_first_list$my_mtcars$hp)
library(jsonlite)
my_first_list <- list('first'='first_text', 'second'= 42, 'my_mtcars'= mtcars,
"more_things" =list('my_letters'= letters ))
toJSON(my_first_list)
toJSON(my_first_list,auto_unbox = T)
toJSON(my_first_list,auto_unbox = T, pretty = T)
write_json(my_first_list, 'my_first.json', auto_unbox = T, pretty = T)
my_death_2 <- fromJSON('https://deathtimeline.com/api/deaths?season=2')
my_death_list_2 <- fromJSON('https://deathtimeline.com/api/deaths?season=2', simplifyDataFrame = F)
get_one_season <- function(season_id) {
my_url <- paste0('https://deathtimeline.com/api/deaths?season=', season_id)
t_table <- fromJSON(my_url)
return(t_table)
}
get_one_season(5)
my_death_list_of_df <-lapply(1:6, get_one_season)
library(data.table)
final_df <- rbindlist(my_death_list_of_df)
# getting started with lists
my_list <- list('first'='my_first_string', 'second'=42, 'hundred'= c(1:100), my_mtc = mtcars, 'list_again' = list('some_more'= 'it is not complicated', 'one_plot' = hist(1:100)) )
my_list <- list('first'='my_first_string', 'second'=42, 'hundred'= c(1:100), my_mtc = mtcars, 'list_again' = list('some_more'= 'it is not complicated') )
my_list$first
sum(cumsum(my_list$hundred))
sum(my_list$my_mtc$cyl)
my_list$list_again$one_plot # NULL here: the second definition above dropped 'one_plot'
my_list <- list()
for (i in c(1:10)) {
#my_list[[paste0('the element ', i, ' is: ')]] <- letters[i]
my_list[[paste0('element_', i, '_is')]] <- letters[i]
}
my_list[['ten']] <- c(1:10)
library(jsonlite)
toJSON(my_list)
toJSON(my_list,auto_unbox = T)
toJSON(my_list,auto_unbox = T,pretty = T)
write_json(my_list , 'my_res.json', auto_unbox = T, pretty=T)
back_to_list <- fromJSON('my_res.json')
toJSON(mtcars)
coins <- fromJSON('https://coinpaprika.com/ajax/coins/')
my_base_data<-do.call(data.frame, coins)
coins$tags <- NULL
my_base_data<-do.call(data.frame, coins)
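# the second do.call works because dropping 'tags' (whose length varies per
# coin) leaves a rectangular list that can be coerced to a data frame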
death <- fromJSON('https://deathtimeline.com/api/deaths?season=1', simplifyDataFrame = F)
for (i in c(1:12)) {
my_url <- paste0('https://deathtimeline.com/',i,'.jpg')
print(my_url)
download.file(my_url, paste0('gameofthrones/',i,'.jpg'))
}
my_list
names(my_list)
paste0( unlist(my_list), collapse = "#")
length(paste( unlist(my_list)))
|
4719eccabdab8783b374519996ca763e90f6dae1 | 749c84ae1571427a5f8c86787ec767c9c9751326 | /llike.r | f27de1d914c579a0cab00329a08032b4997eb63b | [] | no_license | eki1381/GWMLR | 3ef626e0d80db37341624edd75a00a474e517c23 | 5d7d8f4c826023f0b973afa666cef37c3d611367 | refs/heads/master | 2021-01-19T06:41:06.088314 | 2016-07-14T15:51:20 | 2016-07-14T15:51:20 | 61,521,980 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 411 | r | llike.r | llike <- function(y.design.2,x.design.1,beta.1.temp,N,J,K){
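  # Multinomial logistic log-likelihood: for each observation i this adds
  # sum_j y_ij * (x_i' beta_j) - log(1 + sum_j exp(x_i' beta_j)),
  # with the J-th category as the reference level.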
res <- 0
for(i in 1:N){
eq.1.2 <- 0
eq.2.1 <- 0
for(j in 1:(J-1)){
eq.1.1 <- 0
for(k in 1:(K+1)){
eq.1.1 <- eq.1.1 + (x.design.1[i,k]*beta.1.temp[k,j])
}
eq.1.2 <- eq.1.2 + (y.design.2[i,j]*eq.1.1)
eq.2.1 <- eq.2.1 + exp(eq.1.1)
}
res <- res + (eq.1.2 - log(1+eq.2.1))
}
return(res)
}
|
b121debb0c1ed6c06ad823db622ae45618c65842 | 3244df900eb5aafe74a49c02f5f332824f220554 | /derive_model_from_url.R | cb4dfa85ded3b6e2da62fa245fbcb3466796fa94 | [] | no_license | Studentenfutter/cars-inequality | d1d94aa0922006cf7c29276600c94ae19dfd3b3e | 2a5c62a507c5863a7ab21c5c7218587c35b27ac7 | refs/heads/master | 2022-12-07T12:51:21.547515 | 2020-08-20T12:23:07 | 2020-08-20T12:23:07 | 200,675,462 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,321 | r | derive_model_from_url.R | name_list <- list( list('VW-Polo', 'kleinwagen'), list('Opel-Corsa','kleinwagen'),
list('Ford-Fiesta', 'kleinwagen'), list('Mercedes-Benz-E-Klasse','Sportwagen'),
list('BMW-Z4','Sportwagen'), list('Porsche-911','Sportwagen'),
list('Opel-Astra', 'kompaktklasse'), list('Audi-A3', 'kompaktklasse'),
list('VW-Golf', 'kompaktklasse'), list('VW-Tiguan', 'gelaendewagen'),
list('BMW-X1', 'gelaendewagen'), list('Audi-Q5', 'gelaendewagen'),
list('Smart-Fortwo', 'minis'), list('Fiat-Panda', 'minis'),
                   list('Renault-Twingo', 'minis'), list('Mercedes-Benz-C-Klasse', 'mittelklasse'),
                   list('BMW-3er', 'mittelklasse'), list('VW-Passat', 'mittelklasse'),
list('Mercedes-Benz-E-Klasse', 'obere_mittelklasse'), list('BMW-5er', 'obere_mittelklasse'),
list('Audi-A6', 'obere_mittelklasse'),
list('Mercedes-Benz-S-Klasse', 'oberklasse'), list('BMW-7er', 'oberklasse'),
list('Audi-A8', 'oberklasse'))
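# NOTE: 'df' is assumed to exist already (scraped listings with a 'url'
# column); each model name is matched as a lower-cased substring of the URL
# in the loop below, so later list entries overwrite earlier matches.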
df$car <- NA
for (i in seq_along(name_list)){
  model <- tolower(name_list[[i]][[1]])
  df$car <- ifelse(grepl(model, df$url), model, df$car)
  print(model)
}
|
c4d14acc1aacf308c494e4d8c7ac55698dd874c7 | c52b4ecffe7752fe5b999814ad8d735e81473269 | /tests/testthat/test-point_error.R | 5968c70c641d3216312f8517437a129d0ae3f42a | [
"MIT"
] | permissive | UAB-BST-680/tblStrings | bd96275243f9e30ce4c8f4183ed7979f84f4e427 | e753b19131985438eb9d87253647951fee1f1f2a | refs/heads/master | 2021-04-18T03:19:53.766910 | 2020-03-23T17:34:13 | 2020-03-23T17:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,682 | r | test-point_error.R |
test_that(
"vector behavior is correct",
{
points <- c(1:10)
errors <- rep(20, 10)
points[2] <- NA
errors[2] <- NA
points[3] <- NA
errors[4] <- NA
# Construction ----
p <- pointErr(points, errors, style = 'brac',
brac_left = '[[', brac_right = ']]')
q <- pointErr(points, errors)
expect_error(pointErr(1, -1), 'must be >0')
expect_is(p, 'tblStrings_pointErr')
expect_true(is_pointErr(p))
# Formatting ----
expect_equal(
as.character(p),
c(
"1.00 [[20.0]]",
"NA [[NA]]",
"NA [[20.0]]",
"4.00 [[NA]]",
"5.00 [[20.0]]",
"6.00 [[20.0]]",
"7.00 [[20.0]]",
"8.00 [[20.0]]",
"9.00 [[20.0]]",
"10.0 [[20.0]]"
)
)
# Casting ----
expect_is(as_pointErr(c(1,3)), 'tblStrings_pointErr')
mat = matrix(c(points, errors), ncol=2)
dat = as.data.frame(mat)
lst = list(points, errors)
m = as_pointErr(mat)
d = as_pointErr(dat)
l = as_pointErr(lst)
# Coercion ----
r <- c(m, d)
expect_equal(r[1], r[11])
expect_is(r, 'tblStrings_pointErr')
expect_is(c(p, pointErr(0,1)), 'tblStrings_pointErr')
expect_error(c(p, '1'))
expect_error(c(p, 1))
expect_error(c(p, Sys.time()))
# Comparisons and math ----
csum = sum(points, na.rm=TRUE)
dsum = sum(errors, na.rm=TRUE)
    # chr_to_dbl(vctrs::vec_data(m)) # result was unused; kept for reference
answer <- pointErr(csum, dsum)
expect_equal( sum(m, na.rm=TRUE), answer )
expect_equal( sum(d, na.rm=TRUE), answer )
expect_equal( sum(l, na.rm=TRUE), answer )
expect_true(m[1] < m[5])
# Front-end ----
}
)
|
5089caa86ecbdecc581f28cd24be9eeddf158227 | 84ec00770c4947c0af1980c971df718eac2740a1 | /plot4.R | 4fa506343b787914506a248c541cfb38625f4def | [] | no_license | rbroderson/ExData_Plotting1 | 6b6d392c948cedfcc800703644d4da69ae4f2764 | ca40c9db0a407814069b6e629e4a34acddab99e2 | refs/heads/master | 2021-01-18T00:14:30.981192 | 2015-04-10T16:51:52 | 2015-04-10T16:51:52 | 33,684,780 | 0 | 0 | null | 2015-04-09T18:15:20 | 2015-04-09T18:15:20 | null | UTF-8 | R | false | false | 1,507 | r | plot4.R | #IMPORTANT: The household_power_consumption.txt file must be in your working directory.
p1 <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
library(datasets)
p1s <- p1[as.Date(p1$Date, "%d/%m/%Y") %in% as.Date(c('2007-02-01', '2007-02-02')),]
p1s$newtime <- as.POSIXct(paste(p1s$Date, p1s$Time), format="%d/%m/%Y %H:%M:%S")
png("plot4.png", width=480, height=480)
par(mfrow = c(2,2))
#Create plot A
with(p1s, plot(newtime, as.numeric(as.character(Global_active_power)), type="l", xlab="", ylab="Global Active Power (kilowatts)"))
#Create plot B
with(p1s, plot(newtime, as.numeric(as.character(Voltage)), type="l", ylab='Voltage', xlab='datetime'))
#Create plot C
with(p1s, plot(newtime, as.numeric(as.character(Sub_metering_1)), type="l", xlab='', ylab='Energy sub metering'))
lines(p1s$newtime, as.numeric(as.character(p1s$Sub_metering_2)), type="l", col="red")
lines(p1s$newtime, as.numeric(as.character(p1s$Sub_metering_3)), type="l", col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1), col=c("black","red","blue"))
#Create plot D
with(p1s, plot(newtime, as.numeric(as.character(Global_reactive_power)), type="l", ylab='Global_reactive_power', xlab='datetime'))
dev.off()
|
141fd5f5ac2bc66f25feb9d59179e65d076b7d64 | 18509ba0e3ccd7570eb75915689856993e9cce20 | /create_tidy_data.R | bf121b6cbec40420a9a16888d67626abec194069 | [] | no_license | DolphinWorld/coursera_cleardata | 11744e88b70efa850cee12589b7253278555eaa3 | 16fb1b26217179e961376b5a781be85ef903ff0d | refs/heads/master | 2020-05-17T02:51:28.189582 | 2015-02-24T03:10:04 | 2015-02-24T03:10:04 | 31,164,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 334 | r | create_tidy_data.R | #gather data, so that the column becomes: subject, activity, feature, and measure
library(tidyr) # gather()
library(plyr)  # ddply()
data_gathered <- gather(data, feature, measure, tBodyAccmeanX:fBodyBodyGyroJerkMagstd)
#create data_tidy with summarized means
data_tidy <- ddply(data_gathered, c("subject", "activity", "feature"),
summarize, mean=mean(measure) )
|
f8cea05bf5bc5506ebadd909f20da006b4d9a17a | 744bcd91563f50597a96f02571726d769df984b4 | /BeyleGovJar.R | c34109c3d201ee013ceb274ec6f9fa7756ae4642 | [] | no_license | zoeang/StateGovernors1950-2008 | a9e2c240c02e6aac4a6663caaf3bd46962207d20 | a74f509cd00fae1028f2eb239af80ebe9855d966 | refs/heads/master | 2020-03-17T07:30:02.958459 | 2018-08-16T19:42:11 | 2018-08-16T19:42:11 | 133,400,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,596 | r | BeyleGovJar.R | #==============================================================================
#Beyle Gov Job Approval Rating
#==============================================================================
jar<-read.csv('C:/Users/zoeja/OneDrive/Documents/Summer 2018/GovernorData/StateGovernors1950-2008/BeyleJAR/BeyleGovJAR.csv')
states<-dat$st[order(unique(dat$st))] #order states alphabetically; relies on csp loaded as 'dat'
states<-states[-c(1,11)] #remove second AZ
#==============================================================================
# Aside: It is incredibly inconvenient that the state names are not in the same alphabetical order
# as the state codes. I don't know if I should be disappointed with the individual responsible for this,
# or myself for my 23 years of ignorance on this matter. It just feels a bit personal.
#==============================================================================
# Order state codes to match state name order
statesorder<-states[c(2,1,4,3, 5:11,13:15,12,16:18,21,20,19,
22,23,25,24,26,29,33,30:32,34,27,28,35:44,
46,45,47,49,48,50)]
jar$st<-sapply(jar$STATE, function(x) statesorder[x]) #match state number to state code
jar<-jar[,c(1,ncol(jar),2:(ncol(jar)-1))] # move the state columns next to one another
#----------------------------------------
#whatever I did above was dumb and probably took me more time than I spent on below
#-------------------------------------------------------------------------------
statesorder<-as.data.frame(cbind(seq(1:50),
c('AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME',
'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH',
'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI',
'SC', 'SD', 'TN', 'TX', 'UT',' VT', 'VA','WA', 'WV', 'WI', 'WY')))
names(statesorder)<-c('STATE', 'st')
jar1<-merge(statesorder, jar, by='STATE')
#----------------------------------------------------------------------------------
# Find year the JAR question was in the field to get a yearly measure of gov JAR
jar1yr<-jar1[which(!(jar1$YEARIN==jar1$YEAROUT)),] # observations where the question spans over the calendar year (such as oct to jan)
changeyr<-which(!(jar1$YEARIN==jar1$YEAROUT)) #vector of rows that have weird survey dates
jar1$YEAROUT[changeyr]<-c(1992, NA, NA, NA, NA, 1994, NA, #replace with more accurate number
NA, NA, NA, NA, 1995, NA, 1996,
NA, 1991, 1992, 1992, 1993, 1994, 1996)
names(jar1)
jarvars<-jar1[,c('st', 'NAME', 'POSPCT', 'NEGPCT', "RATINGSC", 'YEAROUT', 'SAMPLE')]
jarvars$POSPCT<-jarvars$POSPCT/100 #convert to proportion
jarvars$NEGPCT<-jarvars$NEGPCT/100 #convert to proportion
names(jarvars)[-1]<-c('gov.name', 'percent.approve', 'percent.disapprove', 'JAR.rating', 'year', 'sample.size')
class(jarvars$sample.size)
#get average for gov by year
library(dplyr)
totsample.names<-jarvars%>% #find average approval/disapproval means; not weighted
  group_by(year, st)%>%
  summarise(surveys=n(),
            mean.approve=sum(percent.approve)/surveys,
            mean.disapprove=sum(percent.disapprove)/surveys,
            mean.rating=sum(JAR.rating)/surveys)
jarvars1<-totsample.names[,-3]
setwd('C:/Users/zoeja/OneDrive/Documents/Summer 2018/GovernorData/StateGovernors1950-2008/BeyleJAR')
write.csv(jarvars1, "GovJAR.csv") #df of approval/disapproval by st and year
|
fd1e50d4fc9971b414138b7d2c4c44a163dac844 | 5a389396139299bbbcd0a7725d704bbad0b89f2a | /man/h3_string_to_int.Rd | 45a262c2de4c102551f6809a037bde3af2afed39 | [] | no_license | NickCH-K/placekey | 0296e50e61c90f1bfe52d0a5f60c2df5556427be | 3ede82969e3fbbf2decaebeff45734a1b13b436f | refs/heads/master | 2023-01-08T11:34:09.452507 | 2020-11-02T07:36:53 | 2020-11-02T07:36:53 | 309,290,589 | 1 | 0 | null | 2020-11-02T07:35:32 | 2020-11-02T07:29:37 | R | UTF-8 | R | false | true | 463 | rd | h3_string_to_int.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{h3_string_to_int}
\alias{h3_string_to_int}
\title{Convert an H3 index string to 64 bit integer}
\usage{
h3_string_to_int(index)
}
\arguments{
\item{index}{h3 index as a hex representation character vector. See \code{\link{getIndexFromCoords}}}
}
\description{
h3_string_to_int takes an h3 index represented as a hexadecimal string and returns the integer format.
}
|
4fc37035ac4bddf17001ce1eb8c34e76dc122dbb | 584025b690582ab9ac588c77ebc5e746e95816f8 | /man/print.html.Rd | 187f86f639a24df57fceda20a507f3cce01dd39d | [] | no_license | rstudio/htmltools | 27b459793e6c7dfb12bcd196e72378072d85e92a | 251526c9886ca9ab5460c7bd4d73c0d61e2b40b8 | refs/heads/main | 2023-08-17T23:16:13.900249 | 2023-08-14T19:49:04 | 2023-08-14T19:49:04 | 18,398,793 | 210 | 82 | null | 2023-09-08T14:34:59 | 2014-04-03T10:11:03 | R | UTF-8 | R | false | true | 760 | rd | print.html.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tags.R
\name{print.shiny.tag}
\alias{print.shiny.tag}
\alias{print.html}
\title{Print method for HTML/tags}
\usage{
\method{print}{shiny.tag}(x, browse = is.browsable(x), ...)
\method{print}{html}(x, ..., browse = is.browsable(x))
}
\arguments{
\item{x}{The value to print.}
\item{browse}{If \code{TRUE}, the HTML will be rendered and displayed in a
browser (or possibly another HTML viewer supplied by the environment via
the \code{viewer} option). If \code{FALSE} then the HTML object's markup
will be rendered at the console.}
\item{...}{Additional arguments passed to print.}
}
\description{
S3 method for printing HTML that prints markup or renders HTML in a web
browser.
}
|
ff4b945230227ab2de40a813a96d610313205f15 | 144e1b215a8546d820f929055138b06eb67eda74 | /set_buy_sell_rules.R | 9080c98de9321ab3f6893ba8ffee726bcda0015d | [] | no_license | Mentalaborer/TradeNow | 9acdb1bd5a9e0822fded0ec89aab9f80771845cb | 7b82093f0324ab2a314216fa950f7a8e42c7a5e2 | refs/heads/master | 2020-11-26T01:04:15.277242 | 2020-07-05T20:52:12 | 2020-07-05T20:52:12 | 228,915,006 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,351 | r | set_buy_sell_rules.R |
######## DEPRECATED - THIS IS NOW FUNCTIONALIZED ######
# Purpose: create trading signal based on simple filter rule. Recall that simple
#filter rule suggests buying when the price increases a lot compared to the yesterday price
# source('global_filters.R')
###### Generate Day Trading Signals ######
# ### Signal 1: Based on Simple Filter Rule ###
# for (i in 2: length(price)){
# if (price_change[i] > delta){
# signal[i]<- 1
# } else
# signal[i]<- 0
# }
#
# # Assign time to action variable using reclass;
# signal<-reclass(signal, price)
# colnames(signal) <- 'price_change_met'
#
# ## Apply Trading Rule
#
# # buying when the price increases a lot (by the threshold)
# trade <- lag(signal, 1) # trade based on yesterday's signal
# stock_return<-dailyReturn(focal_stock_adjusted)*trade # daily profit rate = daily return (close - open / open)
# names(stock_return)<-'filter'
# # Chart with New Trading Rule
# chartSeries(focal_stock_adjusted,
# type = 'line',
# subset='2020',
# plot = TRUE,
# theme=chartTheme('black'))
#
# addTA(signal,type='S',col='red')
#
# #Performance Summary
# charts.PerformanceSummary(stock_return, main="Naive Buy Rule")
## Create Simple Filter Buy-Sell Rule ##
# Goal: create trading signal based on simple filter rule.
# Recall that simple filter rule suggests buying when the price
# increases a lot compared to the yesterday price and selling
# when price decreases a lot
# # Signal 2: Based on Simple Filter (Naive)
# for (i in 2: length(price)){
# if (price_change[i] > delta){
# signal[i]<- 1
# } else if (price_change[i]< -delta){
# signal[i]<- -1
# } else
# signal[i]<- 0
# }
#
# ## Apply Trading Rule
# signal_buy_sell<-reclass(signal, price)
# trade_2 <- Lag(signal_buy_sell)
# stock_return_2<-dailyReturn(focal_stock_adjusted)*trade_2
# names(stock_return_2) <- 'Naive'
# charts.PerformanceSummary(stock_return_2)
### Signal 3: Based on RSI ###
# for (i in (day+1): length(price)){
# if (rsi[i] < rsi_buy_cutpoint){ #buy if rsi < rsi_cutpoint
# signal[i] <- 1
# }else { #no trade all if rsi > rsi_cutpoint
# signal[i] <- 0
# }
# }
#
# ## Apply Trading Rule
# signal_rsi<-reclass(signal, price)
# trade_3 <- Lag(signal_rsi)
#
# #construct a new variable ret2
# ret2 <- dailyReturn(price)*trade_2
# names(ret2) <- 'Naive'
# # construct a new variable ret3
# ret3 <- dailyReturn(price)*trade_3
# names(ret3) <- 'RSI'
# compare strategies with filter rules
# signal_compare <- cbind(ret2, ret3)
# charts.PerformanceSummary(signal_compare,
# main="Naive v.s. RSI")
### Signal 4: Based on EMA and RSI ###
# Buy signal based on EMA rule
# Sell signal based on RSI rule
# for (i in (day+1):length(price)){
# if (price_change[i] > delta){
# signal_combine[i]<- 1
# } else if (rsi[i] > rsi_sell_cutpoint){
# signal_combine[i]<- -1
# } else
# signal_combine[i]<- 0
# }
# signal_combine<-reclass(signal_combine,price)
#
#
# ## Apply Trading Rule
# trade_4 <- Lag(signal_combine)
# ret4<-dailyReturn(focal_stock_adjusted)*trade_4
# names(ret4) <- 'Combine'
# retall <- cbind(ret2, ret3, ret4)
#
# charts.PerformanceSummary(
# retall, main="Naive v.s. RSI v.s. Combine",
# colorset=bluefocus)
|
e689c5510b87cac69f94e9f786fedb61b1f593d5 | b8fc0f17b7eade56da95f5a6fc882173c4c6e3c9 | /GARCH_func_code.R | 8e38525e85db0eddf057c9f32fa75caf85825434 | [] | no_license | alvinchow8/Quant-Stock-Price-Analysis | 3c61084610cd3a4a84dc323e872bcc03ff839c43 | 4910f898d5e0e752113d3f33fd2aefd0f4a6f092 | refs/heads/master | 2020-03-21T23:01:54.312968 | 2018-06-29T14:57:01 | 2018-06-29T14:57:01 | 139,162,136 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,356 | r | GARCH_func_code.R | require(roxygen2)
require(rstan) # stan() and extract() below come from rstan, not roxygen2
# ===========================================================
# create_GARCH_obj (asset) creates GARCH objects
#' @param asset - individual asset t x 1 vector
#' @param sigma1 - initial sigma1 to initialize GARCH objects
#' @param sigma2 - initial sigma2 to initialize GARCH objects
#' @param T_out - number of time periods for object to predict
#' @export - returns GARCH objects
# ===========================================================
create_GARCH_obj = function(asset,sigma1,sigma2,T_out){
t = length(asset)
stan_input = list (T = t,r = asset, sigma1 = sigma1, T_out = T_out)
GARCH11_fit = stan(file = 'GARCH11_STANCODE.stan', data = stan_input,
iter = 500, chains = 10)
stan_input = list (T = t,r = asset, sigma1 = sigma1, sigma2 = sigma2, T_out = T_out)
GARCH22_fit = stan(file = 'GARCH22_STANCODE.stan', data = stan_input, iter = 500, chains = 10)
return(list (G11_obj = GARCH11_fit, G22_obj = GARCH22_fit))
}
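# Hypothetical usage sketch (argument values are placeholders):
# fits <- create_GARCH_obj(asset = rnorm(250), sigma1 = 1, sigma2 = 1, T_out = 10)
# fits$G11_obj and fits$G22_obj are the fitted stanfit objects.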
# ===========================================================
# price_to_return (fin_data)
#' @param fin_data - financial data t x n matrix
#' @export - returns matrix of returns
# ===========================================================
price_to_return = function (fin_data){
r_matrix = rep(0,length(fin_data[1,]))
T_in = length (fin_data[,1])
for (t in 2:T_in){
resid_curr = fin_data[t,] - fin_data[t-1,]
r_matrix = rbind(r_matrix,resid_curr)
}
return (r_matrix)
}
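# NOTE: price_to_return() returns first differences (absolute price changes),
# not percentage returns; a log-return alternative would be something like
# diff(log(as.matrix(fin_data))).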
# ===========================================================
# plot_PostPrior (GARCH_object)
#' @param GARCH_obj is a GARCH object
#' @param real_fin_data is a matrix of a sector
#' @param bool_11 is true if obj is a GARCH(1,1), false other wise
#' @param sector is the sector which we are looking at (STR)
#' @export : Plots red curve for Posterior, Blue curve for prior
# ===========================================================
plot_PostPrior = function(obj, real_fin_data, bool_11, sector){
real_fin_data = as.vector(apply(real_fin_data,2,mean))
if (bool_11 == TRUE){
    # paste() builds the titles; cat() prints to the console and returns NULL
    plot(density(extract(obj)$r_out),main = paste(sector,
                                                "R_out Density curves - GARCH(1,1)"), col = 'blue')
    lines(density((real_fin_data - mean(real_fin_data))/sd(real_fin_data)), col = 'red')
    plot(density(extract(obj)$alpha0),main = paste(sector,
                                                 "alpha_0 Density curves - GARCH(1,1)"))
    plot(density(extract(obj)$alpha1),main = paste(sector,
                                                 "alpha_1 Density curves - GARCH(1,1)"))
    plot(density(extract(obj)$beta1),main = paste(sector,
                                                 "beta_1 Density curves - GARCH(1,1)"))
}
else {
    plot(density(extract(obj)$r_out),main = paste(sector,
                                                "R_out Density curves - GARCH(2,2)"))
    lines(density((real_fin_data - mean(real_fin_data))/sd(real_fin_data)))
    plot(density(extract(obj)$alpha0),main = paste(sector,
                                                 "alpha_0 Density curves - GARCH(2,2)"))
    plot(density(extract(obj)$alpha1),main = paste(sector,
                                                 "alpha_1 Density curves - GARCH(2,2)"))
    plot(density(extract(obj)$alpha2),main = paste(sector,
                                                 "alpha_2 Density curves - GARCH(2,2)"))
    plot(density(extract(obj)$beta1),main = paste(sector,
                                                 "beta_1 Density curves - GARCH(2,2)"))
    plot(density(extract(obj)$beta2),main = paste(sector,
                                                 "beta_2 Density curves - GARCH(2,2)"))
}
}
# ================================================================
# parameter_extraction (fin_data) : fin_data is a matrix
#' @param fin_data - matrix (t x n) of return data
#' @param sigma1 - initial sigma1 value to initalize GARCH objects
#' @param sigma2 - initial sigma2 value to initialize GARCH objects
#' @param T_out - number of time periods to predict
#' @export
# ===========================================================
parameter_extraction = function(fin_data,sigma1,sigma2,T_out){
t = length(fin_data[,1])
data_fin = rep(0,t)
for (i in 1:t){
data_fin[i] = mean(as.numeric(fin_data[i,]))
}
stan_input = list (T = t,r = data_fin, sigma1 = sigma1, T_out = T_out)
GARCH11_fit = stan(file = 'GARCH11_STANCODE.stan', data = stan_input,
iter = 500, chains = 10)
GARCH11_param = extract(GARCH11_fit)
stan_input = list (T = t,r = data_fin, sigma1 = sigma1,sigma2 = sigma2, T_out = T_out)
GARCH22_fit = stan(file = 'GARCH22_STANCODE.stan', data = stan_input, iter = 500, chains = 10)
GARCH22_param = extract(GARCH22_fit)
data_param = list(G11_a0 = mean(GARCH11_param$alpha0),
G11_a1 = mean(GARCH11_param$alpha1),
G11_b1 = mean(GARCH11_param$beta1),
G22_a0 = mean(GARCH22_param$alpha0),
G22_a1 = mean(GARCH22_param$alpha1),
G22_a2 = mean(GARCH22_param$alpha2),
G22_b1 = mean(GARCH22_param$beta1),
G22_b2 = mean(GARCH22_param$beta2))
  return (c(data_param, list(GARCH22_fit = GARCH22_fit, GARCH11_fit = GARCH11_fit))) # c() cannot combine a plain list with S4 stanfit objects
}
# ===========================================================
# MCMC_diagnostic (GARCH_object, Bool)
#' @param GARCH_object - GARCH object of either GARCH(1,1) or GARCH(2,2)
#' @param bool_11 - True if GARCH(1,1) object, False if GARCH(2,2)
# ===========================================================
MCMC_diagnostic = function(GARCH_object, bool_11){
GARCH_param = extract(GARCH_object)
plot (GARCH_param$alpha0,type = 'l',ylab = "Alpha0",main = "Trace Plot")
plot (GARCH_param$alpha1,type = 'l',ylab = "Alpha1",main = "Trace Plot")
plot (GARCH_param$beta1,type = 'l',ylab = "Beta1",main = "Trace Plot")
acf (GARCH_param$alpha0)
acf (GARCH_param$alpha1)
acf (GARCH_param$beta1)
if (bool_11 == FALSE){
plot (GARCH_param$alpha2,type = 'l',ylab = "Alpha2",main = "Trace Plot")
plot (GARCH_param$beta2,type = 'l',ylab = "Beta2",main = "Trace Plot")
acf (GARCH_param$alpha2)
acf (GARCH_param$beta2)
}
}
|
29da9f1e5f7df18ec4cda169802ebd4aba7ad338 | 3420dacc496da4fb837abc78d6c7a63ef21d23c1 | /Q3_Rcode.R | ec2525cb4718f48e157649aa80060263fb6a45c9 | [] | no_license | hartwemm/4MB3Assignment2 | 6813852b6815ae3070bd8ddb8ed8fe40c4fa8c3d | b09caea78e7bd3dc52a1b69c997a53dda5f514b3 | refs/heads/master | 2021-05-04T23:58:52.019540 | 2018-02-04T17:55:02 | 2018-02-04T17:55:02 | 119,454,756 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,883 | r | Q3_Rcode.R | ### Part a
library(deSolve)
### Part b
SIR.vector.field <- function(t, vars, parms=c(beta=2,gamma=1)) {
# Writes vector field of these differential equations
with(as.list(c(parms, vars)), {
dS <- -1*beta*S*I # dS/dt
dI <- beta*S*I-gamma*I # dI/dt
dR <- gamma*I # dR/dt
vec.fld <- c(dS=dS, dI=dI, dR=dR)
return(list(vec.fld)) # ode() requires a list
})
}
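# Quick sanity check (illustrative, not part of the assignment):
# SIR.vector.field(0, c(S=0.999, I=0.001, R=0), parms=c(beta=2, gamma=1))
# should give dS < 0 and dI > 0, since beta*S > gamma at these values.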
draw.soln <- function(ic=c(S=0.999,I=0.001,R=0), tmax=1,
times=seq(0,tmax,length.out = 500),
func, parms=c(R_0=2,gamma_inv=1), colour="blue", ... ) {
# Solve ode from vector field using deSolve package and
# initial condition and parameters given
with(as.list(parms),{
gamma=1/gamma_inv
beta=gamma*R_0
# times=times/gamma_inv # Convert time to "natural units" of periods of gamma
soln <- ode(y=ic, times=times, func, parms=c(beta=beta,gamma=gamma)) # Solve the ode
lines(times, soln[,"I"], col=colour, ... ) # Add this line to our plot
return(data.frame(time=times,y=soln[,"I"]))
})
}
tmax <- 10 # end time for numerical integration of the ODE
## draw box for plot:
plot(0,0,xlim=c(0,tmax),ylim=c(0,1),
type="n",xlab="Time (t)",ylab="Prevalence (I)",las=1)
## initial conditions:
I0 <- 0.001
S0 <- 1 - I0
R0 <- 0
## Set parameter values
Rknot_vals <- 2.5
gamma_inv <- 1
# Solve ode for these parameters and initial conditions
draw.soln(ic=c(S=S0,I=I0,R=R0), tmax=tmax,
          func=SIR.vector.field,
          parms=c(R_0=Rknot_vals,gamma_inv=gamma_inv), lwd=5 # lwd is a graphics argument passed through ... to lines(), not a model parameter
)
# Part c
tikz("Q3Partc.tex")
tmax <- 40 # end time for numerical integration of the ODE
## initial conditions:
I0 <- 0.001
S0 <- 1 - I0
R0 <- 0
gamma_inv=4
Rknot_vals <- c(1.2,1.5,1.8,2,3,4)
## draw box for plot:
mymar <- par("mar")
mymar["left"] <- mymar["left"] * 0.5
mymar["right"] <- mymar["right"]*0.25
mymar["top"] <- mymar["top"]*0.15
mymar["bottom"] <- mymar["bottom"]*0.5
par(mar=mymar)
plot(0,0,xlim=c(0,tmax),ylim=c(0,0.5),
type="n",las=1,
xlab="Time (t)",
ylab="Prevalence (I)",
     main="SIR curves with varied $R_0$ Values")
## draw solution for each value of Rknot:
for (i in 1:length(Rknot_vals)) {
draw.soln(ic=c(S=S0,I=I0,R=R0), tmax=tmax,
times=seq(0,tmax,length.out = 500),
func=SIR.vector.field,
parms=c(R_0=Rknot_vals[i],gamma_inv=gamma_inv), lwd=2,
colour=rainbow(length(Rknot_vals)+1)[i] # use a different line colour for each solution
)
}
legend("topright",legend=Rknot_vals,col=rainbow(length(Rknot_vals)+1),lwd=2)
dev.off()
## Part d
tmax <- 122 # end time for numerical integration of the ODE
philadata$t <- seq(1,nrow(philadata))
y1 <- data.frame(time=philadata$t,y=philadata$pim)
euc.dist <- function(x1, x2) sqrt(sum((x1 - x2) ^ 2))
# Errors <- matrix(NA,ncol=4,nrow=4*6*6)
# NOTE: the loop below assumes 'Errors' already exists in the workspace with
# one candidate parameter set per row (cols 1,3,4,5,6 = I0, R0, pop, R_0,
# 1/gamma; col 2 unused here); it is not built in this script.
## initial conditions:
I0 <- 0.0001
R0 <- 0.1
S0 <- 1 - I0 - R0
pop <- 8200
plot(philadata$t,philadata$pim/pop,pch=20,col="blue",cex=1.2)
gamma_inv <- 3.5
Rknot_vals <- 1.97
cols <- rainbow(6)
plot(0,0,xlim=c(0,tmax),ylim=c(0,0.1),xlab="Time (t)",
ylab="Prevalence (I)")
## draw solution for each value of Rknot:
for (i in 1:nrow(Errors)) {
### Lowest errors in the order i= 4,1,2,3,5 ####
I0 <- Errors[i,1][[1]]
R0 <- Errors[i,3][[1]]
S0 <- 1 - I0 - R0
pop <- Errors[i,4][[1]]
points(philadata$t,philadata$pim/pop,pch=20,col=cols[i],cex=1.2)
Rknot_vals <- Errors[i,5]
gamma_inv <- Errors[i,6]
y2 <- draw.soln(ic=c(S=S0,I=I0,R=R0), tmax=tmax,
times=seq(0,tmax,length.out = 488+1),
func=SIR.vector.field,
parms=c(R_0=Rknot_vals[[1]],gamma_inv=gamma_inv[[1]]), lwd=1,
colour=cols[i] # use a different line colour for each solution
)
func <- y2[which(y2$time %in% y1$time),"y"]
E <- euc.dist(y1[,2],func)
  EEE <- c(I0,S0,R0,pop,Rknot_vals,gamma_inv,E)
  print(EEE) # auto-printing does not happen inside a for loop
} |
5ba989393916def04fb544b4b28b35ef3ef05599 | 6361479b3220f09837498043250003429460e421 | /initial.R | 6578b038380df41d1dd00e5a924f86c78006e622 | [] | no_license | momokeith1123/AFDBPRDVAR | 00ddff4126e9d9adb778e17090f4e06fe92cdfe2 | d79da5767f37ce98cba5db1a729b2901885c73fb | refs/heads/master | 2020-08-12T18:21:41.907237 | 2019-10-30T15:34:46 | 2019-10-30T15:34:46 | 214,818,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,640 | r | initial.R | # 3-
#
library('data.table')
library('dplyr')
library('openxlsx')
library(stringr)
library(rebus) # for the regular expressions String Manipulation in R with stringr
setwd(DIR[["function"]])
source("loadHvarResultFile.R")
# source("getSQL.r")
source("functionSQL.r")
# Load Security Filter given a perfvalo run
sqlq <- getSQL(filepathSec)
sqlq <- ApplyFunctionParam (ImpTimeStamp, sqlq)
secFilter <- sqlQuery(con,sqlq)
setwd(DIR[["root"]])
if ("S.R" %in% list.files()) {
source("S.R")
} else {
S <- c( "GROUP", "GRP", "GRPSFT", "IDENTIFIER", "TRADE", "TRDGRPSFT")
dump(list = "S", "S.R")
}
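# S.R acts as a small cache: the section list is dumped to disk on the first
# run and simply re-sourced on subsequent runs.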
invalid <- character(0)
if("invalid.R" %in% list.files()) source("invalid.R")
HVAR_RGRP <- list ()
# For each run date
for (i in seq_along(Rundates)) {
print (i)
# print(paste0(DIR[["data"]],"/", "spool", "_",Rundate[i], "/", "HVAR"))
setwd(paste0(DIR[["root"]],"/", "spool", "_",Rundates[i], "/", "HVAR"))
  # For each file within the run
for(s in S){
suppressWarnings(
HVAR_RGRP[[ Rundates[i] ]][[s]] <- getHvarFiles(CONFIG[["ADBPRDVAR"]][[i]] ,Rundates[i],s)
)
}
# We update the column RCLASSID for GROUP in order to put ADB as riskfactor in global level
  # we first identify the row number corresponding to GLOBAL in the group file
grp <- HVAR_RGRP[[ Rundates[i] ]][["GROUP"]]
grp_row <- which(grp["GROUPID"]== "GLOBAL")
# Address groupidx
grp$GROUPIDX <- grp$GROUPIDX-1
grp <- grp %>% left_join(dt, by = c("GROUPNUM"))
HVAR_RGRP[[ Rundates[i] ]][["GROUP"]] <- grp
grpx <- grp %>% select(GROUPNUM,GROUPIDX,RCLASSID,DMINDEX, CCY1)
# Amend the others tab
HVAR_RGRP[[ Rundates[i] ]][["GRP"]] <- HVAR_RGRP[[ Rundates[i] ]][["GRP"]] %>% left_join(grpx, by = "GROUPIDX")
grpsft <- HVAR_RGRP[[ Rundates[i] ]][["GRPSFT"]] %>% left_join(grpx, by = "GROUPIDX")
HVAR_RGRP[[ Rundates[i] ]][["GRPSFT"]] <- grpsft %>%
mutate (RISKTYPE = case_when(
str_detect(DMINDEX, pattern = ANY_CHAR %R% "_F" %R% ANY_CHAR) == TRUE ~ "CS_VaR",
str_detect(DMINDEX, pattern = ANY_CHAR %R% "_G" %R% ANY_CHAR) == TRUE ~ "CS_VaR",
str_detect(DMINDEX, pattern = ANY_CHAR %R% "_Y" %R% ANY_CHAR) == TRUE ~ "CS_VaR",
str_detect(DMINDEX, pattern = ANY_CHAR %R% "_S" %R% ANY_CHAR) == TRUE ~ "CS_VaR",
str_detect(DMINDEX, pattern = ANY_CHAR %R% "_C" %R% ANY_CHAR) == TRUE ~ "CS_VaR",
# str_detect(DMINDEX, pattern = ANY_CHAR %R% "PRDEUR" %R% ANY_CHAR) == TRUE ~ "PRDEUR",
# str_detect(DMINDEX, pattern = ANY_CHAR %R% "PRDGBP" %R% ANY_CHAR) == TRUE ~ "PRDGBP",
# str_detect(DMINDEX, pattern = ANY_CHAR %R% "PRDUSD" %R% ANY_CHAR) == TRUE ~ "PRDUSD",
# str_detect(DMINDEX, pattern = ANY_CHAR %R% "PRDUSD" %R% ANY_CHAR) == TRUE ~ "PRDUSD",
# str_detect(DMINDEX, pattern = ANY_CHAR %R% "R2RUSD" %R% ANY_CHAR) == TRUE ~ "R2RUSD",
TRUE ~"IR"
))
# grpsft <- grpsft %>% select(GROUPIDX) )
# HVAR_RGRP[[ Rundates[i] ]][["TRDGRPSFT"]] <- HVAR_RGRP[[ Rundates[i] ]][["GRP"]] %>% left_join(grpx, by = "GROUPIDX")
# HVAR_RGRP[[ Rundates[i] ]][["GRP"]] <- HVAR_RGRP[[ Rundates[i] ]][["GRP"]] %>% left_join(dt, )
HVAR_RGRP[[ Rundates[i] ]][["GROUP"]] [["RCLASSID"]][[grp_row]] <- "ADBVAR"
# adding the desk and ccy in TRADE
trd <- HVAR_RGRP[[ Rundates[i] ]][["TRADE"]]
HVAR_RGRP[[ Rundates[i] ]][["TRADE"]] <- trd %>%
mutate (DESK = case_when(
str_detect(ALTID, pattern = ANY_CHAR %R% "GKEY" %R% ANY_CHAR) == TRUE ~ "GKEY",
str_detect(ALTID, pattern = ANY_CHAR %R% "MARGEUR" %R% ANY_CHAR) == TRUE ~ "MARGEUR",
str_detect(ALTID, pattern = ANY_CHAR %R% "MARGUSD" %R% ANY_CHAR) == TRUE ~ "MARGUSD",
str_detect(ALTID, pattern = ANY_CHAR %R% "OPPGBP" %R% ANY_CHAR) == TRUE ~ "OPPGBP",
str_detect(ALTID, pattern = ANY_CHAR %R% "OPPUSD" %R% ANY_CHAR) == TRUE ~ "OPPUSD",
str_detect(ALTID, pattern = ANY_CHAR %R% "PRDEUR" %R% ANY_CHAR) == TRUE ~ "PRDEUR",
str_detect(ALTID, pattern = ANY_CHAR %R% "PRDGBP" %R% ANY_CHAR) == TRUE ~ "PRDGBP",
str_detect(ALTID, pattern = ANY_CHAR %R% "PRDUSD" %R% ANY_CHAR) == TRUE ~ "PRDUSD",
str_detect(ALTID, pattern = ANY_CHAR %R% "PRDUSD" %R% ANY_CHAR) == TRUE ~ "PRDUSD",
str_detect(ALTID, pattern = ANY_CHAR %R% "R2RUSD" %R% ANY_CHAR) == TRUE ~ "R2RUSD",
TRUE ~"OTHERS"
)) %>%
mutate (RISKCCY = case_when(
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "USD" %R% ANY_CHAR) == TRUE ~ "USD",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "EUR" %R% ANY_CHAR) == TRUE ~ "EUR",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "GBP" %R% ANY_CHAR) == TRUE ~ "GBP",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "JPY" %R% ANY_CHAR) == TRUE ~ "JPY",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "CHF" %R% ANY_CHAR) == TRUE ~ "CHF",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "CNH" %R% ANY_CHAR) == TRUE ~ "CNH",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "CAD" %R% ANY_CHAR) == TRUE ~ "CAD",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "ZAR" %R% ANY_CHAR) == TRUE ~ "ZAR",
str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "CNY" %R% ANY_CHAR) == TRUE ~ "CNY",
# str_detect(DESCRIPTION, pattern = ANY_CHAR %R% "OTHERS" %R% ANY_CHAR) == TRUE ~ "OTHERS",
TRUE ~"OTHERS"
)) %>% arrange(DESK)
trd <-HVAR_RGRP[[ Rundates[i] ]][["TRADE"]]
# adding the contract or secid
for (row in 1:nrow (trd)) {
if (trd[["TYPE"]][[row]] == "BOND") {
trd [["SECID"]][[row]]= str_split(trd[["ALTID"]][[row]], "\\|")[[1]][[1]]
} else if (trd[["TYPE"]][[row]] == "FUT") {
trd [["SECID"]][[row]]= paste0 (str_split(trd[["ALTID"]][[row]], "\\|")[[1]][[1]],str_split(trd[["ALTID"]][[row]], "\\|")[[1]][[2]])
} else {
trd [["SECID"]][[row]]= ""
}
}
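  # SECID convention above: bonds take the first "|"-separated token of ALTID,
  # futures concatenate the first two tokens, everything else is left blank.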
# adding Desk and Tradetype in TRDGRPSFT
# trdgpsft <- HVAR_RGRP[[ Rundates[i] ]][["TRADE"]]%>%select(TRADEIDX,DESK,TYPE,RISKCCY)
  trdgpsft <- trd%>%select(-ID, -VERSION, -DESCRIPTOR)
trd_gpsft <- left_join(HVAR_RGRP[[ Rundates[i] ]] [["TRDGRPSFT"]], trdgpsft , by = "TRADEIDX")
trd_gpsft <- trd_gpsft %>% left_join(grpx, by = "GROUPIDX")
# removing dmindex because not correct
# trd_gpsft
# adding column from secfilter
trd_gpsft <- trd_gpsft %>% left_join(secFilter, by = "SECID")
HVAR_RGRP[[ Rundates[i] ]] [["TRDGRPSFT"]] <- trd_gpsft
# grp_sft <- HVAR_RGRP[[ Rundates[i] ]] [["GRPSFT"]]
# grp_pnl <- grp_sft %>% filter()
# grouping <- HVAR_RGRP[[ Rundates[i] ]]$TRADE$TYPE
# split_trade <- split( HVAR_RGRP[[ Rundates[i] ]]$TRADE,grouping)
#
# BOND <- split_trade$BOND %>% mutate (IDENTIFIER = str_split(DESCRIPTION, pattern = "/")[[1]][[4]] )
dat <- HVAR_RGRP[[ Rundates[i] ]]
wb <- createWorkbook()
Map(function(data, name){
addWorksheet(wb, name)
writeData(wb, name, data)
}, dat, names(dat))
## Save workbook to working directory
  saveWorkbook(wb, file = paste(CONFIG[["ADBPRDVAR"]][[i]], Rundates[[i]], "_AGG.xlsx"), overwrite = TRUE)
}
|
8eb7cd03ecb125ee3eaee10a58e22b30af5d35cd | 67c2a90c7edfac3cfd891cb332c45e71cf4a6ad1 | /R/osink.R | 2ca7b9157c6fe1acc75ee7f051d8004671010bb5 | [] | no_license | alexanderrobitzsch/CDM | 48316397029327f213967dd6370a709dd1bd2e0a | 7fde48c9fe331b020ad9c7d8b0ec776acbff6a52 | refs/heads/master | 2022-09-28T18:09:22.491208 | 2022-08-26T11:36:31 | 2022-08-26T11:36:31 | 95,295,826 | 21 | 11 | null | 2019-06-19T09:40:01 | 2017-06-24T12:19:45 | R | UTF-8 | R | false | false | 200 | r | osink.R | ## File Name: osink.R
## File Version: 1.09
osink <- function( file, suffix, append=FALSE)
{
if ( ! is.null( file ) ){
sink( paste0( file, suffix), split=TRUE, append=append )
}
}
|
c9676bfc6427ae00995c2f299f47ac09e6efa136 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/textreg/man/testCorpora.Rd | b9fca1cd2c060c4edf2159d8de6d91f610bddfd4 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 404 | rd | testCorpora.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package_and_data_documentation.R
\docType{data}
\name{testCorpora}
\alias{testCorpora}
\title{Some small, fake test corpora.}
\format{A list of dataframes}
\description{
A list of several fake documents along with some labeling schemes primarily used by the unit testing code.
Also used in some examples.
}
\keyword{datasets}
|
82eeed7096fe121f6d0195e0bef422bac734e062 | 1e9c4294652b0f4699d85516afd54fb5697b4800 | /r_exam/제대로_알고_쓰는_R_통계_분석/sourcebook/source/Chapter03/source/10.for_ex.R | cdd0a7f10da109602005c7f3bef5cc6f32d60cb6 | [] | no_license | mgh3326/GyeonggiBigDataSpecialist | 89c9fbf01036b35efca509ed3f74b9784e44ed19 | 29192a66df0913c6d9b525436772c8fd51a013ac | refs/heads/master | 2023-04-06T07:09:09.057634 | 2019-06-20T23:35:33 | 2019-06-20T23:35:33 | 138,550,772 | 3 | 2 | null | 2023-03-24T22:43:06 | 2018-06-25T06:10:59 | Jupyter Notebook | UTF-8 | R | false | false | 357 | r | 10.for_ex.R | v <- c(1, 4, 5)
for( i in v ) {
print( i )
}
r.n <- rnorm(10)
sum <- 0 # masks base::sum() locally; sum(r.n) below still finds the function
for(i in 1:10) {
sum <- sum + r.n[i]
}
print(sum)
sum(r.n)
dan <- 2
for( i in 2:9 ) {
times <- dan * i
  print(paste(dan, "times", i, "=", times))
}
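# The same multiplication table without an explicit loop (vectorized sketch):
# paste(dan, "times", 2:9, "=", dan * 2:9)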
(m <- matrix(1:12, ncol=3))
for(i in 1:nrow(m)) {
for(j in 1:ncol(m)) {
    cat("row", i, "col", j, "=", m[i,j], "\n")
}
} |
95ffcc872a473989e8ba7873c6a8354ff92ab4b6 | 2572e98ace5eb5f10a0ad2e11ab24f11b79a0aa9 | /testing/DX_GC.R | 8464316b01af00b9bb1ec7785af44015c2c4921a | [] | no_license | ncoutrakon/fin | fca1f02495c6b19d888e2184db32d9775c15d52b | 590ff249b80cce29459cc095c705cac22804e366 | refs/heads/master | 2021-06-08T06:17:19.466922 | 2016-10-26T08:05:39 | 2016-10-26T08:05:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 807 | r | DX_GC.R | #find.instrument("dollar")
#getInstrument("DX")
#GetAndClean("DXU5")
rm(list=ls())
.useDV()
dxlist <- future_id("DX", c(3, 6, 9, 12), year=2014:2015, format = "CY", sep="")
gclist <- future_id("GC", c(3, 6, 9, 12), year=2014:2015, format = "CY", sep="")
listofList = ls()
source("fmonth.R")
for (lst in listofList){
for (each in get(lst)) {
    GetAndClean(each, gargs = list(dir = "../storage/sec/", use_identifier = NA,
                                   extension = "RData"))
}
}
dxdata <- fmonth(dxlist)*1000
gcdata <- fmonth(gclist)*100
tdata <- merge.xts(gcdata, dxdata, join = "inner")
colnames(tdata) <- c("GC", "DX")
fit <- lm(tdata$GC ~ tdata$DX)
spread <- fit$coefficients[2]*dxdata + gcdata
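# note: gold and the dollar index are usually negatively related, so the
# fitted slope is expected to be negative and beta*DX + GC then behaves like
# a hedged spread (GC - |beta|*DX); worth checking fit$coefficients before use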
rt <- log(spread) - log(lag(spread, 60))
chartSeries(spread)
chartSeries(rt)
|
614fbcaa55b3b1868947a3c58f41b908e556d907 | a747cd77ba47e0c62907a64ea4f3a235f280f124 | /man/tiger.Rd | 74f74912802ff37b836ced0f4950bf83ddeffd1c | [] | no_license | cran/rsvd | b4d24e0d50ab1855686be7c1a37ffbfbba3aac03 | 6357bde286052945966ca51b91590ee7f90affed | refs/heads/master | 2021-07-10T18:45:13.049205 | 2021-04-16T04:40:03 | 2021-04-16T04:40:03 | 48,087,870 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 582 | rd | tiger.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tiger.R
\docType{data}
\name{tiger}
\alias{tiger}
\title{Tiger}
\format{
An object of class \code{\link[rsvd]{rsvd}}.
}
\source{
\href{https://en.wikipedia.org/wiki/File:Siberischer_tiger_de_edit02.jpg}{Wikimedia}
}
\usage{
data('tiger')
}
\description{
1600x1200 grayscaled (8 bit [0-255]/255) image.
}
\examples{
\dontrun{
library('rsvd')
data('tiger')
#Display image
image(tiger, col = gray((0:255)/255))
}
}
\references{
S. Taheri (2006). "Panthera tigris altaica", (Online image)
}
\keyword{image}
|
5dc4b941bab0181423d7bf88acf4f65afd737ea4 | a8e8000b370d54c2f6a097ee59876827f4daafbe | /9.8/2p.R | 70548598f9da9dd4584bd466a591538895c1e1de | [] | no_license | weidaoming/R | 142be073ebdf097740ae5a02a7e75308a06e30d1 | 5048ca1d46025ba41d03b00049a17b309e8dfedc | refs/heads/master | 2021-07-12T10:47:27.552074 | 2017-10-18T07:09:09 | 2017-10-18T07:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 128 | r | 2p.R | #比例检验
#proportion test (two-proportion power analysis)
#pwr.2p.test(h=,n=,sig.level=,power=)
pwr.2p.test(h=ES.h(.65,.6),sig.level = .05,power=.9,alternative = "greater") |
5e0736a025ae59c19e141a67e05ae9092d3a999c | 3f4d651c3d7431db4da76e7b89031911dc6eb913 | /R/dfe_acad_year.R | 5623230b6d26bb3a7b90c7104fe3e7bb57ed1a59 | [] | no_license | TomFranklin/dferap | 394ada7b7b28da2a761b8016d97c2a0e8fd30c6d | a43e34e4d7f1ee1808028c32f4f369eef716bfc1 | refs/heads/master | 2020-04-03T10:40:30.294751 | 2018-10-29T16:05:58 | 2018-10-29T16:05:58 | 155,199,335 | 0 | 0 | null | 2018-10-29T16:05:59 | 2018-10-29T11:19:09 | R | UTF-8 | R | false | false | 827 | r | dfe_acad_year.R | #' @title Change the style of academic year
#'
#' @description The \code{dfe_acad_year} function converts academic year numbers e.g. 201213 into strings with a forward slash "2012/13"
#'
#'
#'
#' @details The input is checked to be a six digit number, then a forward
#' slash is inserted between the first four digits and the last two to give
#' the conventional academic year format.
#'
#' @param year is the academic year we'll input, e.g. 201213 which will be converted into 2012/13
#'
#' @return Returns a character string
#'
#' @examples
#'
#' library(dferap)
#' dfe_acad_year(201213)
#' "2012/13"
#'
#'
#' @export
#'
dfe_acad_year <- function(year) {
  if (!grepl("^[0-9]{6}$",year))
    stop("year parameter must be a six digit number e.g. 201617")
sub("(.{4})(.*)", "\\1/\\2", year)
}
|
db7bcf32b363237c7ab0b365af5d15e643ca2a8c | 8663fe97b8f27cd63e8957f5c74413a208429ea3 | /snp_reads.R | 121ce23d5cff59d1bc90cba95a95494dc8686502 | [] | no_license | Alexander-Palmer/hide-and-RNA-seq | 9dd20824de3292e6ef691f40dd95a939df1b4e6b | b24225feaa48b9f31e210e29c94996ac2fe46735 | refs/heads/master | 2021-06-13T00:57:08.858995 | 2021-04-27T04:50:48 | 2021-04-27T04:50:48 | 185,336,932 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 51,567 | r | snp_reads.R | ###Find sum of columns containing original SNP type
fileNames <- Sys.glob("*.txt")
#Create average columns
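# For each per-SNP count table in the working directory: average the two
# replicate columns of every library (A284..A291, N2), build per-tissue
# means (Int/Neu/Mus, i.e. the two libraries per tissue) and per-argonaute
# means (ALG1/ALG2, i.e. the three tissues per IP), then initialise the
# per-base SNP/N2 tally columns used by the summary blocks further down.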
for (fileName in fileNames) {
sample <- read.delim(fileName)
  sample['av_284'] <- (sample[["A284_1"]] + sample[["A284_2"]])/2
  sample['av_285'] <- (sample[["A285_1"]] + sample[["A285_2"]])/2
  sample['av_286'] <- (sample[["A286_1"]] + sample[["A286_2"]])/2
  sample['av_287'] <- (sample[["A287_1"]] + sample[["A287_2"]])/2
  sample['av_290'] <- (sample[["A290_1"]] + sample[["A290_2"]])/2
  sample['av_291'] <- (sample[["A291_1"]] + sample[["A291_2"]])/2
  sample['Intcomb'] <- (sample[["av_284"]] + sample[["av_285"]])/2
  sample['Neucomb'] <- (sample[["av_286"]] + sample[["av_287"]])/2
  sample['Muscomb'] <- (sample[["av_290"]] + sample[["av_291"]])/2
  sample['av_N2'] <- (sample[["N2_1"]] + sample[["N2_2"]])/2
  sample['av_ALG1'] <- (sample[["av_284"]] + sample[["av_286"]] + sample[["av_290"]])/3
  sample['av_ALG2'] <- (sample[["av_285"]] + sample[["av_287"]] + sample[["av_291"]])/3
sample["SNP_A"] <- 0
sample["SNP_T"] <- 0
sample["SNP_G"] <- 0
sample["SNP_C"] <- 0
sample["N2_A"] <- 0
sample["N2_T"] <- 0
sample["N2_G"] <- 0
sample["N2_C"] <- 0
write.table(sample,
fileName,
append = FALSE,
quote = FALSE,
sep = "\t",
row.names = FALSE,
col.names = TRUE)
}
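# The blocks below address columns by position: col 21 = SNP base,
# col 23 = SNP position in the read, col 24 = av_284, col 26 = av_286 and
# col 33 = av_N2 -- this assumes the raw tables carry 23 columns before the
# averages added above (inferred from the code, not checked here).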
IntAlg1_A <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG1_A.txt", header=F)$V1
IntAlg1_A <- as.character(IntAlg1_A)
IntAlg1_T <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG1_U.txt", header=F)$V1
IntAlg1_T <- as.character(IntAlg1_T)
IntAlg1_G <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG1_G.txt", header=F)$V1
IntAlg1_G <- as.character(IntAlg1_G)
IntAlg1_C <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG1_C.txt", header=F)$V1
IntAlg1_C <- as.character(IntAlg1_C)
NeuAlg1_A <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG1_A.txt", header=F)$V1
NeuAlg1_A <- as.character(NeuAlg1_A)
NeuAlg1_T <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG1_U.txt", header=F)$V1
NeuAlg1_T <- as.character(NeuAlg1_T)
NeuAlg1_G <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG1_G.txt", header=F)$V1
NeuAlg1_G <- as.character(NeuAlg1_G)
NeuAlg1_C <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG1_C.txt", header=F)$V1
NeuAlg1_C <- as.character(NeuAlg1_C)
MusAlg1_A <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG1_A.txt", header=F)$V1
MusAlg1_A <- as.character(MusAlg1_A)
MusAlg1_T <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG1_U.txt", header=F)$V1
MusAlg1_T <- as.character(MusAlg1_T)
MusAlg1_G <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG1_G.txt", header=F)$V1
MusAlg1_G <- as.character(MusAlg1_G)
MusAlg1_C <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG1_C.txt", header=F)$V1
MusAlg1_C <- as.character(MusAlg1_C)
IntAlg2_A <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG2_A.txt", header=F)$V1
IntAlg2_A <- as.character(IntAlg2_A)
IntAlg2_T <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG2_U.txt", header=F)$V1
IntAlg2_T <- as.character(IntAlg2_T)
IntAlg2_G <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG2_G.txt", header=F)$V1
IntAlg2_G <- as.character(IntAlg2_G)
IntAlg2_C <- read.delim("X_List_of_sig_SNP_miRNA_int_ALG2_C.txt", header=F)$V1
IntAlg2_C <- as.character(IntAlg2_C)
NeuAlg2_A <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG2_A.txt", header=F)$V1
NeuAlg2_A <- as.character(NeuAlg2_A)
NeuAlg2_T <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG2_U.txt", header=F)$V1
NeuAlg2_T <- as.character(NeuAlg2_T)
NeuAlg2_G <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG2_G.txt", header=F)$V1
NeuAlg2_G <- as.character(NeuAlg2_G)
NeuAlg2_C <- read.delim("X_List_of_sig_SNP_miRNA_neu_ALG2_C.txt", header=F)$V1
NeuAlg2_C <- as.character(NeuAlg2_C)
MusAlg2_A <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG2_A.txt", header=F)$V1
MusAlg2_A <- as.character(MusAlg2_A)
MusAlg2_T <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG2_U.txt", header=F)$V1
MusAlg2_T <- as.character(MusAlg2_T)
MusAlg2_G <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG2_G.txt", header=F)$V1
MusAlg2_G <- as.character(MusAlg2_G)
MusAlg2_C <- read.delim("X_List_of_sig_SNP_miRNA_mus_ALG2_C.txt", header=F)$V1
MusAlg2_C <- as.character(MusAlg2_C)
ALG1_A <- Reduce(union, list(IntAlg1_A, MusAlg1_A, NeuAlg1_A))
ALG1_T <- Reduce(union, list(IntAlg1_T, MusAlg1_T, NeuAlg1_T))
ALG1_G <- Reduce(union, list(IntAlg1_G, MusAlg1_G, NeuAlg1_G))
ALG1_C <- Reduce(union, list(IntAlg1_C, MusAlg1_C, NeuAlg1_C))
ALG2_A <- Reduce(union, list(IntAlg2_A, MusAlg2_A, NeuAlg2_A))
ALG2_T <- Reduce(union, list(IntAlg2_T, MusAlg2_T, NeuAlg2_T))
ALG2_G <- Reduce(union, list(IntAlg2_G, MusAlg2_G, NeuAlg2_G))
ALG2_C <- Reduce(union, list(IntAlg2_C, MusAlg2_C, NeuAlg2_C))
Int_A <- Reduce(union, list(IntAlg1_A, IntAlg2_A))
Int_T <- Reduce(union, list(IntAlg1_T, IntAlg2_T))
Int_G <- Reduce(union, list(IntAlg1_G, IntAlg2_G))
Int_C <- Reduce(union, list(IntAlg1_C, IntAlg2_C))
Neu_A <- Reduce(union, list(NeuAlg1_A, NeuAlg2_A))
Neu_T <- Reduce(union, list(NeuAlg1_T, NeuAlg2_T))
Neu_G <- Reduce(union, list(NeuAlg1_G, NeuAlg2_G))
Neu_C <- Reduce(union, list(NeuAlg1_C, NeuAlg2_C))
Mus_A <- Reduce(union, list(MusAlg1_A, MusAlg2_A))
Mus_T <- Reduce(union, list(MusAlg1_T, MusAlg2_T))
Mus_G <- Reduce(union, list(MusAlg1_G, MusAlg2_G))
Mus_C <- Reduce(union, list(MusAlg1_C, MusAlg2_C))
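# Pooled SNP lists: per argonaute across tissues (ALG1_*/ALG2_*) and per
# tissue across argonautes (Int_*/Neu_*/Mus_*). The per-base blocks below
# work from the tissue-and-argonaute-specific lists directly.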
###########################################################################
#SNPs (A) common to intestine ALG1 (>7nt)
datalist_IntALG1_A = list()
for (FileName in IntAlg1_A) {
  sample <- read.delim(FileName)
  sample <- as.data.frame(sample)
  sample$Pos <- ifelse(as.numeric(sample[,24]) > 10, sample[,23], 0)
  sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$FileName <- FileName
  datalist_IntALG1_A[[FileName]] <- sample
}
datalist_IntALG1_A <- do.call(rbind.data.frame, datalist_IntALG1_A)
SNP_A_ALG1 <- sum(datalist_IntALG1_A$SNP_A)
N2_A_ALG1 <- sum(datalist_IntALG1_A$N2_A)
SNP_T_ALG1 <- sum(datalist_IntALG1_A$SNP_T)
N2_T_ALG1 <- sum(datalist_IntALG1_A$N2_T)
SNP_G_ALG1 <- sum(datalist_IntALG1_A$SNP_G)
N2_G_ALG1 <- sum(datalist_IntALG1_A$N2_G)
SNP_C_ALG1 <- sum(datalist_IntALG1_A$SNP_C)
N2_C_ALG1 <- sum(datalist_IntALG1_A$N2_C)
ALG1_output_A <- rbind(SNP_A_ALG1, SNP_T_ALG1, SNP_G_ALG1, SNP_C_ALG1,
                       N2_A_ALG1, N2_T_ALG1, N2_G_ALG1, N2_C_ALG1)
colnames(ALG1_output_A) <- c("Total Reads")
ALG1_output_A
write.table(ALG1_output_A, "Intestine_ALG1_output_A_7.txt", sep="\t", col.names = NA)
write.table(datalist_IntALG1_A, "Intestine_ALG1_output_A_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (T) common to intestine ALG1 (>7nt)
datalist_IntALG1_T = list()
for (FileName in IntAlg1_T) {
  sample <- read.delim(FileName)
  sample <- as.data.frame(sample)
  sample$Pos <- ifelse(as.numeric(sample[,24]) > 10, sample[,23], 0)
  sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$FileName <- FileName
  datalist_IntALG1_T[[FileName]] <- sample
}
datalist_IntALG1_T <- do.call(rbind.data.frame, datalist_IntALG1_T)
SNP_A_ALG1 <- sum(datalist_IntALG1_T$SNP_A)
N2_A_ALG1 <- sum(datalist_IntALG1_T$N2_A)
SNP_T_ALG1 <- sum(datalist_IntALG1_T$SNP_T)
N2_T_ALG1 <- sum(datalist_IntALG1_T$N2_T)
SNP_G_ALG1 <- sum(datalist_IntALG1_T$SNP_G)
N2_G_ALG1 <- sum(datalist_IntALG1_T$N2_G)
SNP_C_ALG1 <- sum(datalist_IntALG1_T$SNP_C)
N2_C_ALG1 <- sum(datalist_IntALG1_T$N2_C)
ALG1_output_T <- rbind(SNP_A_ALG1, SNP_T_ALG1, SNP_G_ALG1, SNP_C_ALG1,
                       N2_A_ALG1, N2_T_ALG1, N2_G_ALG1, N2_C_ALG1)
colnames(ALG1_output_T) <- c("Total Reads")
ALG1_output_T
write.table(ALG1_output_T, "Intestine_ALG1_output_T_7.txt", sep="\t", col.names = NA)
write.table(datalist_IntALG1_T, "Intestine_ALG1_output_T_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (G) common to intestine ALG1 (>7nt)
datalist_IntALG1_G = list()
for (FileName in IntAlg1_G) {
  sample <- read.delim(FileName)
  sample <- as.data.frame(sample)
  sample$Pos <- ifelse(as.numeric(sample[,24]) > 10, sample[,23], 0)
  sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$FileName <- FileName
  datalist_IntALG1_G[[FileName]] <- sample
}
datalist_IntALG1_G <- do.call(rbind.data.frame, datalist_IntALG1_G)
SNP_A_ALG1 <- sum(datalist_IntALG1_G$SNP_A)
N2_A_ALG1 <- sum(datalist_IntALG1_G$N2_A)
SNP_T_ALG1 <- sum(datalist_IntALG1_G$SNP_T)
N2_T_ALG1 <- sum(datalist_IntALG1_G$N2_T)
SNP_G_ALG1 <- sum(datalist_IntALG1_G$SNP_G)
N2_G_ALG1 <- sum(datalist_IntALG1_G$N2_G)
SNP_C_ALG1 <- sum(datalist_IntALG1_G$SNP_C)
N2_C_ALG1 <- sum(datalist_IntALG1_G$N2_C)
ALG1_output_G <- rbind(SNP_A_ALG1, SNP_T_ALG1, SNP_G_ALG1, SNP_C_ALG1,
                       N2_A_ALG1, N2_T_ALG1, N2_G_ALG1, N2_C_ALG1)
colnames(ALG1_output_G) <- c("Total Reads")
ALG1_output_G
write.table(ALG1_output_G, "Intestine_ALG1_output_G_7.txt", sep="\t", col.names = NA)
write.table(datalist_IntALG1_G, "Intestine_ALG1_output_G_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (C) common to intestine ALG1 (>7nt)
datalist_IntALG1_C = list()
for (FileName in IntAlg1_C) {
  sample <- read.delim(FileName)
  sample <- as.data.frame(sample)
  sample$Pos <- ifelse(as.numeric(sample[,24]) > 10, sample[,23], 0)
  sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,24], 0)
  sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
  sample$FileName <- FileName
  datalist_IntALG1_C[[FileName]] <- sample
}
datalist_IntALG1_C <- do.call(rbind.data.frame, datalist_IntALG1_C)
SNP_A_ALG1 <- sum(datalist_IntALG1_C$SNP_A)
N2_A_ALG1 <- sum(datalist_IntALG1_C$N2_A)
SNP_T_ALG1 <- sum(datalist_IntALG1_C$SNP_T)
N2_T_ALG1 <- sum(datalist_IntALG1_C$N2_T)
SNP_G_ALG1 <- sum(datalist_IntALG1_C$SNP_G)
N2_G_ALG1 <- sum(datalist_IntALG1_C$N2_G)
SNP_C_ALG1 <- sum(datalist_IntALG1_C$SNP_C)
N2_C_ALG1 <- sum(datalist_IntALG1_C$N2_C)
ALG1_output_C <- rbind(SNP_A_ALG1, SNP_T_ALG1, SNP_G_ALG1, SNP_C_ALG1,
                       N2_A_ALG1, N2_T_ALG1, N2_G_ALG1, N2_C_ALG1)
colnames(ALG1_output_C) <- c("Total Reads")
ALG1_output_C
write.table(ALG1_output_C, "Intestine_ALG1_output_C_7.txt", sep="\t", col.names = NA)
write.table(datalist_IntALG1_C, "Intestine_ALG1_output_C_7_meta.txt", sep="\t", col.names = NA)
#########################################################################################################################
###########################################################################
#SNPs (A) common to neuron alg1 (>7nt)
datalist_ALG2_A = list()
for (FileName in NeuAlg1_A) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,26]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_A[[FileName]] <- sample
}
datalist_ALG2_A <- do.call(rbind.data.frame, datalist_ALG2_A)
SNP_A_ALG2 <- sum(datalist_ALG2_A$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_A$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_A$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_A$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_A$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_A$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_A$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_A$N2_C)
ALG2_output_A <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_A) <- c("Total Reads")
ALG2_output_A
write.table(ALG2_output_A, "Neuron_ALG1_output_A_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_A, "Neuron_ALG1_output_A_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (T) common to neuron alg1 (>7nt)
datalist_ALG2_T = list()
for (FileName in NeuAlg1_T) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,26]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_T[[FileName]] <- sample
}
datalist_ALG2_T <- do.call(rbind.data.frame, datalist_ALG2_T)
SNP_A_ALG2 <- sum(datalist_ALG2_T$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_T$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_T$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_T$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_T$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_T$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_T$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_T$N2_C)
ALG2_output_T <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_T) <- c("Total Reads")
ALG2_output_T
write.table(ALG2_output_T, "Neuron_ALG1_output_T_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_T, "Neuron_ALG1_output_T_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (G) common to neuron alg1 (>7nt)
datalist_ALG2_G = list()
for (FileName in NeuAlg1_G) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,26]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_G[[FileName]] <- sample
}
datalist_ALG2_G <- do.call(rbind.data.frame, datalist_ALG2_G)
SNP_A_ALG2 <- sum(datalist_ALG2_G$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_G$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_G$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_G$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_G$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_G$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_G$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_G$N2_C)
ALG2_output_G <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_G) <- c("Total Reads")
ALG2_output_G
write.table(ALG2_output_G, "Neuron_ALG1_output_G_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_G, "Neuron_ALG1_output_G_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (C) common to neuron alg1 (>7nt)
datalist_ALG1_C = list()
for (FileName in NeuAlg1_C) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,26]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,26], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG1_C[[FileName]] <- sample
}
datalist_ALG1_C <- do.call(rbind.data.frame, datalist_ALG1_C)
SNP_A_ALG1 <- sum(datalist_ALG1_C$SNP_A)
N2_A_ALG1 <- sum(datalist_ALG1_C$N2_A)
SNP_T_ALG1 <- sum(datalist_ALG1_C$SNP_T)
N2_T_ALG1 <- sum(datalist_ALG1_C$N2_T)
SNP_G_ALG1 <- sum(datalist_ALG1_C$SNP_G)
N2_G_ALG1 <- sum(datalist_ALG1_C$N2_G)
SNP_C_ALG1 <- sum(datalist_ALG1_C$SNP_C)
N2_C_ALG1 <- sum(datalist_ALG1_C$N2_C)
ALG1_output_C <- rbind(SNP_A_ALG1, SNP_T_ALG1, SNP_G_ALG1, SNP_C_ALG1,
N2_A_ALG1, N2_T_ALG1, N2_G_ALG1, N2_C_ALG1)
colnames(ALG1_output_C) <- c("Total Reads")
ALG1_output_C
write.table(ALG1_output_C, "Neuron_ALG1_output_C_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG1_C, "Neuron_ALG1_output_C_7_meta.txt", sep="\t", col.names = NA)
#########################################################################################################################
###########################################################################
#SNPs (A) common to muscle alg1 (>7nt)
datalist_ALG2_A = list()
for (FileName in MusAlg1_A) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,28]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_A[[FileName]] <- sample
}
datalist_ALG2_A <- do.call(rbind.data.frame, datalist_ALG2_A)
SNP_A_ALG2 <- sum(datalist_ALG2_A$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_A$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_A$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_A$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_A$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_A$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_A$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_A$N2_C)
ALG2_output_A <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_A) <- c("Total Reads")
ALG2_output_A
write.table(ALG2_output_A, "Muscle_ALG1_output_A_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_A, "Muscle_ALG1_output_A_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (T) common to muscle alg1 (>7nt)
datalist_ALG2_T = list()
for (FileName in MusAlg1_T) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,28]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_T[[FileName]] <- sample
}
datalist_ALG2_T <- do.call(rbind.data.frame, datalist_ALG2_T)
SNP_A_ALG2 <- sum(datalist_ALG2_T$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_T$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_T$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_T$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_T$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_T$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_T$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_T$N2_C)
ALG2_output_T <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_T) <- c("Total Reads")
ALG2_output_T
write.table(ALG2_output_T, "Muscle_ALG1_output_T_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_T, "Muscle_ALG1_output_T_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (G) common to muscle alg1 (>7nt)
datalist_ALG2_G = list()
for (FileName in MusAlg1_G) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,28]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_G[[FileName]] <- sample
}
datalist_ALG2_G <- do.call(rbind.data.frame, datalist_ALG2_G)
SNP_A_ALG2 <- sum(datalist_ALG2_G$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_G$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_G$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_G$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_G$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_G$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_G$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_G$N2_C)
ALG2_output_G <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_G) <- c("Total Reads")
ALG2_output_G
write.table(ALG2_output_G, "Muscle_ALG1_output_G_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_G, "Muscle_ALG1_output_G_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (C) common to muscle alg1 (>7nt)
datalist_ALG2_C = list()
for (FileName in MusAlg1_C) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,28]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,28], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_C[[FileName]] <- sample
}
datalist_ALG2_C <- do.call(rbind.data.frame, datalist_ALG2_C)
SNP_A_ALG2 <- sum(datalist_ALG2_C$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_C$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_C$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_C$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_C$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_C$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_C$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_C$N2_C)
ALG2_output_C <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_C) <- c("Total Reads")
ALG2_output_C
write.table(ALG2_output_C, "Muscle_ALG1_output_C_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_C, "Muscle_ALG1_output_C_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (A) common to intestine alg2 (>7nt)
datalist_ALG2_A = list()
for (FileName in IntAlg2_A) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,25]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_A[[FileName]] <- sample
}
datalist_ALG2_A <- do.call(rbind.data.frame, datalist_ALG2_A)
SNP_A_ALG2 <- sum(datalist_ALG2_A$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_A$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_A$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_A$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_A$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_A$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_A$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_A$N2_C)
ALG2_output_A <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_A) <- c("Total Reads")
ALG2_output_A
write.table(ALG2_output_A, "Intestine_ALG2_output_A_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_A, "Intestine_ALG2_output_A_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (T) common to intestine alg2 (>7nt)
datalist_ALG2_T = list()
for (FileName in IntAlg2_T) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,25]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_T[[FileName]] <- sample
}
datalist_ALG2_T <- do.call(rbind.data.frame, datalist_ALG2_T)
SNP_A_ALG2 <- sum(datalist_ALG2_T$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_T$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_T$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_T$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_T$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_T$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_T$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_T$N2_C)
ALG2_output_T <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_T) <- c("Total Reads")
ALG2_output_T
write.table(ALG2_output_T, "Intestine_ALG2_output_T_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_T, "Intestine_ALG2_output_T_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (G) common to intestine alg2 (>7nt)
datalist_ALG2_G = list()
for (FileName in IntAlg2_G) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,25]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_G[[FileName]] <- sample
}
datalist_ALG2_G <- do.call(rbind.data.frame, datalist_ALG2_G)
SNP_A_ALG2 <- sum(datalist_ALG2_G$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_G$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_G$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_G$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_G$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_G$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_G$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_G$N2_C)
ALG2_output_G <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_G) <- c("Total Reads")
ALG2_output_G
write.table(ALG2_output_G, "Intestine_ALG2_output_G_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_G, "Intestine_ALG2_output_G_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (C) common to intestine alg2 (>7nt)
datalist_ALG2_C = list()
for (FileName in IntAlg2_C) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,25]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,25], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_C[[FileName]] <- sample
}
datalist_ALG2_C <- do.call(rbind.data.frame, datalist_ALG2_C)
SNP_A_ALG2 <- sum(datalist_ALG2_C$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_C$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_C$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_C$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_C$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_C$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_C$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_C$N2_C)
ALG2_output_C <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_C) <- c("Total Reads")
ALG2_output_C
write.table(ALG2_output_C, "Intestine_ALG2_output_C_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_C, "Intestine_ALG2_output_C_7_meta.txt", sep="\t", col.names = NA)
#########################################################################################################################
###########################################################################
#SNPs (A) common to neuron alg2 (>7nt)
datalist_ALG2_A = list()
for (FileName in NeuAlg2_A) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,27]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_A[[FileName]] <- sample
}
datalist_ALG2_A <- do.call(rbind.data.frame, datalist_ALG2_A)
SNP_A_ALG2 <- sum(datalist_ALG2_A$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_A$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_A$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_A$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_A$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_A$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_A$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_A$N2_C)
ALG2_output_A <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_A) <- c("Total Reads")
ALG2_output_A
write.table(ALG2_output_A, "Neuron_ALG2_output_A_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_A, "Neuron_ALG2_output_A_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (T) common to neuron alg2 (>7nt)
datalist_ALG2_T = list()
for (FileName in NeuAlg2_T) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,27]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_T[[FileName]] <- sample
}
datalist_ALG2_T <- do.call(rbind.data.frame, datalist_ALG2_T)
SNP_A_ALG2 <- sum(datalist_ALG2_T$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_T$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_T$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_T$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_T$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_T$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_T$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_T$N2_C)
ALG2_output_T <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_T) <- c("Total Reads")
ALG2_output_T
write.table(ALG2_output_T, "Neuron_ALG2_output_T_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_T, "Neuron_ALG2_output_T_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (G) common to neuron alg2 (>7nt)
datalist_ALG2_G = list()
for (FileName in NeuAlg2_G) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,27]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_G[[FileName]] <- sample
}
datalist_ALG2_G <- do.call(rbind.data.frame, datalist_ALG2_G)
SNP_A_ALG2 <- sum(datalist_ALG2_G$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_G$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_G$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_G$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_G$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_G$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_G$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_G$N2_C)
ALG2_output_G <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_G) <- c("Total Reads")
ALG2_output_G
write.table(ALG2_output_G, "Neuron_ALG2_output_G_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_G, "Neuron_ALG2_output_G_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (C) common to neuron alg2 (>7nt)
datalist_ALG2_C = list()
for (FileName in NeuAlg2_C) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,27]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,27], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_C[[FileName]] <- sample
}
datalist_ALG2_C <- do.call(rbind.data.frame, datalist_ALG2_C)
SNP_A_ALG2 <- sum(datalist_ALG2_C$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_C$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_C$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_C$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_C$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_C$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_C$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_C$N2_C)
ALG2_output_C <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_C) <- c("Total Reads")
ALG2_output_C
write.table(ALG2_output_C, "Neuron_ALG2_output_C_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_C, "Neuron_ALG2_output_C_7_meta.txt", sep="\t", col.names = NA)
#########################################################################################################################
###########################################################################
#SNPs (A) common to muscle alg2 (>7nt)
datalist_ALG2_A = list()
for (FileName in MusAlg2_A) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,29]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_A[[FileName]] <- sample
}
datalist_ALG2_A <- do.call(rbind.data.frame, datalist_ALG2_A)
SNP_A_ALG2 <- sum(datalist_ALG2_A$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_A$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_A$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_A$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_A$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_A$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_A$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_A$N2_C)
ALG2_output_A <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_A) <- c("Total Reads")
ALG2_output_A
write.table(ALG2_output_A, "Muscle_ALG2_output_A_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_A, "Muscle_ALG2_output_A_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (T) common to muscle alg2 (>7nt)
datalist_ALG2_T = list()
for (FileName in MusAlg2_T) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,29]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_T[[FileName]] <- sample
}
datalist_ALG2_T <- do.call(rbind.data.frame, datalist_ALG2_T)
SNP_A_ALG2 <- sum(datalist_ALG2_T$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_T$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_T$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_T$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_T$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_T$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_T$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_T$N2_C)
ALG2_output_T <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_T) <- c("Total Reads")
ALG2_output_T
write.table(ALG2_output_T, "Muscle_ALG2_output_T_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_T, "Muscle_ALG2_output_T_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (G) common to muscle alg2 (>7nt)
datalist_ALG2_G = list()
for (FileName in MusAlg2_G) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,29]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_G[[FileName]] <- sample
}
datalist_ALG2_G <- do.call(rbind.data.frame, datalist_ALG2_G)
SNP_A_ALG2 <- sum(datalist_ALG2_G$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_G$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_G$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_G$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_G$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_G$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_G$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_G$N2_C)
ALG2_output_G <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_G) <- c("Total Reads")
ALG2_output_G
write.table(ALG2_output_G, "Muscle_ALG2_output_G_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_G, "Muscle_ALG2_output_G_7_meta.txt", sep="\t", col.names = NA)
###########################################################################
#SNPs (C) common to muscle alg2 (>7nt)
datalist_ALG2_C = list()
for (FileName in MusAlg2_C) {
sample <- read.delim(FileName)
sample <- as.data.frame(sample)
sample$Pos <- ifelse(as.numeric(sample[,29]) > 10, sample[,23], 0)
sample$SNP_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_A <- ifelse(grepl("A", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_T <- ifelse(grepl("T", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_G <- ifelse(grepl("G", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$SNP_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,29], 0)
sample$N2_C <- ifelse(grepl("C", sample[,21]) & as.numeric(sample[,23]) > 7, sample[,33], 0)
sample$FileName <- FileName
datalist_ALG2_C[[FileName]] <- sample
}
datalist_ALG2_C <- do.call(rbind.data.frame, datalist_ALG2_C)
SNP_A_ALG2 <- sum(datalist_ALG2_C$SNP_A)
N2_A_ALG2 <- sum(datalist_ALG2_C$N2_A)
SNP_T_ALG2 <- sum(datalist_ALG2_C$SNP_T)
N2_T_ALG2 <- sum(datalist_ALG2_C$N2_T)
SNP_G_ALG2 <- sum(datalist_ALG2_C$SNP_G)
N2_G_ALG2 <- sum(datalist_ALG2_C$N2_G)
SNP_C_ALG2 <- sum(datalist_ALG2_C$SNP_C)
N2_C_ALG2 <- sum(datalist_ALG2_C$N2_C)
ALG2_output_C <- rbind(SNP_A_ALG2, SNP_T_ALG2, SNP_G_ALG2, SNP_C_ALG2,
N2_A_ALG2, N2_T_ALG2, N2_G_ALG2, N2_C_ALG2)
colnames(ALG2_output_C) <- c("Total Reads")
ALG2_output_C
write.table(ALG2_output_C, "Muscle_ALG2_output_C_7.txt", sep="\t", col.names = NA)
write.table(datalist_ALG2_C, "Muscle_ALG2_output_C_7_meta.txt", sep="\t", col.names = NA)
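###########################################################################
# The blocks above repeat one computation per tissue/Argonaute/reference-base
# combination, differing only in the input file list, the read-count column
# (24/26/28 for intestine/neuron/muscle ALG-1; 25/27/29 for ALG-2), and the
# output prefix. Below is a sketch of a generic helper that performs one such
# block; it follows the same logic as the blocks above (the unused Pos column
# is omitted) and is offered as a refactoring suggestion, not as the original
# analysis code.
summarise_snp_reads <- function(file.list, count.col, out.prefix,
                                min.len = 7, n2.col = 33) {
  datalist <- list()
  for (FileName in file.list) {
    sample <- as.data.frame(read.delim(FileName))
    for (base in c("A", "T", "G", "C")) {
      # reads whose reference base matches and whose length exceeds min.len
      hit <- grepl(base, sample[, 21]) & as.numeric(sample[, 23]) > min.len
      sample[[paste0("SNP_", base)]] <- ifelse(hit, sample[, count.col], 0)
      sample[[paste0("N2_", base)]]  <- ifelse(hit, sample[, n2.col], 0)
    }
    sample$FileName <- FileName
    datalist[[FileName]] <- sample
  }
  datalist <- do.call(rbind.data.frame, datalist)
  totals <- rbind(SNP_A = sum(datalist$SNP_A), SNP_T = sum(datalist$SNP_T),
                  SNP_G = sum(datalist$SNP_G), SNP_C = sum(datalist$SNP_C),
                  N2_A = sum(datalist$N2_A), N2_T = sum(datalist$N2_T),
                  N2_G = sum(datalist$N2_G), N2_C = sum(datalist$N2_C))
  colnames(totals) <- c("Total Reads")
  write.table(totals, paste0(out.prefix, ".txt"), sep = "\t", col.names = NA)
  write.table(datalist, paste0(out.prefix, "_meta.txt"), sep = "\t", col.names = NA)
  totals
}
# Example call, equivalent to the muscle ALG-2 (C) block above:
# summarise_snp_reads(MusAlg2_C, count.col = 29, out.prefix = "Muscle_ALG2_output_C_7")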
|
3c9953846d718c2e6e63c84bee4363755586ec8c | fd25b96601cf7ac4e7762183c0faeec87963aede | /supervised/image_process3.R | 17921c3ff0dbac7a136e997d369b1256daec8759 | [] | no_license | edz504/plankton | 08b9cf88db36a97a89b33d3e06da3bb169e15e5e | 3d45f585bd8644feaa9f991402c80d0fc3e0d548 | refs/heads/master | 2021-01-13T02:05:55.243329 | 2015-02-09T03:56:02 | 2015-02-09T03:56:02 | 28,106,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,252 | r | image_process3.R | # using some features from EBImage package
# INSTEAD of pixel hues
library(EBImage)
# count the number of total images
wd.top <- "C:/Users/edz504/Documents/Data Science Projects/Kaggle/plankton"
wd.train <- paste(wd.top, "/train", sep="")
setwd(wd.train)
num.img <- 0
for (class in list.files()) {
setwd(paste(wd.train, "/", class, sep=""))
num.img <- num.img + length(list.files())
}
train.data.1 <- matrix(nrow = num.img, ncol = 13)
labels <- rep(NA, num.img)
setwd(wd.train)
c <- 1
i <- 1
for (class in list.files()) {
setwd(paste(wd.train, "/", class, sep=""))
for (file in list.files()) {
img <- readImage(file)
# adaptive threshold
img2 <- thresh(img)
segment.labels <- bwlabel(img2)
### ratio feature
# find largest non-background object
obj.label <- which.max(table(as.vector(segment.labels))[-1])
if (length(obj.label) == 0) {
ratio <- 0
} else {
obj.inds <- which(segment.labels == obj.label,
arr.ind=TRUE)
ratio <- diff(range(obj.inds[,2])) / diff(range(obj.inds[,1]))
}
train.data.1[i, 1] <- ratio
### EBImage Features
# shape features
shape.feat <- computeFeatures.shape(segment.labels)
if (max(segment.labels) == 0) {
train.data.1[i, 2:7] <- rep(NA, 6)
} else {
train.data.1[i, 2:7] <- shape.feat[which.max(shape.feat[,1]),]
}
# resized shape features
img.resize <- resize(img, 30, 30)
img.resize.thresh <- thresh(img.resize)
resize.segment.labels <- bwlabel(img.resize.thresh)
shape.feat.resize <- computeFeatures.shape(resize.segment.labels)
if (max(resize.segment.labels) == 0) {
train.data.1[i, 8:13] <- rep(NA, 6)
} else {
train.data.1[i, 8:13] <- shape.feat.resize[which.max(shape.feat.resize[,1]),]
}
labels[i] <- c
i <- i + 1
}
c <- c + 1
cat("Finished class '", class, "'\n", sep='')
}
# make labels a factor
labels <- factor(labels)
# fix NaNs, and Inf values
train.data.1[which(is.nan(train.data.1), arr.ind=TRUE)] <- 0
inf.inds <- which(train.data.1 == Inf, arr.ind=TRUE)
# temporarily set Inf cells to -1 so max() only sees finite ratios
train.data.1[inf.inds] <- -1
this.max <- max(train.data.1[, 1])
# cap the Inf ratios at the largest finite ratio (somewhat arbitrary)
train.data.1[inf.inds] <- this.max
# scale and save the training data
train.data.scaled <- scale(train.data.1)
setwd(wd.top)
save(train.data.scaled, labels, file="training_EBfeat.RData")
# and testing data
setwd("test")
test.data.1 <- matrix(nrow = length(list.files()),
ncol = 13)
i <- 1
for (file in list.files()) {
if (i %% 10000 == 0) {
cat(i, "\n")
}
img <- readImage(file)
# adaptive threshold
img2 <- thresh(img)
segment.labels <- bwlabel(img2)
### ratio feature
# find largest non-background object
obj.label <- which.max(table(as.vector(segment.labels))[-1])
if (length(obj.label) == 0) {
ratio <- 0
} else {
obj.inds <- which(segment.labels == obj.label,
arr.ind=TRUE)
ratio <- diff(range(obj.inds[,2])) / diff(range(obj.inds[,1]))
}
test.data.1[i, 1] <- ratio
### EBImage Features
shape.feat <- computeFeatures.shape(segment.labels)
if (max(segment.labels) == 0) {
test.data.1[i, 2:7] <- rep(NA, 6)
} else {
test.data.1[i, 2:7] <- shape.feat[which.max(shape.feat[,1]),]
}
img.resize <- resize(img, 30, 30)
img.resize.thresh <- thresh(img.resize)
resize.segment.labels <- bwlabel(img.resize.thresh)
shape.feat.resize <- computeFeatures.shape(resize.segment.labels)
if (max(resize.segment.labels) == 0) {
test.data.1[i, 8:13] <- rep(NA, 6)
} else {
test.data.1[i, 8:13] <- shape.feat.resize[which.max(shape.feat.resize[,1]),]
}
i <- i + 1
}
# fix NaNs, and Inf values
test.data.1[which(is.nan(test.data.1), arr.ind=TRUE)] <- 0
inf.inds <- which(test.data.1 == Inf, arr.ind=TRUE)
# temporarily set Inf cells to -1 so max() only sees finite ratios
test.data.1[inf.inds] <- -1
this.max <- max(test.data.1[, 1])
# cap the Inf ratios at the largest finite ratio (somewhat arbitrary)
test.data.1[inf.inds] <- this.max
# scale and save testing data
test.data.scaled <- scale(test.data.1)
setwd(wd.top)
save(test.data.scaled, file="testing_EBfeat.RData")
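# The training and testing loops above compute the same 13 features twice;
# below is a sketch of a shared helper with the same logic (assumes EBImage
# is loaded) that either loop could call instead.
extract_features <- function(file) {
  feats <- rep(NA_real_, 13)
  img <- readImage(file)
  seg <- bwlabel(thresh(img))
  # ratio feature from the largest non-background object
  obj.label <- which.max(table(as.vector(seg))[-1])
  if (length(obj.label) == 0) {
    feats[1] <- 0
  } else {
    obj.inds <- which(seg == obj.label, arr.ind = TRUE)
    feats[1] <- diff(range(obj.inds[, 2])) / diff(range(obj.inds[, 1]))
  }
  # EBImage shape features on the full-size image
  if (max(seg) > 0) {
    sf <- computeFeatures.shape(seg)
    feats[2:7] <- sf[which.max(sf[, 1]), ]
  }
  # shape features again on a 30 x 30 resized image
  seg.small <- bwlabel(thresh(resize(img, 30, 30)))
  if (max(seg.small) > 0) {
    sf <- computeFeatures.shape(seg.small)
    feats[8:13] <- sf[which.max(sf[, 1]), ]
  }
  feats
}
# e.g. inside either loop: train.data.1[i, ] <- extract_features(file)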
|
98e61cb31ad8768496d7d69d743b0ad9566742bc | bab6c49cc2eb5fe74dcbfbd77d352d5df3aa375f | /RPackage/tests/testthat/test-horvitzThompson.R | c4e2cdfe18c7ba41b9ad0d3c15d87ab12b1afdfd | [] | no_license | rohan-shah/chordalGraph | a32671fc1c8cb2004058622e7c23d1c67c677232 | 49a7a7649e24379c1f632d25a109ecac2ad770bd | refs/heads/master | 2021-01-18T21:57:30.341495 | 2017-06-26T01:56:45 | 2017-06-26T01:56:45 | 42,837,660 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,000 | r | test-horvitzThompson.R | context("Test horvitzThompson function")
test_that("Horvitz Thompson algorithm gives unbiased results for 5 x 5 graph",
{
data(exact5, envir = environment())
nReps <- 600
results <- matrix(data=NA, nrow = nReps, ncol = 11)
for(reduceChains in c(TRUE, FALSE))
{
for(i in 1:nReps)
{
capture.output(results[i,] <- as.numeric(horvitzThompson(nVertices = 5, seed = i, budget = 10, options = list(reduceChains = reduceChains, graphRepresentation = "matrix"))@data))
}
means <- apply(results, 2, mean)
for(edgeCount in 1:11)
{
expect_equal(means[edgeCount], as.numeric(exact5@data[edgeCount]), tolerance = 0.01)
}
}
})
test_that("Horvitz Thompson algorithm gives identical results for different values of graphRepresentation",
{
nReps <- 10
for(reduceChains in c(TRUE, FALSE))
{
for(i in 1:nReps)
{
capture.output(resultList <- horvitzThompson(nVertices = 5, seed = i, budget = 10, options = list(reduceChains = reduceChains, graphRepresentation = "list")))
capture.output(resultMatrix <- horvitzThompson(nVertices = 5, seed = i, budget = 10, options = list(reduceChains = reduceChains, graphRepresentation = "matrix")))
resultList@call <- resultMatrix@call <- call("list")
resultList@options <- resultMatrix@options <- list()
resultList@start <- resultList@end <- resultMatrix@start <- resultMatrix@end
expect_identical(resultList, resultMatrix)
}
}
})
test_that("Horvitz Thompson algorithm gives unbiased results for 6 x 6 graph",
{
data(exact6, envir = environment())
nReps <- 600
for(reduceChains in c(TRUE, FALSE))
{
results <- matrix(data=NA, nrow = nReps, ncol = 16)
for(i in 1:nReps)
{
capture.output(results[i,] <- as.numeric(horvitzThompson(nVertices = 6, seed = i, budget = 30, options = list(reduceChains = reduceChains, graphRepresentation = "matrix"))@data))
}
means <- apply(results, 2, mean)
for(edgeCount in 1:16)
{
expect_equal(means[edgeCount], as.numeric(exact6@data[edgeCount]), tolerance = 0.01)
}
}
}) |
736dacf98c33155e3335bf41e64d1d68e22d8035 | bbc3943cfd57260dfc3c0603a48a80eadfd19220 | /man/plot_rw.Rd | d8841a07ad399706e617953ddd2230d8727f2e63 | [] | no_license | umatter/netensembleR | cef47df9c67c139bba8a5238aa2deecb605249b1 | 6f5f62a62d3d25f7ec330a1df0086809af8a208c | refs/heads/master | 2021-01-19T08:41:20.719220 | 2016-07-19T07:25:19 | 2016-07-19T07:25:19 | 87,662,883 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,235 | rd | plot_rw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_rw.R
\name{plot_rw}
\alias{plot_rw}
\title{Plot reciprocated graph component}
\usage{
plot_rw(g, extract=TRUE, rcolor="red", othercolor="grey", ...)
}
\arguments{
\item{g}{a directed weighted graph (object of class igraph)}
\item{extract}{logical if TRUE (default), only the reciprocated connections are plotted, if FALSE the whole graph is plotted with reciprocated connections highlighted}
\item{rcolor}{character string, color argument for edges (only used for case extract==FALSE; defaults to "red")}
\item{othercolor}{character string, color argument for non-reciprocated edges (only used for case extract==FALSE; defaults to "grey")}
\item{...}{passed down to igraph}
}
\description{
Plots either only the reciprocated connections of a graph or highlights them in the graph plot.
}
\examples{
# create a directed weighted graph
library(igraph)
g1 <- graph(c(1,2,2,1, 1,3,3,2), directed=TRUE)
E(g1)$weight <- c(2, 7, 3, 4)
V(g1)$name <- c("a", "b", "c")
plot(g1)
plot_rw(g1)
plot_rw(g1, extract=FALSE, edge.arrow.size=0.1, vertex.size=4,
vertex.label=NA, vertex.color="lightgrey")
}
\author{
Ulrich Matter <ulrich.matter-at-unibas.ch>
}
|
afe2917a2daee2bb54e3adf01ddfe7684fe00396 | f313cd982f70daffa643a2f2b1147d284607d613 | /others/add_cordinates.R | 9398159db8556d5d2070469e46d36f779778115c | [
"Apache-2.0"
] | permissive | xuzhenwu/PML-shiny | a0cc59f55c51edc8fb6f278029db038d11faf2b4 | 6d22b310cc264690cff5894b7fc763593540f20f | refs/heads/master | 2023-02-17T17:46:52.206485 | 2021-01-20T09:55:30 | 2021-01-20T09:55:30 | 284,678,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 571 | r | add_cordinates.R | library(ncdf4)
dir <- "F:/pml_data/"
fn_proto <- "F:/pml_dataset/output_of_PMLV2.0_GLDAS_NOAH_15D_A201301a_BJ_10mx10m.nc"
fl <- dir(dir, "*.nc", full.names = TRUE)
nc_proto <- nc_open(fn_proto) # the prototype is only read from, so no write access needed
for(i in seq_along(fl)){
fn <- fl[i]
nc <- nc_open(fn, write = T)
  # editing the in-memory nc list alone does not persist to the file on disk;
  # instead, write the prototype's coordinate values into each file's lat/lon
  # coordinate variables (assumes "lat" and "lon" exist as dimension variables)
  ncvar_put(nc, "lat", nc_proto$dim$lat$vals)
  ncvar_put(nc, "lon", nc_proto$dim$lon$vals)
  nc_sync(nc)
nc_close(nc)
}
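# Optional sanity check (assumes "lat" exists as a coordinate variable):
# re-open the first file and confirm its latitudes now match the prototype.
nc_check <- nc_open(fl[1])
print(all.equal(as.vector(ncvar_get(nc_check, "lat")), nc_proto$dim$lat$vals))
nc_close(nc_check)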
nc_close(nc_proto)
|
af015d0788a1cb9cfa5d3b4e1dd79ebfdb323d6b | 812e9c9df8f276ba93c4f9c79e97fb5430f5ed60 | /RCode/SimulateWeightedTTestClusters.R | c959914f91251b8d5ee27a1e063f15b7b1e7c55f | [
"MIT"
] | permissive | janhove/janhove.github.io | d090a22fa90776926301b9cb5b318475c2010cb5 | 44bb9fe9c8070ecda5cec049e9805da71063326f | refs/heads/master | 2023-08-21T18:52:38.584816 | 2023-08-06T09:11:17 | 2023-08-06T09:11:17 | 22,544,198 | 7 | 5 | null | 2020-05-27T09:56:52 | 2014-08-02T10:41:02 | HTML | UTF-8 | R | false | false | 3,698 | r | SimulateWeightedTTestClusters.R | # Function to generate 1 dataset with clustering
createData.fnc <- function(ICC = 0.1, # intraclass correlation coefficient
clusterSizes = c(10, 50, 13, 80, 14, 86, 62, 45, 41, 8), # cluster sizes
treatment = 0) { # treatment effect (= 0 for simulating null hypothesis)
# Calculate between-cluster variance:
# ICC = var.between / (var.within + var.between)
# here: var.within = 1
# hence: ICC = var.between / (1 + var.between)
# <=> ICC + ICC*var.between = var.between
# <=> var.between - ICC*var.between = ICC
# <=> var.between*(1-ICC) = ICC <=> var.between = ICC / (1-ICC)
var.between <- (ICC)/(1-ICC)
# Generate data frame with information about clusters
Clusters <- data.frame(cluster = factor(1:length(clusterSizes)), # generate k clusters
effectCluster = rnorm(length(clusterSizes), 0, sqrt(var.between)), # cluster effect (drawn from normal distribution with sd = sqrt(var.between))
conditionCluster = c(rep("control", floor(length(clusterSizes)/2)), # divide clusters into 'control' and 'intervention' clusters
rep("intervention", ceiling(length(clusterSizes)/2)))
)
# Generate data frame with information about participants
Data <- data.frame(cluster = rep(factor(1:length(clusterSizes)), clusterSizes), # each participant belongs to a cluster
effectParticipants = rnorm(sum(clusterSizes), 0, 1) # participant effect (drawn from normal distribution with sd = sqrt(1) (= var.within))
)
# Combine cluster and participant information
Data <- merge(Data, Clusters, by = "cluster")
# Add cluster effect to participant effect and add treatment effect
Data$outcome = Data$effectParticipants + Data$effectCluster + (as.numeric(Data$conditionCluster)-1)*treatment
# Return dataset
return(Data)
}
# Load plyr package (for summarising data)
library(plyr)
# Function for analysing data generated by createData.fnc()
analyseCluster.fnc <- function(dataset) {
# Compute mean and number of observations in each cluster
dat.sum <- ddply(dataset, .(cluster, conditionCluster), summarise,
meanOutcome = mean(outcome),
n = length(outcome))
# Compute p-value for t-test on outcomes (ignoring clustering)
# (anova = t-test here)
ttestIgnore <- anova(lm(outcome ~ conditionCluster, dataset))$'Pr(>F)'[1]
# Compute p-value for t-test on cluster means, weighted for cluster size
ttestWeighted <- anova(lm(meanOutcome ~ conditionCluster, dat.sum, weights = n))$'Pr(>F)'[1]
# Compute p-value for t-test on cluster means, unweighted
ttestUnweighted <- anova(lm(meanOutcome ~ conditionCluster, dat.sum))$'Pr(>F)'[1]
return(list(ttestIgnore,
ttestWeighted,
ttestUnweighted
)
)
}
# Combine these two functions
simulate.fnc <- function(ICC = 0.1,
clusterSizes = c(10, 50, 13, 80, 14, 86, 62, 45, 41, 8),
treatment = 0) {
Data <- createData.fnc(ICC = ICC,
clusterSizes = sample(clusterSizes), # resample (w/o replacement) clusterSizes: you don't know beforehand which cluster ends up in which condition
treatment = treatment)
pVals <- analyseCluster.fnc(Data)
return(pVals)
}
# Now run the simulation 10,000 times and see how many p-values are below 0.05 (should be 5%)
pvals <- replicate(1e4, simulate.fnc(clusterSizes = c(10, 50, 13, 80, 14, 86, 62, 45, 41, 8)))
mean(unlist(pvals[1,] <= 0.05)) # 44%
mean(unlist(pvals[2,] <= 0.05)) # 9%
mean(unlist(pvals[3,] <= 0.05)) # 5%
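# The same functions can be reused for a quick power check under the
# alternative hypothesis; the 0.5 SD treatment effect below is an assumed
# illustrative value, not one taken from the simulation above.
pvals.alt <- replicate(1e3, simulate.fnc(treatment = 0.5))
mean(unlist(pvals.alt[3,] <= 0.05)) # power of the unweighted cluster-means t-test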
|
c6ec59368ae99d2a50554b2897a6bbbb28802d6a | fe7788c1e4eba9b2668835da8535b05fcc94e32b | /Bin/Rscript/TZ2.r | 2bcf915b64e559a1f80984e05f471f95b1e1237d | [] | no_license | yiliao1022/Pepper3Dgenome | 77045dd1480abdfe217d47f7c20ff360c080108b | d4a8bc6e181eba45db1dff405f3a179fe4e9b99c | refs/heads/main | 2023-04-14T20:07:09.192776 | 2022-05-30T04:34:10 | 2022-05-30T04:34:10 | 338,714,255 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 268 | r | TZ2.r | library(CALDER)
contact_mat_file_LJTZ2 = "/home/yiliao/OLDISK/genome_assembly/hic_explorer/13_bnbc/100k/Chr12/matrixTZ2.Chr12.csv.txt"
CALDER_main(contact_mat_file_LJTZ2, chr=12, bin_size=10E4, out_dir='./Chr12_LJTZ2', sub_domains=TRUE, save_intermediate_data=FALSE)
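# The same call can be looped over the other chromosomes; the per-chromosome
# matrix path pattern below (Chr<k>/matrixTZ2.Chr<k>.csv.txt) is an assumption
# extrapolated from the chr12 path above.
for (k in 1:12) {
  mat_k <- sprintf("/home/yiliao/OLDISK/genome_assembly/hic_explorer/13_bnbc/100k/Chr%d/matrixTZ2.Chr%d.csv.txt", k, k)
  CALDER_main(mat_k, chr = k, bin_size = 10E4, out_dir = sprintf("./Chr%d_LJTZ2", k),
              sub_domains = TRUE, save_intermediate_data = FALSE)
}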
|
46946b684e8480d1e2e39baaf0ce40b08861e5e4 | f439a076bc3fcac2c8d7eb72e69dc8d24a00b263 | /Unit 3 Logistic Regression/Assignment3_Loans.R | 6eabf728ff70b2052479c9c37c667810c1b9ecd0 | [] | no_license | jakehawk34/MIT-Analytics | 73f9afb0cbfbbd8202e415f0c50c8e638aa76db1 | daa2ca2eca44ba6c74ba5773d992f68e8c775b90 | refs/heads/main | 2023-05-07T13:54:40.796512 | 2021-05-21T00:31:11 | 2021-05-21T00:31:11 | 344,290,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,390 | r | Assignment3_Loans.R | # Assignment 3
# Predicting Loan Repayment
loans = read.csv("loans.csv")
str(loans)
summary(loans)
table(loans$not.fully.paid)
mean(loans$not.fully.paid)
missing = subset(loans, is.na(log.annual.inc) | is.na(days.with.cr.line) | is.na(revol.util) | is.na(inq.last.6mths) | is.na(delinq.2yrs) | is.na(pub.rec))
str(missing)
nrow(missing)
mean(missing$not.fully.paid)
# Note that to do this imputation, we set vars.for.imputation to all variables in the data frame except for not.fully.paid,
# to impute the values using all of the other independent variables.
loans_imputed = read.csv("loans_imputed.csv")
# Split the data into train and test
set.seed(144)
split = sample.split(loans_imputed$not.fully.paid, SplitRatio = 0.7)
train = subset(loans_imputed, split == TRUE)
test = subset(loans_imputed, split == FALSE)
# Create a logistic regression model for the training set using all independent variables
mod1 = glm(not.fully.paid ~ ., data = train, family=binomial)
summary(mod1)
# Consider two loan applications, which are identical other than the fact that the borrower in Application A has FICO credit score 700
# while the borrower in Application B has FICO credit score 710.
# log(oddsA) - log(oddsB)
logOddsA = -9.317 * 10^-3 * (700)
logOddsB = -9.317 * 10^-3 * (710)
logOddsA - logOddsB
# oddsA are the odds that Application A is not fully paid back and oddsB are the odds that Application B is not fully paid back
# oddsA / oddsB
oddsA = exp(logOddsA)
oddsB = exp(logOddsB)
oddsA / oddsB
# Predict the probability of the test set loans not being paid back in full
# Store these predicted probabilities in a variable named predicted.risk and add it to your test set
predicted.risk = predict(mod1, newdata = test, type = "response")
table(test$not.fully.paid, predicted.risk > 0.5)
test$predicted.risk = predicted.risk
# Accuracy of the logistic regression model
(2400 + 3) / (2400 + 13 + 457 + 3)
# Accuracy of the baseline model
table(test$not.fully.paid)
2413 / (2413 + 460)
# Use the ROCR package to compute the test set AUC.
ROCRpred = prediction(predicted.risk, test$not.fully.paid)
ROCRperf = as.numeric(performance(ROCRpred, "auc")@y.values)
ROCRperf
# Using the training set, build a bivariate logistic regression model (aka a logistic regression model with a single independent variable)
# that predicts the dependent variable not.fully.paid using only the variable int.rate.
mod2 = glm(not.fully.paid ~ int.rate, data = train, family=binomial)
summary(mod2)
# Make test set predictions with the bivariate model, mod2
predicted.risk2 = predict(mod2, newdata = test, type = "response")
max(predicted.risk2)
table(test$not.fully.paid, predicted.risk2 > 0.5)
2413 / (2413 + 460)
# Test set AUC of the bivariate model
ROCRpred2 = prediction(predicted.risk2, test$not.fully.paid)
ROCRperf2 = as.numeric(performance(ROCRpred2, "auc")@y.values)
ROCRperf2
# How much does a $10 investment with an annual interest rate of 6% pay back after 3 years,
# using continuous compounding of interest?
10 * exp(0.06 * 3)
# Profit if the investor is paid back in full
# c * exp(r * t) - c
# In the previous subproblem, we concluded that an investor who invested c dollars in a loan with interest rate r for t years
# makes c * (exp(rt) - 1) dollars of profit if the loan is paid back in full
# and -c dollars of profit if the loan is not paid back in full (pessimistically).
test$profit = exp(test$int.rate*3) - 1
test$profit[test$not.fully.paid == 1] = -1
max(test$profit) * 10
# We will analyze an investment strategy in which the investor only purchases loans with a high interest rate (a rate of at least 15%),
# but amongst these loans selects the ones with the lowest predicted risk of not being paid back in full.
# We will model an investor who invests $1 in each of the most promising 100 loans
highInterest = subset(test, int.rate >= 0.15)
# Average profit of a $1 investment in highInterest loans
mean(highInterest$profit)
# Proportion of loans in highInterest not paid back in full
mean(highInterest$not.fully.paid)
# Next, we will determine the 100th smallest predicted probability of not paying in full by sorting the predicted risks in increasing order
# and selecting the 100th element of this sorted list.
# Find the highest predicted risk that we will include by typing the following command into your R console:
cutoff = sort(highInterest$predicted.risk, decreasing=FALSE)[100]
# Use the subset() function to build a data frame called selectedLoans consisting of the high-interest loans with predicted risk not exceeding the cutoff we just computed.
# Check to make sure you have selected 100 loans for investment.
selectedLoans = subset(highInterest, predicted.risk <= cutoff)
# What is the profit of the investor, who invested $1 in each of these 100 loans?
sum(selectedLoans$profit)
# How many of the selectedLoans loans were not fully paid back?
table(selectedLoans$not.fully.paid)
# We have now seen how analytics can be used to select a subset of the high-interest loans that were paid back at only a slightly lower rate than average,
# resulting in a significant increase in the profit from our investor's $100 investment.
# Although the logistic regression models developed in this problem did not have large AUC values,
# we see that they still provided the edge needed to improve the profitability of an investment portfolio.
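# As an extra, illustrative sanity check (not part of the original assignment):
# compare against investing $1 in each of 100 randomly chosen test-set loans.
set.seed(144)
mean(replicate(1000, sum(sample(test$profit, 100))))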
|
66b0be209695ff6d873419933b41c7413a34a115 | 33e47772221e64495af2f76fbe946106cd9dd5b5 | /src/co-localizations/fetal_brain_sQTL/find_overlaps.R | 3a42635c66354050930d050b72ffdd63395d84fd | [] | no_license | mikelaff/mirna-eqtl-manuscript | 8ddaed87bd3eee1923afa6f5ef0ebd80de7c205c | a87deb4dc063e03e5371ff7c4d19db782e295e12 | refs/heads/main | 2023-08-31T20:01:11.324071 | 2023-08-24T23:44:31 | 2023-08-24T23:44:31 | 590,512,745 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,891 | r | find_overlaps.R | # find co-localization miRNA-eQTL to fetal brain sQTLs
# for each miRNA-eQTL:
# look for overlap with sQTL at r2 >= 0.8
library(here)
library(dplyr)
library(readr)
library(magrittr)
library(mikelaffr)
# OUTPUT FILES #########################################################################################################
# working directory for co-localization analysis
dir.working <- paste0(here("results/co-localization/"), "fetal_brain_sQTL", "/")
dir.create(dir.working, recursive = TRUE, showWarnings = FALSE)
# table of overlaps
overlap.output.rds <- paste0(dir.working, "fetal_brain_sQTL", "_mirQTL_overlaps_r2at0.8.rds")
# list of miR-eQTL index snps that have overlaps
index.snps.txt <- paste0(dir.working, "fetal_brain_sQTL", "_overlap.index.snps.txt")
# INPUT FILES ##########################################################################################################
# mirQTL eQTLs
mirQTL.df.rds <- here("results/conditional_eqtls/20200120_mirQTLor_compiled/20200120_mirQTLor_conditional_eQTLs_compiled_dataFrame.rds")
# mQTL sQTLs
mQTL.df.rds <- here("results/external_data/fetal_brain_sQTL/fetal_brain_sQTLs.rds")
# Directory for LD at each mirQTL index SNP
mirQTL.ld.dir <- here("results/conditional_eqtls/20200120_mirQTLor_compiled/ld/")
# Directory for LD at each mQTL index SNP
mQTL.ld.dir <- here("results/external_data/fetal_brain_sQTL/ld/")
# GLOBALS ##############################################################################################################
CHROMS <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12",
"chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX")
# Import eQTL Data #####################################################################################################
df.mirQTL <- readRDS(mirQTL.df.rds)
df.mQTL <- readRDS(mQTL.df.rds)
df.mQTL %<>%
mutate(sQTL = paste(snp, intron, sep = "_"))
# Find Overlaps ########################################################################################################
# build table of overlaps
df.overlaps <- tibble()
# loop over each sQTL
for (i in 1:nrow(df.mirQTL)) {
printMessage(df.mirQTL$eQTL[i])
df.mQTL.this.chrom <- NULL
df.mQTL.possible <- NULL
mirQTL.ld.file <- NULL
df.mirQTL.ldbuddies <- NULL
# filter for mQTLs on this chromosome
df.mQTL %>%
filter(chr == df.mirQTL$SNP.CHR[i]) -> df.mQTL.this.chrom
# filter for mQTLs within 1MB of this mirQTL
bp.start <- df.mirQTL$SNP.BP.hg38[i] - 1e6
bp.end <- df.mirQTL$SNP.BP.hg38[i] + 1e6
df.mQTL.this.chrom %>%
filter(BP >= bp.start & BP <= bp.end) -> df.mQTL.possible
# if no possible overlaps, go to next mirQTL
if (nrow(df.mQTL.possible) == 0) {
print(paste("No overlaps possible for:", df.mirQTL$eQTL[i]))
    next
}
# get LD buddies for mirQTL index SNP
# ld file: chrX.hardcall.prefiltered.mirQTLor.chrX:49189007:G:A.ld
mirQTL.ld.file <- paste0(mirQTL.ld.dir, "conditional.mirQTLor.index.", df.mirQTL$eSNP[i], ".ld")
# ld for this index SNP
suppressWarnings(
df.mirQTL.ldbuddies <- read_table(mirQTL.ld.file, col_types = cols())
)
df.mirQTL.ldbuddies %<>%
filter(R2 >= 0.8)
# for each possible overlap
for (j in 1:nrow(df.mQTL.possible)) {
mQTL.ld.file <- NULL
df.mQTL.ldbuddies <- NULL
# get LD buddies for mQTL index SNP
# ld file: mQTL.index.chrX:19203648:C:T.ld
mQTL.ld.file <- paste0(mQTL.ld.dir, "sQTL.index.", df.mQTL.possible$snp[j], ".ld")
# ld for this index SNP
suppressWarnings(
df.mQTL.ldbuddies <- read_table(mQTL.ld.file, col_types = cols())
)
df.mQTL.ldbuddies %<>%
filter(R2 >= 0.8)
if (any(df.mirQTL.ldbuddies$BP_B %in% df.mQTL.ldbuddies$BP_B)) {
print("Overlap found!")
tmp.overlaps <- NULL
tmp.mirQTL <- NULL
tmp.mQTL <- NULL
# all overlaps
overlap.snps <- df.mirQTL.ldbuddies$SNP_B[df.mirQTL.ldbuddies$BP_B %in% df.mQTL.ldbuddies$BP_B]
tmp.overlaps <- tibble(overlap.snps = paste(overlap.snps, collapse = ","))
# build overlap dataframe
tmp.mirQTL <- df.mirQTL[i, ]
colnames(tmp.mirQTL) <- paste0(colnames(tmp.mirQTL), ".mirQTL")
tmp.mQTL <- df.mQTL.possible[j, ]
colnames(tmp.mQTL) <- paste0(colnames(tmp.mQTL), ".mQTL")
df.overlaps %<>%
bind_rows(bind_cols(tmp.mirQTL, tmp.mQTL, tmp.overlaps))
}
}
}
# Export Overlaps ######################################################################################################
saveRDS(df.overlaps, overlap.output.rds)
write_lines(unique(df.overlaps$eSNP.mirQTL), index.snps.txt)
|
566ad851ed0d7a7ded4c3d0aef1c2119fb0b8fc4 | 556d752f9666653cd8cfb6b9658e78fb0db26e14 | /R/pinktoe.R | 15a99d3e9d64db36c80176afcbc2ca2693e92a6e | [] | no_license | cran/pinktoe | 8f49f92a4782f8b7df89746dc7ef5f8b8788936d | 4655b78545f204281d4fec1b28b3e1d28f165475 | refs/heads/master | 2016-09-03T06:46:53.343607 | 2004-09-08T00:00:00 | 2004-09-08T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 877 | r | pinktoe.R | "pinktoe" <-
function (treeobj, textfn, tittext, treeid = "", cgibindir = paste("/~magpn/cgi-bin/",
treeid, "/", sep = ""), htmldir = paste("/home/magpn/public_html/Research/Politics/TREE/",
treeid, "/", sep = ""), localdir = "Tree/", stateprintfn = partyprint,
requirelib = "../party.lib", commonhtml)
{
require("rpart")
require("sfsmisc")
nnodes <- nrow(treeobj$frame)
#
# Since R rpart does not add in the splits to the frame we'll have
# to do it here
#
s <- splits.rpart(treeobj)
treeobj$frame<- cbind(treeobj$frame, s)
for (i in 1:nnodes) {
nlabel <- as.character(treeobj$frame[i, "var"])
if (nlabel != "<leaf>")
genedmhtml(treeobj, noderow = i, textfn, tittext,
cgibindir, htmldir, localdir, stateprintfn = stateprintfn,
requirelib = requirelib, commonhtml)
}
}
|
e6733999632f367a0decbc19e36c3034e548c881 | 07283623f9530c8c1ac7408eb099059d6deb7919 | /R/salvage_prediction.R | 81823343c91957e2a1cea93274d23d7f2883a294 | [] | no_license | hinkelman/DSM2Analysis | 75314f00a8a0a0723d0f43813558148b29c80035 | ebbe09bb57f504b6e1acb8f2939d5491b1abae4a | refs/heads/master | 2023-04-30T16:50:14.140023 | 2021-05-12T20:12:59 | 2021-05-12T20:12:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 501 | r | salvage_prediction.R | #' Salvage prediction
#'
#' Predicted salvage based on salvage_newdata and salvage_model.
#'
#' @md
#' @param facility Water export facility: CVP, SWP, both
#' @param newdata Data frame with input data needed for model predictions
#'
#' @export
#'
salvage_prediction <- function(facility, newdata){
newdata$salvaged <- predict(object = salvage_model(facility),
newdata = newdata,
type = "response")
return(newdata)
}
|
55e0dd38903fb5720c7f6844af04fe2992eada2d | b1c0f88b081a05d267e9570f264fda772546d879 | /lib/Optimization/Algorithms/MetaHeuristics/GeneticAlgorithm/JSSOperators/PartialPriorityList/PPLCrossoverMachines.R | 20ac6ed63f62fa46b4c1691b6179046196409d1e | [] | no_license | pedabreu/OptimizationFramework | 71adf7f46025069b08b8187ca144038a2c464d6b | d04fc81eebdfd9e2cceb6d4df98d522161ba7ebb | refs/heads/master | 2020-12-31T07:19:00.219609 | 2017-03-29T00:07:20 | 2017-03-29T00:07:20 | 86,566,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 836 | r | PPLCrossoverMachines.R | setConstructorS3("PPLCrossoverMachines",function()
{
cross <- Crossover()
cross$chromossomeClass <- "PPLChromosome"
extend(cross,"PPLCrossoverMachines")
})
setMethodS3("run","PPLCrossoverMachines", function(this,
male = NULL,
female = NULL,...) {
parent1 <- male$.genes$.priorityList
parent2 <- female$.genes$.priorityList
dimensions <- length(parent1)
cut <- floor(dimensions/2)
children1 <- c(parent1[1:cut],parent2[(cut+1):dimensions])
children2 <- c(parent2[1:cut],parent1[(cut+1):dimensions])
newChrom1 <- PLChromosome()
newChrom2 <- PLChromosome()
newChrom1$.genes$.priorityList <- children1
newChrom2$.genes$.priorityList <- children2
return(list(newChrom1,newChrom2))
})
|
2c63ca9b7b33e50a2f9d202bd8e071fd5fc336ec | c32e53644e09f1ed99b37395bbc05bda216818fa | /R/cmdscale_lanczos.R | 6d5466faf6fc9566c333e9bef406590ed5c6d169 | [] | no_license | dill/poridge | 241f4c490f5ce4df6172dd81f37cc22fbdbdb731 | d7aa5c88ca7e049592ae6c4bab51849e40bdc508 | refs/heads/master | 2021-01-12T19:52:11.501232 | 2016-06-23T18:50:48 | 2016-06-23T18:50:48 | 35,626,343 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,667 | r | cmdscale_lanczos.R | #' Faster multi-dimensional scaling
#'
#' This is a modified version of \code{\link{cmdscale}} that uses the Lanczos procedure (\code{\link[mgcv]{slanczos}}) instead of \code{eigen}. Called by \code{\link{smooth.construct.pco.smooth.spec}}.
#'
#' @param d a distance structure as returned by \code{\link{dist}}, or a full symmetric matrix of distances or dissimilarities.
#' @param k the maximum dimension of the space which the data are to be represented in; must be in \code{\{1, 2, ..., n-1\}}.
#' @param eig logical indicating whether eigenvalues should be returned.
#' @param add logical indicating if the additive constant of Cailliez (1983) should be computed, and added to the non-diagonal dissimilarities such that the modified dissimilarities are Euclidean.
#' @param x.ret indicates whether the doubly centred symmetric distance matrix should be returned.
#' @return as \code{\link{cmdscale}}
#'
#' @author David L Miller, based on code by R Core.
#' @seealso \code{\link{smooth.construct.pco.smooth.spec}}
# @importFrom mgcv slanczos
#' @references
#' Cailliez, F. (1983). The analytical solution of the additive constant problem.
#' \emph{Psychometrika}, 48, 343-349.
cmdscale_lanczos <- function(d, k = 2, eig = FALSE, add = FALSE, x.ret = FALSE){
if (anyNA(d))
stop("NA values not allowed in 'd'")
if (is.null(n <- attr(d, "Size"))) {
if(add) d <- as.matrix(d)
x <- as.matrix(d^2)
storage.mode(x) <- "double"
if ((n <- nrow(x)) != ncol(x))
stop("distances must be result of 'dist' or a square matrix")
rn <- rownames(x)
} else {
rn <- attr(d, "Labels")
x <- matrix(0, n, n) # must be double
if (add) d0 <- x
x[row(x) > col(x)] <- d^2
x <- x + t(x)
if (add) {
d0[row(x) > col(x)] <- d
d <- d0 + t(d0)
}
}
n <- as.integer(n)
## we need to handle nxn internally in dblcen
if(is.na(n) || n > 46340) stop("invalid value of 'n'")
if((k <- as.integer(k)) > n - 1 || k < 1)
stop("'k' must be in {1, 2, .. n - 1}")
## NB: this alters argument x, which is OK as it is re-assigned.
#x <- .Call(stats::C_DoubleCentre, x)
x <- scale(t(scale(t(x), scale=FALSE)),scale=FALSE)
if(add) { ## solve the additive constant problem
## it is c* = largest eigenvalue of 2 x 2 (n x n) block matrix Z:
i2 <- n + (i <- 1L:n)
Z <- matrix(0, 2L*n, 2L*n)
Z[cbind(i2,i)] <- -1
Z[ i, i2] <- -x
# Z[i2, i2] <- .Call(stats::C_DoubleCentre, 2*d)
Z[i2, i2] <- scale(t(scale(t(2*d), scale=FALSE)),scale=FALSE)
###### this is where Dave modified things
add.c <- max(slanczos(Z, k=1, kl=1)$values)
#e <- eigen(Z, symmetric = FALSE, only.values = TRUE)$values
#add.c <- max(Re(e))
## and construct a new x[,] matrix:
x <- matrix(double(n*n), n, n)
non.diag <- row(d) != col(d)
x[non.diag] <- (d[non.diag] + add.c)^2
#x <- .Call(stats::C_DoubleCentre, x)
x <- scale(t(scale(t(x), scale=FALSE)),scale=FALSE)
}
###### this is where Dave modified things
e <- slanczos(-x/2, k=k)
ev <- e$values#[seq_len(k)]
evec <- e$vectors#[, seq_len(k), drop = FALSE]
k1 <- sum(ev > 0)
if(k1 < k) {
warning(gettextf("only %d of the first %d eigenvalues are > 0", k1, k),
domain = NA)
evec <- evec[, ev > 0, drop = FALSE]
ev <- ev[ev > 0]
}
points <- evec * rep(sqrt(ev), each=n)
dimnames(points) <- list(rn, NULL)
if (eig || x.ret || add) {
evalus <- e$values # Cox & Cox have sum up to n-1, though
list(points = points, eig = if(eig) evalus, x = if(x.ret) x,
ac = if(add) add.c else 0,
GOF = sum(ev)/c(sum(abs(evalus)), sum(pmax(evalus, 0))) )
} else points
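  # A minimal usage sketch (assumes the mgcv package is loaded so slanczos() is found):
  #   library(mgcv)
  #   fit <- cmdscale_lanczos(dist(matrix(rnorm(100), 20, 5)), k = 2, eig = TRUE)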
}
|
65ef37bde9e5ee2f4972a7d4af074f8b64f35a2c | 7959c075b8d8fd90c423863d6cc51cb29ea517c5 | /day1.R | 42be8046402fda15ed3076c7b28669e6c0e0e947 | [] | no_license | salientsoph/Rexam | d8373e7bbfc85fc38cc203add6572574b88c7926 | 0d0b9cb7fc378654c886ca70ba56498770e8b4a8 | refs/heads/master | 2022-12-24T06:02:13.720153 | 2020-09-25T14:18:31 | 2020-09-25T14:18:31 | 293,553,326 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,234 | r | day1.R | # R 공부 첫 시작!
# ':'은 벡터 만들 때 사용
v1 <- 1:10
v1 = 1:10
1:10 -> v1
# : 사이엔 띄어도, 안 띄어도 됨
# 한 줄에 여러개 넣고 싶으면 ';' 사용
# print 안 쓰는 건 단독일 때만 가능.(console창에만 나타남)
print(v1)
v1
1:100
100:1
(v2 <- v1 + 100); v2 # ()로 묶으면 식으로 간주해서 console 창에 나옴
v3 <- v1 * 10; v3
ls() # list의 약어.
# c(): 벡터 함수 만들기
v4 <- c(10, 5, 7, 4, 15, 1) # 벡터값 지정
v5 <- c(100, 200, 300, '사백')
# seq()
seq(1, 10)
seq(1, 10, 2)
seq(0, 100, 5)
# rep() = repeat
rep(1, 100) # 1을 100번 반복
rep(1:3, 5) # 1부터 3을 5번 반복
rep(1:3, times=5) # 키워드 파라미터. 위와 같은 결과
rep(1:3, each=5)# 각각 반복(1 5개, 2 5개, 3 5개)
?rep # = help() = 설명을 해줌
LETTERS # 대문자 알파벳
letters # 소문자 알파벳
month.name # 월별 이름
month.abb # 월별 간축이름
pi # pi 원주율
LETTERS;letters;month.name;month.abb;pi
LETTERS[1]; LETTERS[c(3,4,5)]
LETTERS[3:5]; LETTERS[5:3]
LETTERS[-1] # - 인덱스: 그 원소만 빼고 출력(여기서는 1만 빼고 출력)
LETTERS[c(-2,-4)] # 2랑 4만 빼고 출력
length(LETTERS) # 데이터의 갯수
length(month.name)
length(pi)
x <- c(10,2,7,4,15)
x
print(x)
class(x) # 데이터의 타입을 체크
class(letters) # character
rev(x) # rev 함수에서만 새로운 벡터를 리턴(기존 벡터는 변경 없음)
range(x) # 최소값, 최대값을 꺼냄
sort(x) # (accendent)
sort(x, decreasing = TRUE) # (decendent)
sort(x, decreasing = T) # 위와 동일
x <- sort(x)
order(x) # 인덱스 나열, accendent. 가장 작은 element값부터 순서대로 인덱스를 나열함
x[3] <- 20
x
x + 1 # 각각 element 마다 +1
x <- x + 1
max(x);min(x);mean(x);sum(x)
summary(x) # 수치데이터들
x[c(2,4)] # = x[2], x[4]. 인덱스값을 줌. 이에 해당되는 element(2,4)는 True.
x[c(-2,-4)] # 2, 4 가 False.
x[c(F,T,F,T,F)]
x[c(T,F)] # 반복해서 적용함(T,F,T,F,..식으로)
x > 5 # 원소마다 5보다 큰지 확인
x[x > 5] # x 값 중에서 5보다 큰값만 추출
x[x > 5 & x < 15] # 5초과, 15미만 인 값만 추출(각각의 원소마다)
x[x > 5 && x < 15] # 벡터의 첫번째 원소만 고려
x[x > 5 | x < 15] # | = or. 5 초과 이거나 15 미만
# names(): textminding 할 때 사용
names(x) # x 벡터의 이름 벡터(현재는 정해져있지 않으므로 null)
names(x) <- LETTERS[1:5] # 각각의 element마다 고유의 이름 부여 가능
names(x)
names(x) <- NULL # 네임벡터 없애기
names(x)
x[2];x["B"]; # 인덱싱 2번째의 네임벡터를 'B'로 지정
# x[B()]: B라는 함수를 가져와서 x에 대입하고 리턴
# &(각각의 원소마다), &&(첫번째 원소만)
c(T, T, F, F) & c(T, F, T, F) # T F F F
c(T, T, F, F) | c(T, F, T, F) # T T T F
c(T, T, F, F) && c(T, F, T, F) # T
c(T, T, F, F) || c(T, F, T, F) # T
ls()
rm(x) # 벡터 삭제
x
class(x)
rainfall <- c(21.6, 23.6, 45.8, 77.0,
102.2, 133.3,327.9, 348.0,
137.6, 49.3, 53.0, 24.9)
rainfall > 100 # boolean 값으로 나타남
rainfall[rainfall > 100] # rainfall 중에서 100 이상의 값인 rainfall 값을 나타냄
which(rainfall > 100) # 인덱스를 뽑아냄
month.name[which(rainfall > 100)]
month.abb[which(rainfall > 100)]
month.korname <- c("1월","2월","3월",
"4월","5월","6월",
"7월","8월","9월",
"10월","11월","12월")
month.korname[which(rainfall > 100)]
which.max(rainfall)
which.min(rainfall)
month.korname[which.max(rainfall)]
month.korname[which.min(rainfall)]
# sample 함수(벡터함수, 몇 개 꺼낼건지)
sample(1:20, 3) # 1~20 중 3개 랜덤 추출
sample(1:45, 6)
sample(1:10, 7)
sample(1:10, 7, replace=T) # replace=T: 중복해도 상관 없을 때
# paste: 아규먼트로 이루어진 것들을 하나로 결합해줌
paste("I'm","Duli","!!") #default: 각각의 element 사이사이 blank 줌
paste("I'm","Duli","!!") #default: a blank between the elements
paste("I'm","Duli","!!", sep="") # 공백을 없앨 때
paste0("I'm","Duli","!!") # paste0: 중간에 공백을 없앰, 위와 동일
fruit <- c("Apple", "Banana", "Strawberry")
food <- c("Pie","Juice", "Cake")
paste(fruit, food)
paste(fruit, food, sep="")
paste(fruit, food, sep=":::")
paste(fruit, food, sep="", collapse="-") # collapse: 각각의 element를 붙임
paste(fruit, food, sep="", collapse="-") # collapse: joins the results into a single string
paste(fruit, food, collapse=",")
# matrix 실습
x1 <-matrix(1:8) # 8행 1열
x1
x1 <-matrix(1:8, nrow = 2) # 2행 4열
x1
x1<-x1*3
x1
sum(x1); min(x1);max(x1);mean(x1)
# matrix: 모든 열의 행의 갯수가 같고, 모든 행의 열의 갯수가 같다.
x2 <-matrix(1:8, nrow =3) # 3행(-> 열의 갯수가 3의 배수여야함)
x2
(chars <- letters[1:10])
mat1 <-matrix(chars)
mat1; dim(mat1)
matrix(chars, nrow=1) # 1행
matrix(chars, nrow=5) # 5행, 데이터를 열부터 채운다
matrix(chars, nrow=5, byrow=T) # byrow=T: 데이터를 행부터 채운다
matrix(chars, ncol=5)
matrix(chars, ncol=5, byrow=T)
matrix(chars, nrow=3, ncol=5)
matrix(chars, nrow=3)
vec1 <- c(1,2,3)
vec2 <- c(4,5,6)
vec3 <- c(7,8,9)
mat1 <- rbind(vec1,vec2,vec3); mat1 # vec1, vec2, vec3을 행으로 받음
mat2 <- cbind(vec1,vec2,vec3); mat2 # vec1, vec2, vec3을 열로 받음
mat1[1,1]
mat1[2,];mat1[,3]
mat1[1,1,drop=F]
mat1[2,,drop=F];mat1[,3,drop=F]
rownames(mat1) <- NULL
colnames(mat2) <- NULL
mat1;mat2
rownames(mat1) <- c("row1","row2","row3")
colnames(mat1) <- c("col1","col2","col3")
mat1
ls()
mean(x2)
sum(x2)
min(x2)
max(x2)
summary(x2)
mean(x2[2,])
sum(x2[2,])
rowSums(x2); colSums(x2)
# apply(matrix or array, 방향, 처리할 내용)
x2
apply(x2, 1, sum) # 1:행. 같은 행에 있는 값들을 더함
apply(x2, 2, sum) # 2:열. 같은 열에 있는 값들을 더함
?apply
apply(x2, 1, max)
apply(x2, 1, min)
apply(x2, 1, mean)
apply(x2, 2, max)
apply(x2, 2, min)
apply(x2, 2, mean)
#Array 실습
a1 <- array(1:30, dim=c(2,3,5)) #행, 열, 층
a1
a1[1,3,4] # 1행 2열 4층의 element 추출
a1[,,3] # 3층의 모든 element
a1[,2,] # 모든 층에 대해 2열의 모든 element
a1[1,,] # 모든 층에 대해 1행의 모든 element
|
489cc13fb198a9bd67739ea2f5bb711b219a60ea | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#114.A#48.c#.w#3.s#10.asp/ctrl.e#1.a#3.E#114.A#48.c#.w#3.s#10.asp.R | 6c697f2922f10821768e1092af510d0c00bc27fa | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 90 | r | ctrl.e#1.a#3.E#114.A#48.c#.w#3.s#10.asp.R | 4c8fc571d0cbd29ef611044c6cdc5017 ctrl.e#1.a#3.E#114.A#48.c#.w#3.s#10.asp.qdimacs 2897 8206 |
6401ba9adaee85d217bae45e844d4e9213096d92 | cde9f43c78142fd5fbb15ce42da626c3b18881a4 | /man/scrape_lse_sectors.Rd | c408fd15aea747de9cdf693c772aa4ffac29101b | [] | no_license | lina2497/webscrapeR | b282f03bfd5f212de1641a69cec6d8d798a6a82b | a33691b00bf81de045cdb695b78b2ab33731ee26 | refs/heads/master | 2020-09-08T03:52:48.060848 | 2019-11-12T16:11:11 | 2019-11-12T16:11:11 | 221,007,168 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 743 | rd | scrape_lse_sectors.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape_lse_sectors.r
\name{scrape_lse_sectors}
\alias{scrape_lse_sectors}
\title{Scrape share prices from a particular sector from the London Stock Exchange website.}
\usage{
scrape_lse_sectors(url, pause = 0)
}
\arguments{
\item{url}{A url from "https://www.londonstockexchange.com".}
\item{pause}{Indicates how long the function should sleep for before returning a value.}
}
\value{
A dataframe of historical share statistics from a particular sector Use \code{pause} when vectorising this function
over a list of urls to minimise nuisance to target website.
}
\description{
Scrape share prices from a particular sector from the London Stock Exchange website.
}
|
1865521686d70951f1eb3c911ca922a461a3e2be | 58a116de33b64fbbdc4a7d6d7c9d90289092ecf9 | /scripts/network1.r | 6b0c8452a9b601802131a41d50bc66331016a8e1 | [] | no_license | rian39/imaginaries | 1eba2b1b351159854fa0e83bf80d8d60ae83ca0b | b91e60c19dda3e50c9c266a28ddfdfb4c72503fa | refs/heads/master | 2021-01-18T21:14:50.202293 | 2016-08-17T11:56:36 | 2016-08-17T11:56:36 | 11,882,454 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,384 | r | network1.r | library(network)
g3 = network(imag[,c('term', 'author')],loops=TRUE,bipartite=TRUE)
plot(g3, displaylabels=TRUE)
g3 = network(imag[,c('term', 'author')],loops=FALSE,bipartite=TRUE)
plot(g3, displaylabels=TRUE)
g3 = network(imag[,c('term', 'author')],loops=FALSE,bipartite=FALSE)
plot(g3, displaylabels=TRUE)
g3 = network(imag[,c('term', 'author')],loops=FALSE,bipartite=FALSE)
svg('network1.svg')
plot(g3, displaylabels=TRUE)
dev.off()
add.edges(g3, imag$author, imag$cites1)
add.edges(x=g3, imag$author, imag$cites1)
str(g3)
mode(g30
)
mode(g3)
summary(g3)
network.vertex.names(g3)
nn = network.vertex.names(g3)
vs = unique(imag$cites1)
vs
vs %in% nn
vs[!vs %in% nn]
add.vertices(g3, vs[!vs %in% nn])
add.vertices(x=g3, vs[!vs %in% nn])
add.edges(x=g3, imag$author, imag$cites1)
as.network(g3)
plot(g3)
plot(g3)
add.vertices(g3, c(1,2)
g3
?network.vertex
?add.vertices
vs[!vs %in% nn]
newvert = vs[!vs %in% nn]
length(newvert)
?add.vertices
va = unique(c(imag$author, imag$cites1))
va
imag$author
va = c(imag$author, imag$cites1)
va
imag$cites1
va = unique(c(as.character(imag$author), as.character(imag$cites1)))
va
vb = unique(as.character(imag$term))
vb
g4 = network(x =imag[, c('term', 'author')])
g5 = network(x =imag[, c('author, 'cites1')])
g5 = network(x =imag[, c('author', 'cites1')])
plot(g4, displaylabels=TRUE)
plot(g5, displaylabels=TRUE)
|
8fc6d3e5ea258ecd5d67d6f80afe9a80eab9b800 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /irt/man/plot_empirical_icc.Rd | 8bd3cefa92d1d777c5776a1045cd8765b2798345 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 3,222 | rd | plot_empirical_icc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_IRT.R
\name{plot_empirical_icc}
\alias{plot_empirical_icc}
\title{Plot Empirical Item or Test characteristic curve}
\usage{
plot_empirical_icc(
resp,
item,
type = "eicc",
bins = 10,
ip = NULL,
theta = NULL,
title = "",
suppress_plot = FALSE,
x_axis_scale = NULL,
n_dodge = 1,
...
)
}
\arguments{
\item{resp}{Response matrix.}
\item{item}{The column number, column name or the 'id' of the the item that
should be plotted.}
\item{type}{The type of the graph that will be plotted.
\describe{
\item{\strong{\code{"eicc"}}}{Plot empirical item characteristic curve.
Examinees will be put into bins based on their total raw scores and the
proportion of examinees who correctly answered an item for each bin
will be plotted.}
\item{\strong{"oep"}}{Plot Observed p-values vs. expected p-values
grouped into bins based on total raw scores or theta scores.
This plot requires an \code{\link{Itempool-class}} object.
Optionally, provide \code{theta} vector, otherwise examinee abilities
will be estimated by \code{est_ability(..., type = "eap")}. This will
slow down the plotting function.
}
}}
\item{bins}{An integer larger than 2 representing of ability groups examinees
should be grouped into. The default is \code{10}. The maximum value of
\code{bins + 1} is the number of possible total scores.}
\item{ip}{An \code{\link{Itempool-class}} object that is needed for some
plots.}
\item{theta}{A vector of examinee abilities.}
\item{title}{Title of the plot}
\item{suppress_plot}{If \code{FALSE} the function will print the plot. If
\code{TRUE}, function will return the plot object. Default value is
\code{FALSE}.}
\item{x_axis_scale}{Set the scale of the x-axis. The default value is
\code{NULL}. For total score it will be defaulted to \code{"percent"}.
\describe{
\item{\strong{\code{"percent"}}}{Percent interval.}
\item{\strong{\code{"number"}}}{Numbers between 1 and \code{bins}}
\item{\strong{\code{"theta"}}}{Theta values equally divided into bins.
the middle value of the bin is shown in the x-axis. For example, if
\code{bins = 10}, the first tick of the x-axis will be the mean of
minimum theta value and tenth percentile theta value.
This is the only option for \code{type = "oep"}. }
}}
\item{n_dodge}{The number of lines the x-axis tick labels should be written
to. This is especially useful if the x-axis tick labels overlap with each
other. The default value is \code{1}, which means all of the labels are
written on the same line.}
\item{...}{Extra parameters that will pass to \code{geom_line}.}
}
\value{
Depending on the value of \code{suppress_plot} function either prints
the empirical item or test characteristic curve or returns the plot object.
}
\description{
\code{plot_empirical_icc} plots an empirical item or test characteristic curve.
}
\examples{
# Plot the information function of an item
resp <- sim_resp(ip = generate_ip(model = "3PL", n = 20),
theta = rnorm(10000))
plot_empirical_icc(resp, 3)
# Change the number of bins
plot_empirical_icc(resp, 4, bins = 15)
}
\author{
Emre Gonulates
}
|
351369686daf60f7547b3bbc28f00cddd6045d3d | 4855e806d6a5b65643c49ed3b602db276fe76d30 | /library/rsconnect/man/showUsers.Rd | 9bb249f285949adde1b7e1485735543d8fe1e6ce | [] | no_license | Cococatty/InteractiveMap | 5701a607a7605a4958c037b6b5559841c67126eb | 698b173ab0393cc38fdfd69f09b169dd87fd9f3d | refs/heads/master | 2021-01-10T18:14:56.274796 | 2016-02-17T09:02:23 | 2016-02-17T09:02:45 | 47,664,845 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 805 | rd | showUsers.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/auth.R
\name{showUsers}
\alias{showUsers}
\title{List authorized users for an application}
\usage{
showUsers(appDir = getwd(), appName = NULL, account = NULL,
server = NULL)
}
\arguments{
\item{appDir}{Directory containing application. Defaults to
current working directory.}
\item{appName}{Name of application.}
\item{account}{Account name. If a single account is registered on the
system then this parameter can be omitted.}
\item{server}{Server name. Required only if you use the same account name on
multiple servers.}
}
\description{
List authorized users for an application
}
\note{
This function works only for ShinyApps servers.
}
\seealso{
\code{\link{addAuthorizedUser}} and \code{\link{showInvited}}
}
|
c7934fe456068f30c26de8acdf9f376df5ee28ef | 7040a7db0d8da630d5223cd5ebdd7b88d71e10bd | /Code/fuzzy_matching (1).R | fec43e46ef15629b89b42dcb6e294e9e1bd34be0 | [] | no_license | josephsguido/coi | ea8713767fbdd74fea8cb0ee27787902cd2c94cf | c5c2b82106dfa28f9e3ae36efb5196469944b2d7 | refs/heads/master | 2023-02-22T23:06:16.418004 | 2020-06-14T01:55:58 | 2020-06-14T01:55:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,800 | r | fuzzy_matching (1).R | library(dplyr)
applicants <- read.csv(url("https://www.dropbox.com/s/o4j1h7tqbmb5746/pull_All_Years_rename_56.csv?raw=1"))%>% pull(x)
residents <- read.csv(url("https://www.dropbox.com/s/7v67tpo8v52q58k/pull_residents_distinct_23.csv?raw=1")) %>% pull(x)
set1 <- applicants
set2 <- residents
library(stringdist)
fuzzymatch<-function(dat1,dat2,string1,string2,meth,id1,id2){
#initialize Variables:
matchfile <-NULL #iterate appends
x<-nrow(dat1) #count number of rows in input, for max number of runs
#Check to see if function has ID values. Allows for empty values for ID variables, simple list match
if(missing(id1)){id1=NULL}
if(missing(id2)){id2=NULL}
#### lowercase text only
dat1[,string1]<-as.character(tolower(dat1[,string1]))#force character, if values are factors
dat2[,string2]<-as.character(tolower(dat2[,string2]))
#Loop through dat1 dataset iteratively. This is a work around to allow for large datasets to be matched
#Can run as long as dat2 dataset fits in memory. Avoids full Cartesian join.
for(i in 1:x) {
d<-merge(dat1[i,c(string1,id1), drop=FALSE],dat2[,c(string2,id2), drop=FALSE])#drop=FALSE to preserve 1var dataframe
#Calculate String Distatnce based method specified "meth"
d$dist <- stringdist(d[,string1],d[,string2], method=meth)
#dedupes A_names selects on the smallest distatnce.
d<- d[order(d[,string1], d$dist, decreasing = FALSE),]
d<- d[!duplicated(d[,string1]),]
#append demos on matched file
matchfile <- rbind(matchfile,d)
# print(paste(round(i/x*100,2),"% complete",sep=''))
}
return(matchfile)
}
fuzzymatch
set1<-data.frame(set1)
set2<-data.frame(set2)
matchNames<-fuzzymatch(set1,set2,"set1","set2",meth="osa")
head(matchNames)
|
cc0138b07a059b5c460e17e018b34d0036ca5029 | f2bb9dc756f74ccfd1aa7bb4e1aa9d682d93e628 | /R/FilterOutputStream.R | 3a258845822bc6920229f54e4ec7611281bbb357 | [] | no_license | HenrikBengtsson/R.io | 660437c62f692db4fecfbae08648eb56237e9ca2 | 7ff13117d31299027e9029c2ec9d92ec5079273b | refs/heads/master | 2021-01-01T19:43:25.136246 | 2014-06-19T04:11:28 | 2014-06-19T04:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,503 | r | FilterOutputStream.R | setConstructorS3("FilterOutputStream", function(out=NULL) {
if (!is.null(out)) {
if (!inherits(out, "OutputStream"))
throw("Argument 'out' is not an OutputStream: ", data.class(out));
}
extend(OutputStream(), "FilterOutputStream",
out = out
);
})
setMethodS3("close", "FilterOutputStream", function(con, ...) {
# To please R CMD check...
this <- con;
if (!is.null(this$out)) {
flush(this);
close(this$out);
}
})
setMethodS3("flush", "FilterOutputStream", function(con, ...) {
# To please R CMD check...
this <- con;
if (!is.null(this$out))
flush(this$out);
})
setMethodS3("write", "FilterOutputStream", function(this, b, off=1, len=length(b), ...) {
write(this$out, b, off=off, len=len);
})
############################################################################
# HISTORY:
# 2002-03-06
# * NOTE: Don't set this$out <- NA after closing because maybe the stream
# is not closeable. Just swallow the request silently.
# * Removed obsolete getInternalReferences().
# 2002-03-05
# * BUG FIX: close() now only tries to flush() and close() the internal
# OutputStream if it exists.
# 2002-01-21
# * Rewritten to use setMethodS3().
# * Changed all this$method(...) to method(this, ...), which is faster.
# 2001-05-14
# * Added getInternalReferences() for improving gco() performance.
# 2001-04-30
# * Added code for write().
# 2001-04-29
# * Created.
############################################################################
|
6d42927d5cd7645f0e5512a0849f4245a1d9f463 | 7bb3f64824627ef179d5f341266a664fd0b69011 | /Basic_Engineering_Mathematics_by_John_Bird/CH28/EX28.9/Ex28_9.R | 2c089aab45ae6f07a53b707750ccf904d6c266f4 | [
"MIT"
# formula used: volume of pyramid = area of base*perpendicular height/3
#given: square pyramid
l = 6 # side of square base
h = 16 # perpendicular height
volume = ((l*l)*h)/3
print(volume)
|
6b524471cea7873cb98c56faa444d5ffd2687d00 | ec139ad96578d965d6c66a340ffc2a6859f92f0e | /R/plotRangeObj_final.R | f8e9a88ee9a979ba624ff5f26814d69967237c68 | [] | no_license | cammiller/imagingPC | 5769541c387d029978ec489128e727120a6fa539 | 8022b40d9125adc2a0c195e21c194b171e5019c8 | refs/heads/master | 2022-01-18T05:31:55.227296 | 2019-06-27T20:09:44 | 2019-06-27T20:09:44 | 194,138,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,595 | r | plotRangeObj_final.R | #' Plot a range object.
#'
#' \code{plotIDs} the semivariance estimates, covariance model, and number of
#' pairs of observations used to estimate semivariance.
#'
#' @param rangeObj An object of class \code{rangeList}. This object is a list
#' that contains the estimated range parameter(s). This is obtained by a call
#' to the \code{\link{estRange}} function.
#'
#' @examples
#' data("TAMdata")
#' # The dataset is trimmed only for the speed of the example
#' TAMdata <- TAMdata[TAMdata$subject < 3, ]
#' TAMdata <- rScale(TAMdata, subjectVar = 'subject', sampleVar = 'ROI',
#' xCoord = 'x', yCoord = 'y')
#' rangs <- estRange(TAMdata, outcome = 'X1282.auc', spatialVar = 'TAM',
#' semivEst = 'modulus', logTransform = TRUE)
#' plotRangeObj(rangs)
#'
#' @return A set of plots displaying the semivariance estimates, fitted
#' covariance model, and number of pairs of observations used to estimate
#' semivariance. If no spatial variable was provided to the
#' \code{\link{estRange}} function, then there will be two plots. The first,
#' on top, will show the semivariance estimates and the fitted covariance
#' model. The second, on the bottom, will show the number of data pairs used
#' to the estimate the semivariance at the corresponding distance. If a
#' spatial variable is supplied to the \code{\link{estRange}} function, then
#' there will be two plots for every level of the spatial variable.
plotRangeObj<-function(rangeObj){
if(is.null(rangeObj[['spatialVar']])==TRUE){
semiline<-data.frame(rangeObj$semivarFit$u,rangeObj$semivarFit$v,rangeObj$semivarFit$n)
colnames(semiline)<-c('dist','semiv','num')
covfit<-matrix(nrow=1000, ncol=2)
covfit<-as.data.frame(covfit)
colnames(covfit)<-c('dist','fitval')
covfit[,1]<-seq(0,max(rangeObj$semivarFit$u), length.out = nrow(covfit))
for(i in 1:nrow(covfit)){
covfit[i,2]<-rangeObj$covModelFit$nugget + ((rangeObj$estSig2*(1-exp(-(covfit$dist[i]/rangeObj$estRange)**2))))
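      # Gaussian semivariogram: gamma(h) = nugget + sigma^2 * (1 - exp(-(h/range)^2))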
}
p1<-ggplot(semiline, aes(x=dist, y=semiv)) +
geom_point() +
geom_line(data=covfit, aes(x=dist, y=fitval), color='red') +
ylim(0,max(semiline$semiv)) +
labs(x='distance',y='semivariance') +
theme_bw()
p2<-ggplot(data=semiline, aes(x=dist,y=num)) +
geom_area(data=semiline, aes(x=dist,y=num), fill='darkblue') +
xlim(0,max(semiline$dist)) +
labs(x='distance',y='number of data pairs') +
theme_bw()
grid.arrange(p1,p2, ncol=1)
}
if(is.null(rangeObj[['spatialVar']])==FALSE){
semiline<-list()
for(i in 1:length(rangeObj$estRange)){
semiline[[i]]<-data.frame(rangeObj$semivarFit[[i]]$u,rangeObj$semivarFit[[i]]$v,rangeObj$semivarFit[[i]]$n)
colnames(semiline[[i]])<-c('dist','semiv','num')
}
covfit<-list()
for(i in 1:length(rangeObj$estRange)){
covfit[[i]]<-matrix(nrow=1000, ncol=2)
      covfit[[i]]<-as.data.frame(covfit[[i]])
colnames(covfit[[i]])<-c('dist','fitval')
covfit[[i]][,1]<-seq(0,max(rangeObj$semivarFit[[i]]$u), length.out = nrow(covfit[[i]]))
for(j in 1:nrow(covfit[[i]])){
covfit[[i]][j,2]<-rangeObj$covModelFit[[i]]$nugget + ((rangeObj$estSig2[i]*(1-exp(-(covfit[[i]]$dist[j]/rangeObj$estRange[i])**2))))
}
}
spatvarlevels<-sort(unique(rangeObj$data[[rangeObj$spatialVar]]))
for(i in 1:length(rangeObj$estRange)){
eval(parse(text = c(paste0('p',i,'<-','ggplot(semiline[[',i,']], aes(x=dist, y=semiv)) +'),
'geom_point() +',
paste0('geom_line(data=covfit[[',i,']], aes(x=dist, y=fitval), color="red") +'),
paste0('ylim(0,max(semiline[[',i,']]$semiv)) +'),
paste0('labs(x="distance",y="semivariance", title="',rangeObj$spatialVar,'=',spatvarlevels[i],'") +'),
'theme_bw()')))
eval(parse(text = c(paste0('p',(length(rangeObj$estRange)+i),'<-','ggplot(semiline[[',i,']], aes(x=dist, y=num)) +'),
paste0('geom_area(data=semiline[[',i,']], aes(x=dist, y=num), fill="darkblue") +'),
paste0('xlim(0,max(semiline[[',i,']]$dist)) +'),
'labs(x="distance",y="number of data pairs") +',
'theme_bw()')))
}
eval(parse(text = paste0('grid.arrange(',paste('p',seq(1:(length(rangeObj$estRange)*2)), sep='', collapse=', '),', nrow=2, heights=c(1.25,1))')))
}
}
|
833c14e632e0409eb42a1ff77000f5ae1939f086 | bfafdbde9bf19502bbc3c7b7b637a702b7b06b99 | /CEO_Departures/departures.r | 5bf35802e5ea1697d266fd0244978e0ab7711995 | [] | no_license | aichunC/tidytuesday | 2073102ff1a75d6ec7931c6b31d9c2398c3cf01e | a5a645b5293962e32fc6332b37497d1cb6a33018 | refs/heads/main | 2023-05-23T06:27:21.466192 | 2021-06-10T03:26:42 | 2021-06-10T03:26:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,718 | r | departures.r | library(tidytuesdayR)
library(tidyverse)
library(forcats)
# get the data file, read description about the variables.
tuesdata <- tidytuesdayR::tt_load(2021, week = 18)
departures <- tuesdata$departures
departures2 <- departures %>% select(coname,fyear,exec_fullname,departure_code,
ceo_dismissal,tenure_no_ceodb,max_tenure_ceodb)
#Q1, which company has the most departures?
# sears holding corp 11 departures, Great Elm Captial Group Inc 10 departures
departures2 %>% group_by(coname) %>%count() %>% arrange(desc(n))
#Q2, which year has the most departure? 2008-2009?
# hoop, we foud out it is 2017-2018
departures2 %>% group_by(fyear) %>% summarise(n=n()) %>%arrange(desc(n)) %>%
ggplot()+
geom_point(mapping=aes(x=fyear,y=n))
#Q3, what are the most common reason of departure ?
CR<-departures2 %>% group_by(departure_code) %>% count() %>% na.omit() %>%
arrange(desc(n))
CR$departure_code <-factor(CR$departure_code,level=unique(CR$departure_code))
CR %>% mutate(departure_code=fct_reorder(departure_code, n) %>% fct_rev()) %>%
ggplot()+
geom_point(mapping=aes(x=n,y=departure_code, color=departure_code)) +
labs( title="The most common departure reason: voluntar- CEO retired",
subtitle="Plot of the reasons in increasing order",
caption ="data from Gentry et al. by way of DatalsPlural"
)+
annotate("text",x=3250,y=9,label="Voluntary-CEO retired")+
theme_linedraw() +
scale_color_discrete(label=c("Missing","CEO death","Execucomp error","CEO illness","CEO new opportunity","CEO dismissed for legal violations or concerns",
"CEO dismissed for job performance","Other","CEO retired")) +
theme(legend.position = "bottom")
#Q4 ceo_dismissal : a dummy code for involuntary, non-health realted turnover
# oh there are ~1489 ceo dismissal in the USA
departures2 %>% count(ceo_dismissal)
#Q5 for CEO who return,(tenure_no_ceodb >=2), what is their max_tenure_ceo,ie,
#how many times did s/he serve as CEO
Mx<-departures2 %>% group_by(tenure_no_ceodb) %>% nest()%>%mutate(mx=map(data,~pluck(.x)%>%
select(max_tenure_ceodb)%>%
group_by(max_tenure_ceodb)%>%
count()))
Mx_models <- Mx %>% mutate( models=map(mx, ~lm(max_tenure_ceodb~n,data=.x) ) )
Mx_coeff <- Mx_models %>% mutate(coeffs = map_dbl(models, ~coefficients(.x)%>%pluck("(Intercept)")))
|
9426426a1ed4526886880ade48b7bd9588afd93a | 3857f5effb1c16505f2747e54406955130162af3 | /PPSD code for all syntax.R | b42c5e05497ec5094b2d738ff84904ad52e9783f | [] | no_license | hamzakashif902/PPSD-repositary | 737084b03d9f7fd5365d2cb7a81ae9c322b4800a | 4dbc64e80ee965dae3e381e5af44025333dca77c | refs/heads/master | 2020-12-02T09:33:47.898775 | 2019-12-30T19:25:56 | 2019-12-30T19:25:56 | 230,964,882 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,267 | r | PPSD code for all syntax.R | #assigning value and printing value
a = 1 #using =
print(a) #printing a/ printinh variable
b <- a+2 #using <-
print(b)
4 -> a #using ->
print(a)
#printing multiple data types
cat("value of a: ", a, "value of b: ", b)
#check data type of variable a
print(class(a))
#list all available variable at workspace
print(ls())
#usually variable name start with dot are not displayed with ls()
#so use this command to show them too
print(ls(all.names = TRUE))
#print variable at workspace which include 'za'
print(ls(pattern = 'za'))
#delete variable from workspace, a is variable
rm(a)
#to all variable on workspace
rm(list = ls())
#adding to vectors mean array index by index
v <- c(2, 0, 4, 1)
d <- c(2, 14, 0, 11)
sum2 <- (v+d)
print(sum2)
#adding all elements of two arrays
sum4 <- sum(v,d)
print(sum4)
#adding all elements of one array
sum5 <- sum(v)
print(sum5)
#subtract arrays
print(d-v)
#multiply arrays element
print(d*v)
#devide elements of arrays
print(d/v)
#modules of two array's element
print(d%%v) #reminder will be answer
#
print(d%/%v) #quotient will be answer
#Relational Operators
print(d > v)
print(d < v)
print(d == v)
print(d >= v)
print(d <= v)
print(d != v)
#logical operator
print(d & v)
print(d | v)
print(!v)
#Left Assignment
a <- 3
a = 3
a <<- 3
#Right Assignment
3 -> a
3 ->> a
#Scan value
d <- 1:8
a <- 4
b <- a %in% d
print(b)
#Multiply matrix with its transpose
ma = matrix( c(1,2,3,4,5,6), nrow = 2,ncol = 3,byrow = TRUE) #making matrix
tra = ma %*% t(ma)
print(tra)
#to check a is integer
a <- 10L #L for integer data, integer is also numaric but numaric is not integer
is.integer(a) #true if a is integer
is.numeric(a) #true if a is numaric
is.character(a) #true if a is characer
#if else condition
if(a<2){
print("a is less than 2")
}else{
print("a is greater")
}
#array
arr<-c(1,2,3,4,5)
print(arr)
#array from 1 to 4
arr<-1:4
#array from 4 to 1
arr<-4:1
#loop and continue
s<-"value in v"
v<-4:1 #v=4,3,2,1 array
for(c in v){ #c = 4,3,2,1 values from 4 to 1
#print(s)
if(c==2){
next #skipt 2
}
print (c)
}
#repeat and break mean do-while
r<-1
repeat{ #repeat
print(r)
if(r==6){ #until condition match
break
}
r<-r+1
}
#while
w<-1 #starting value
while(w<3){ #condition
print(w)
w<-w+1 #incrementing
}
#procedure
a<-1
b<-2
c<-3
addThreeVlaues <- function(a, b, c){
d<-a+b+c
return(d)
}
d<-addThreeVlaues(a,b,c)
print(d)
#taking input from user as string into a
a<-readline()
#convert string or character into integer and store into b
b<- as.integer(a)
#creat list of diff data types and store in list1
list1 <- list(c(11,12,13,14), "zahid", sin, 2)
print(list1[[1]]) #printin first index data that is array
print(list1[[1]][2]) #printing the 2nd element of array
#creat matrix
ma <- matrix(c(2,3,4,5,6,7), nrow = 2,ncol = 3, byrow = TRUE)
print(ma)
#factors create levels base of distinct values
arr<-c('red', 'green', 'blue', '1', '1', 'red')
fac<-factor(arr)
print(fac)
print(nlevels(fac)) #LEVELS
#data frame
fra <- data.frame(
name = c('zahid ali', 'abdul samad', 'waleed', 'Riana'),
age = c(20, 19, 19, 25),
gender = c('male', 'male', 'male', 'female')
)
print(fra)
#switch
x <- switch(
2, #expression
'zahid', #case1
'Ali', #case2
'sikander' #case3
)
print(x)
#input from user in array using for loop
rm(list = ls())
arr<-
v <- 1:5
for (i in v) {
arr[i] <- readline(prompt = "enter value: ")
}
#printing multiple strings
a<-"zahid"
b<-"ali"
c<-"raina"
print(paste(c,a,b,sep="-", collapse = ""))
#Formating
# Total number of digits displayed. Last digit rounded off.
result <- format(23.123456789, digits = 9)
print(result)
# Display numbers in scientific notation.
result <- format(c(6, 13.14521), scientific = TRUE)
print(result)
# The minimum number of digits to the right of the decimal point.
result <- format(23.47, nsmall = 5)
print(result)
# Format treats everything as a string.
result <- format(6)
print(result)
# Numbers are padded with blank in the beginning for width.
result <- format(13.7, width = 6)
print(result)
# Left justify strings.
result <- format("Hello", width = 8, justify = "1")
print(result)
# Justfy string with center.
result <- format("Hello", width = 8, justify = "c")
print(result)
#count the length of characters
nchar("zahid ali")
#to upper case
print(toupper("zahidi Ali"))
#to lower case
print(tolower("ZAhId Ali"))
#Extract character from position to position
print(substring("zahid ali", 5, 9)) #from 5 to 9, space is including
############## Atomic Vectors total 6 types
a<-"abcd" #characters
a<-12.5 #double
a<-12L #integer
a<-TRUE #logical
a<-2+3i #complex
a<-charToRaw('abcdzABCDZ01239') #Raw convertin to raw
#converting to char from raw
a<-rawToChar(a)
# Create vector with elements from 5 to 9 incrementing by 0.4.
a<-seq(1, 9, by = 0.)3
print(a)
|
52dedca4469623457123efa18e3248c3fc88cb1b | 6e88d4c9b6ecde3701a339b6c6a7b2fda27739a2 | /man/add_budget_line.Rd | 49651a08107ed059b6e039d2bd37fd18feca649f | [
"MIT"
% Please edit documentation in R/iso_lines.R
\name{add_budget_line}
\alias{add_budget_line}
\title{Add a budget line to a data frame of budget lines}
\usage{
add_budget_line(
.DF = NULL,
meta,
graph_type = ReboundTools::graph_types$consumption,
line_name,
colour = ReboundTools::path_graph_params$cons_grid_colour,
linewidth = ReboundTools::path_graph_params$cons_grid_linewidth,
linetype = ReboundTools::path_graph_params$cons_grid_linetype,
slope,
intercept,
graph_df_colnames = ReboundTools::graph_df_colnames
)
}
\arguments{
\item{.DF}{The data frame to which lines are appended.
When \code{NULL}, the default, a new data frame is created and returned.
When not \code{NULL}, rows for the budget lines are added to the bottom of \code{.DF}.}
\item{meta}{A data frame of metadata for the segment to be added.
This metadata data frame provides the left-most columns of the return value.}
\item{graph_type}{The type of graph associated with this segment.
Default is See \code{ReboundTools::graph_types$consumption}.}
\item{line_name}{A name for this budget line}
\item{colour}{The colour for this budget line. Default is \code{ReboundTools::path_graph_params$cons_grid_colour}.}
\item{linewidth}{The size (width) for this budget line. Default is \code{ReboundTools::path_graph_params$cons_grid_linewidth}.}
\item{linetype}{The line type for this budget line. Default is \code{ReboundTools::path_graph_params$cons_grid_linetype}.}
\item{slope}{The slope of this budget line.}
\item{intercept}{The intercept of this budget line.}
\item{graph_df_colnames}{A list of column names in graph data frames.
Default is \code{ReboundTools::graph_df_colnames}.}
}
\value{
A version of \code{.DF} with budget lines added at the bottom.
}
\description{
Adds a budget line to a data frame.
The budget lines are accumulated in rows.
}
\details{
There is usually no need to call this function directly.
Functions like \code{iso_budget_lines_cons()} and \code{add_iso()}
call \code{add_budget_line()} internally.
}
\examples{
meta <- tibble::tibble(Case = "Test case")
add_budget_line(meta = meta,
line_name = "Test budget line",
slope = -1, intercept = 5)
}
|
e9b35d40fe9959cfebe51b3922ed11c8c2db8417 | 7fc3d62d0dff3542a33cae700d0cc7e790302026 | /R/load_reanalysis.R | fcd6ef9f416f1a5f355cc3d50c5829b523fd7e84 | [] | no_license | d-farnham/ORB_Paper | c21e1fb2010f280af918917be9854aa09a51d117 | bd7871fb2c43afce4d939310a1bcbffd19beef1d | refs/heads/master | 2021-03-27T14:58:57.576475 | 2018-03-19T04:20:45 | 2018-03-19T04:20:45 | 94,268,439 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,579 | r | load_reanalysis.R | source("R/GetSeasonDate.R")
# load the reanalysis data -- start with the Z
list_Z_files = list.files(path = '/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/six_hourly/Z/')
Z_700 = data.frame()
for(file_num in 1:length(list_Z_files)){
Z_ncdf = nc_open(paste0('/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/six_hourly/Z/',list_Z_files[file_num]))
# extract time and lat/lon from the file
time = ncvar_get(Z_ncdf, varid = "time", start = c(1), count = c(-1))
# convert time to dates and times
date_time = as.POSIXct(time*60*60, origin = "1800-01-01 00:00:00", tz = "UTC")
# now need to make a new date column that adds 12 hours to each time so that each day is now from the priors day 12z to the current days 12z
date_time_adjust = date_time + 12 * 60 * 60
lat = ncvar_get(Z_ncdf, varid = "lat", start = c(1), count = c(-1))
lon = ncvar_get(Z_ncdf, varid = "lon", start = c(1), count = c(-1))
# hgt[lon,lat,level,time] -- only level included is 700
tmp0 = melt(ncvar_get(Z_ncdf,
varid = "hgt",
start = c(1,1,1,1),
count = c(-1,-1,1,-1)))
# insert actual lat, lon, date values and average by day
tmp0 = tmp0 %>% dplyr::mutate(lon = lon[Var1],
lat = lat[Var2],
date = as.Date(date_time_adjust[Var3]), tz = "UTC") %>%
dplyr::group_by(lat, lon, date) %>%
dplyr::summarise(Z_700 = mean(value)) %>%
dplyr::select(lon, lat, date, Z_700)
# only retain Feb 20 through May 31st
# also only retain between 1950 and 2005
tmp0 = tmp0 %>% dplyr::filter(lubridate::month(date) %in% c(2,3,4,5) &
!(lubridate::month(date) == 2 & lubridate::day(date) < 20) &
lubridate::year(date) > 1949 &
lubridate::year(date) < 2006)
if(file_num == 1){tmp = tmp0}
if(file_num > 1){tmp = rbind(tmp, tmp0)}
print(file_num)
}
Z_700 = tmp
save(Z_700, file = 'Processed_Data/Shifted_reanalysis/Z_700.RData')
# load the reanalysis data -- now load the SHUM
rm(list = ls())
list_SHUM_files = list.files(path = '/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/six_hourly/SHUM/')
SHUM = data.frame()
for(file_num in 1:length(list_SHUM_files)){
SHUM_ncdf = nc_open(paste0('/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/six_hourly/SHUM/',list_SHUM_files[file_num]))
# extract time and lat/lon from the file
time = ncvar_get(SHUM_ncdf, varid = "time", start = c(1), count = c(-1))
# convert time to dates and times
date_time = as.POSIXct(time*60*60, origin = "1800-01-01 00:00:00", tz = "UTC")
# now need to make a new date column that adds 12 hours to each time so that each day is now from the priors day 12z to the current days 12z
date_time_adjust = date_time + 12 * 60 * 60
lat = ncvar_get(SHUM_ncdf, varid = "lat", start = c(1), count = c(-1))
lon = ncvar_get(SHUM_ncdf, varid = "lon", start = c(1), count = c(-1))
level = ncvar_get(SHUM_ncdf, varid = "level", start = c(1), count = c(-1))
# shum[lon,lat,level,time] -- store all levels
tmp0 = melt(ncvar_get(SHUM_ncdf,
varid = "shum",
start = c(1,1,1,1),
count = c(-1,-1,-1,-1)))
# insert actual lat, lon, date values and average by day
tmp0 = tmp0 %>% dplyr::mutate(lon = lon[Var1],
lat = lat[Var2],
level = level[Var3],
                                date = as.Date(date_time_adjust[Var4], tz = "UTC")) %>%
dplyr::group_by(lat, lon, level, date) %>%
dplyr::summarise(SHUM = mean(value)) %>%
dplyr::select(lon, lat, level, date, SHUM)
# only retain Feb 20 through May 31st
# also only retain between 1950 and 2005
tmp0 = tmp0 %>% dplyr::filter(lubridate::month(date) %in% c(2,3,4,5) &
!(lubridate::month(date) == 2 & lubridate::day(date) < 20) &
lubridate::year(date) > 1949 &
lubridate::year(date) < 2006)
if(file_num == 1){tmp = tmp0}
if(file_num > 1){tmp = rbind(tmp, tmp0)}
print(file_num)
}
SHUM = tmp
save(SHUM, file = 'Processed_Data/Shifted_reanalysis/SHUM.RData')
# load the reanalysis data -- now load the OMG
rm(list = ls())
list_OMG_files = list.files(path = '/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/six_hourly/OMEGA/')
OMG = data.frame()
for(file_num in 1:length(list_OMG_files)){
OMG_ncdf = nc_open(paste0('/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/six_hourly/OMEGA/',list_OMG_files[file_num]))
# extract time and lat/lon from the file
time = ncvar_get(OMG_ncdf, varid = "time", start = c(1), count = c(-1))
# convert time to dates and times
date_time = as.POSIXct(time*60*60, origin = "1800-01-01 00:00:00", tz = "UTC")
# now need to make a new date column that adds 12 hours to each time so that each day is now from the priors day 12z to the current days 12z
date_time_adjust = date_time + 12 * 60 * 60
lat = ncvar_get(OMG_ncdf, varid = "lat", start = c(1), count = c(-1))
lon = ncvar_get(OMG_ncdf, varid = "lon", start = c(1), count = c(-1))
level = ncvar_get(OMG_ncdf, varid = "level", start = c(1), count = c(-1))
# omega[lon,lat,level,time] -- store all levels
tmp0 = melt(ncvar_get(OMG_ncdf,
varid = "omega",
start = c(1,1,1,1),
count = c(-1,-1,-1,-1)))
# insert actual lat, lon, date values and average by day
tmp0 = tmp0 %>% dplyr::mutate(lon = lon[Var1],
lat = lat[Var2],
level = level[Var3],
                                date = as.Date(date_time_adjust[Var4], tz = "UTC")) %>%
dplyr::group_by(lat, lon, level, date) %>%
dplyr::summarise(OMG = mean(value)) %>%
dplyr::select(lon, lat, level, date, OMG)
# only retain Feb 20 through May 31st
# also only retain between 1950 and 2005
tmp0 = tmp0 %>% dplyr::filter(lubridate::month(date) %in% c(2,3,4,5) &
!(lubridate::month(date) == 2 & lubridate::day(date) < 20) &
lubridate::year(date) > 1949 &
lubridate::year(date) < 2006)
if(file_num == 1){tmp = tmp0}
if(file_num > 1){tmp = rbind(tmp, tmp0)}
print(file_num)
}
OMG = tmp
save(OMG, file = 'Processed_Data/Shifted_reanalysis/OMG.RData')
# now load and save the U_200 monthly reanalysis fields
rm(list = ls())
source('R/GetSeasonDate.R')
# load and save the U_200 field -- monthly data
reanal_output = nc_open('/Users/davidfarnham/Google Drive/ORB_Paper/Raw_Data/REANALYSIS_data/u_200_REANALYSIS.nc')
time = ncvar_get(reanal_output, varid = "T", start = c(1), count = c(-1))
# data spans from jan 1949 to mar 2017
dates = data.frame(year = c(rep(1949:2016, each = 12),rep(2017,3)),
month = c(rep(1:12,(2016-1949+1)),1:3),
day = 1)
date = as.Date(paste(dates$year,
dates$month,
dates$day, sep = "-"),
origin = "1948-01-01 00:00:00")
lat = ncvar_get(reanal_output, varid = "Y", start = c(1), count = c(-1))
lon = ncvar_get(reanal_output, varid = "X", start = c(1), count = c(-1))
start_date = which(date > "1949-12-31" &
date < "2005-01-01")[1]
count_date = sum(date > "1949-12-31" &
date < "2005-01-01")
U_200 = melt(ncvar_get(reanal_output,
varid = "u",
start = c(1,1,1,start_date),
count = c(length(lon),length(lat),1,count_date)))
U_200 = U_200 %>% dplyr::mutate(lon = lon[Var1],
lat = lat[Var2],
date = date[which(date > "1949-12-31" &
date < "2005-01-01")][Var3],
season = GetSeasonDate(date),
u_200 = value) %>%
dplyr::select(c(date, season, lon, lat, u_200))
U_200_MAM = U_200 %>% dplyr::filter(season %in% c("MAM"))
save(U_200_MAM, file = 'Processed_Data/U_200_MAM.RData')
rm(list = ls())
|
b98935a01e556f4fae960d80540d1d2ce2b617db | 84dcc770d7766a3171efe7aa46e50dbcb496c9b5 | /Spatial_CUSUM/dependence.R | 421e982bb526cea849ec077468a32e4d32e3a775 | [] | no_license | xinzhang-nac/Spatial_CUSUM | 0b36e1a04526ea26578d86c41a2eaf1ffa980023 | 677eedf773af01525d788f2d2ee9932a1d8f4541 | refs/heads/master | 2020-04-09T03:58:59.994822 | 2019-05-15T14:31:29 | 2019-05-15T14:31:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,774 | r | dependence.R | multi_iter<-function(N.grid=100,shift=0,scale=0,times=10,k=5,alpha=0.05){
#generating data
Status<-rep(0,(N.grid)^2)
grid.point = expand.grid(x=seq(1,N.grid,by=1),y=seq(1,N.grid,by=1))
if(scale==0){
#nondependence
grid.point$Observed<-rnorm((N.grid)^2)
}else{
#dependence
model <- RMexp(var=1, scale=scale)
simu.alti <- RFsimulate(model, x=grid.point$x,y=grid.point$y)
grid.point$Observed = simu.alti@data$variable1
}
grid.point$Status<-Status
grid.point[which(grid.point$x<=0.5*N.grid&grid.point$x>=.2*N.grid&grid.point$y<=.5*N.grid&grid.point$y>=.2*N.grid),4]<-1
grid.point[which(grid.point$x<=0.5*N.grid&grid.point$x>=.3*N.grid&grid.point$y<=.5*N.grid&grid.point$y>=.3*N.grid),4]<-0
grid.point[which(grid.point$x<=0.9*N.grid&grid.point$x>=.6*N.grid&grid.point$y<=.9*N.grid&grid.point$y>=.6*N.grid),4]<-1
grid.point[which(grid.point$x<=0.78*N.grid&grid.point$x>=.73*N.grid&grid.point$y<=.9*N.grid&grid.point$y>=.85*N.grid),4]<-0
grid.point[which(grid.point$x<=0.78*N.grid&grid.point$x>=.73*N.grid&grid.point$y<=.65*N.grid&grid.point$y>=.6*N.grid),4]<-0
grid.point[which(grid.point$x<=0.8*N.grid&grid.point$x>=.7*N.grid&grid.point$y<=.8*N.grid&grid.point$y>=.7*N.grid),4]<-0
grid.point[grid.point$Status==1,]$Observed<-grid.point[grid.point$Status==1,]$Observed+shift
#testing whether stationary
#fdrl_method
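  # (pvalue_normal, FDRLMethod, and signal_detection are helper functions defined elsewhere in this repository)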
pvalues<-pvalue_normal(grid.point$Observed)
fdrl<-FDRLMethod(pvalues,grid.point$x,grid.point$y,window=(k-1)/2,alpha)
fdrl_Res<-grid.point$Status-fdrl$ind
fdrl_wrong<-length(fdrl_Res[fdrl_Res==-1])
fdrl_miss<-length(fdrl_Res[fdrl_Res==1])
fdrl_fdr<-fdrl_wrong/fdrl$numAlt
if(is.nan(fdrl_fdr)){fdrl_fdr=0}
detection_prob<-signal_detection(grid.point,k=k,times=times,N.grid=N.grid)
x_seq<-seq(0,1,length.out = times*k^2)
est <- lpdensity(data = detection_prob,grid=x_seq, bwselect = "imse-rot")$Estimate[,4]
est[est<0]=0
location<-as.numeric(names(which.min(est[x_seq>=quantile(detection_prob,0.5)])))
h0<-c(est[1:location],seq(est[location],0,length.out=length(x_seq)-location))
ratio<-rev(cumsum(rev(h0))/cumsum(rev(est)))
ret<-detection_prob>=(x_seq[which.max(ratio<=alpha)])
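  # h0 approximates the null density of the detection statistic; ratio is the estimated
  # tail FDR, and the cutoff is the smallest threshold whose estimated FDR is <= alpha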
result<-c(fdrl$ind,ret)
return(result)
}
# ggplot(grid.point)+geom_point(aes(x=x,y=y,colour=detection))+scale_color_continuous(low="white",high="black",limits=c(0,1))+theme(legend.position='none')
library(doParallel)
set.seed(1)
cl=makeCluster(13)
registerDoParallel(cl)
s_2=foreach(m_time=1:100,.combine=rbind,
.packages=c("RandomFields","foreach","lpdensity"),
.errorhandling="remove")%dopar%
{
multi_iter(N.grid=100,shift=1,scale=0.3,times=50,k=5,alpha=0.05)
}
stopCluster(cl)
saveRDS(s_2,"/vol/data/zhuz/xinzhang/spatial_cusum/dependence_2.rds")
set.seed(1)
cl=makeCluster(13)
registerDoParallel(cl)
s_1=foreach(m_time=1:100,.combine=rbind,
.packages=c("RandomFields","foreach","lpdensity"),
.errorhandling="remove")%dopar%
{
multi_iter(N.grid=100,shift=1,scale=0.1,times=50,k=5,alpha=0.05)
}
stopCluster(cl)
saveRDS(s_1,"/vol/data/zhuz/xinzhang/spatial_cusum/dependence_1.rds")
set.seed(1)
cl=makeCluster(13)
registerDoParallel(cl)
s_3=foreach(m_time=1:100,.combine=rbind,
.packages=c("RandomFields","foreach","lpdensity"),
.errorhandling="remove")%dopar%
{
multi_iter(N.grid=100,shift=1,scale=0.5,times=50,k=5,alpha=0.05)
}
stopCluster(cl)
saveRDS(s_3,"/vol/data/zhuz/xinzhang/spatial_cusum/dependence_3.rds")
library(ggplot2)
library(grid)
library(tidyr)
s1<-colMeans(readRDS("/Users/Allen/Documents/Research/CODE/R/Spatial_CUSUM/result2_image/dependence_1.rds"))
fdrl_1<-s1[1:10000]
cusum_1<-s1[10001:20000]
s2<-colMeans(readRDS("/Users/Allen/Documents/Research/CODE/R/Spatial_CUSUM/result2_image/dependence_2.rds"))
fdrl_2<-s2[1:10000]
cusum_2<-s2[10001:20000]
s3<-colMeans(readRDS("/Users/Allen/Documents/Research/CODE/R/Spatial_CUSUM/result2_image/dependence_3.rds"))
fdrl_3<-s3[1:10000]
cusum_3<-s3[10001:20000]
library(dplyr)
library(ggplot2)
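# The plotting and FDR-summary code below references `grid.point`, which only
# exists inside multi_iter(); rebuild it here exactly as in that function
# (N.grid = 100, same signal regions) so x, y and Status are available.
N.grid <- 100
grid.point <- expand.grid(x = seq(1, N.grid, by = 1), y = seq(1, N.grid, by = 1))
grid.point$Status <- 0
grid.point[grid.point$x <= .5*N.grid & grid.point$x >= .2*N.grid & grid.point$y <= .5*N.grid & grid.point$y >= .2*N.grid, "Status"] <- 1
grid.point[grid.point$x <= .5*N.grid & grid.point$x >= .3*N.grid & grid.point$y <= .5*N.grid & grid.point$y >= .3*N.grid, "Status"] <- 0
grid.point[grid.point$x <= .9*N.grid & grid.point$x >= .6*N.grid & grid.point$y <= .9*N.grid & grid.point$y >= .6*N.grid, "Status"] <- 1
grid.point[grid.point$x <= .78*N.grid & grid.point$x >= .73*N.grid & grid.point$y <= .9*N.grid & grid.point$y >= .85*N.grid, "Status"] <- 0
grid.point[grid.point$x <= .78*N.grid & grid.point$x >= .73*N.grid & grid.point$y <= .65*N.grid & grid.point$y >= .6*N.grid, "Status"] <- 0
grid.point[grid.point$x <= .8*N.grid & grid.point$x >= .7*N.grid & grid.point$y <= .8*N.grid & grid.point$y >= .7*N.grid, "Status"] <- 0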
plot_data <- grid.point %>% select(x, y) %>%
mutate(`FDRL with dependence scale=0.1` = fdrl_1,
`SCUSUM with dependence scale=0.1` = cusum_1,
`FDRL with dependence scale=0.3` = fdrl_2,
`SCUSUM with dependence scale=0.3` = cusum_2,
`FDRL with dependence scale=0.5` = fdrl_3,
`SCUSUM with dependence scale=0.5` = cusum_3)
all_data <- plot_data %>% gather(title, probability, -x, -y)
ggplot(all_data, aes(x = x, y = y, color = probability)) +
geom_point() +
scale_colour_continuous(limits=c(0,1),low="white",high="black") +
facet_wrap(~title, nrow = 2, ncol = 3) +
xlab("")+ylab("")+
theme(panel.grid.major =element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank(),axis.text = element_blank(),axis.ticks = element_blank(),legend.title=element_blank())
s_1<-readRDS("/Users/Allen/Documents/Research/CODE/R/Spatial_CUSUM/result2_image/dependence_1.rds")
fdrl_1<-mean(s_1[,1:10000]%*%(1-grid.point$Status)/rowSums(s_1[,1:10000]))
cusum_1<-mean(s_1[,10001:20000]%*%(1-grid.point$Status)/rowSums(s_1[,10001:20000]))
s_2<-readRDS("/Users/Allen/Documents/Research/CODE/R/Spatial_CUSUM/result2_image/dependence_2.rds")
fdrl_2<-mean(s_2[,1:10000]%*%(1-grid.point$Status)/rowSums(s_2[,1:10000]))
cusum_2<-mean(s_2[,10001:20000]%*%(1-grid.point$Status)/rowSums(s_2[,10001:20000]))
s_3<-readRDS("/Users/Allen/Documents/Research/CODE/R/Spatial_CUSUM/result2_image/dependence_3.rds")
fdrl_3<-mean(s_3[,1:10000]%*%(1-grid.point$Status)/rowSums(s_3[,1:10000]),na.rm=1)
cusum_3<-mean(s_3[,10001:20000]%*%(1-grid.point$Status)/rowSums(s_3[,10001:20000]))
|
488af0c79cf17980210e27a25c0f6cfa18a4330c | b437adefdb097c34f01f2470790ef8c6fe3648df | /scripts/Niche_Models/old/PaleoNicheModels.R | 9c03e6b940c8fb9e91cdee9067d4895eb2c66d8e | [] | no_license | kaiyaprovost/GDM_pipeline | 607887afed2f6faddb2584eebb9eb7ff0120fea1 | 05e8f5d0a46781d727b60fe913c94137b9b35824 | refs/heads/master | 2022-09-20T22:55:39.707325 | 2022-08-31T18:24:19 | 2022-08-31T18:24:19 | 237,048,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,768 | r | PaleoNicheModels.R | require(dismo)
require(ENMeval)
require(phyloclim)
require(sp)
require(rgdal)
require(rgeos)
require(ENMTools)
require(spocc)
taxon = 'Ambystoma_tigrinum'
#Build a best model
#get occ
occ = occ(taxon, from = 'gbif', limit = 900)
occdf = occ2df(occ)
summary(occ)
#get climate
# Env = stack("/data/spbio/climgrids/bio.gri")
Env = stack("~/envirem.gri")
ext = extent(c(-125, -40, 25, 60))
Env = crop(Env, ext)
#clean
loc = cbind(occdf$longitude, occdf$latitude)
loc = loc[loc[,1]<= -40,]
loc = na.omit(loc)
df = data.frame(occdf)
extr = extract(Env, cbind(df$longitude, df$latitude))
df = df[!is.na(extr[,1]),]
#thin
require(spThin)
thin<-thin(loc.data = df,
lat.col = "latitude",
long.col = "longitude",
spec.col = "name",
thin.par = 10,
reps = 10,
locs.thinned.list.return = T,
write.files = T,
max.files = 2,
out.dir = paste(taxon, "/", sep=''),
write.log.file = T)
newthin = read.csv(paste(taxon, "/thinned_data_thin1.csv", sep=''))
#Buffer background sampling: 200km
#library(ENMTools) #if not already loaded
#with all points (before thinning)
#bg1 = background.points.buffer(df[,2:3], radius = 500000, n = 5000, mask = Env[[1]])
#or after thinning
bg2 = background.points.buffer(newthin[,2:3], radius = 200000, n = 5000, mask = Env[[1]])
plot(Env[[1]], col = viridis::viridis(99))
points(bg2, col = 'grey')
points(newthin[,2:3], col = 'red', pch = 20)
#ENMeval model testing
thinres = ENMevaluate(occ=newthin[,2:3], env = Env, method='block', parallel=T, numCores=4, fc=c("L", "LQ", "H"), RMvalues=seq(1,4,1), rasterPreds=F)
print(thinres@results)
eval.plot(thinres@results, "Mean.AUC")
#predictbest
setsort = thinres@results[order(thinres@results[,'Mean.ORmin']),]
setsort2 = setsort[order(setsort[,'Mean.AUC'], decreasing=TRUE),]
top = setsort2[1,]
best.thin = which(as.character(thinres@results[,1]) == as.character(setsort2[1,1]))
pred.thin = predict(Env, thinres@models[[best.thin]])
plot(pred.thin, col=viridis::viridis(99))
#thresholding
ev.set <- evaluate(newthin[,2:3], [email protected], thinres@models[[best.thin]], Env)
th1 = threshold(ev.set)
p1.nomit = pred.thin >= th1$no_omission
p1.equal = pred.thin >= th1$equal_sens_spec ##probably this one is better
# Paleoclimate (LGM) projection
lgm = stack('/data/spbio/climgrids/miroc/lgm_envirem.gri')
names(lgm) = names(Env) #Make names match
lgm = crop(lgm, extent(Env))
lgm.pred.thin = predict(lgm, thinres@models[[best.thin]])
lgm.binary = lgm.pred.thin>= th1$equal_sens_spec
plot(lgm.binary+p1.equal, col = c('black','red', 'green', 'blue')) |
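# (added sketch) Quick present-vs-LGM comparison, assuming the binary rasters
# above: `stability` marks cells predicted suitable in both periods, and
# cellStats() counts suitable cells in each.
stability <- p1.equal & lgm.binary
plot(stability, col = c('grey90', 'forestgreen'))
cellStats(p1.equal, sum)   # suitable cells, present
cellStats(lgm.binary, sum) # suitable cells, LGM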
b356bccf7e4150dbe06e5ffd96033e1ae4b3f185 | 03f9b872f9e89453d1faf9b545d23fbad83bb303 | /R/run_cstacks.R | fce61a23878e1389bc4a8d961d719ff9aae70802 | [] | no_license | kawu001/stackr | 99fa54f4b4e1c8194550752bb238864597442c08 | 684b29b9895c773f48d0e58cba3af22fc2c98a56 | refs/heads/master | 2023-01-06T06:47:55.234575 | 2020-11-05T13:51:20 | 2020-11-05T13:51:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,236 | r | run_cstacks.R | #' @name run_cstacks
#' @title Run STACKS cstacks module
#' @description Run \href{http://catchenlab.life.illinois.edu/stacks/}{STACKS}
#' \href{http://catchenlab.life.illinois.edu/stacks/comp/cstacks.php}{cstacks}
#' module inside R! The function runs a summary of the log file automatically
#' at the end (\code{\link{summary_cstacks}}).
#' @param P path to the directory containing STACKS files.
#' Default: \code{P = "06_ustacks_2_gstacks"}.
#' Inside the folder \code{06_ustacks_2_gstacks}, you should have:
#' \itemize{
#' \item \strong{4 files for each samples:} The sample name is the prefix of
#' the files ending with:
#' \code{.alleles.tsv.gz, .models.tsv.gz, .snps.tsv.gz, .tags.tsv.gz}.
#' Those files are created in the
#' \href{http://catchenlab.life.illinois.edu/stacks/comp/ustacks.php}{ustacks}
#' module.
#' }
#' @param o Output path to write catalog.
#' Default: \code{o = "06_ustacks_2_gstacks"}
#' @param M path to a population map file (Required when P is used).
#' Default: \code{M = "02_project_info/population.map.catalog.tsv"}.
#' @param n number of mismatches allowed between sample loci when building the catalog.
#' Default: \code{n = 1}
#' @param parallel.core Enable parallel execution with num_threads threads.
#' Default: \code{parallel.core = parallel::detectCores() - 1}
#' @param catalog.path This is for the "Catalog editing" part in cstacks where
#' you can provide the path to an existing catalog.
#' cstacks will add data to this existing catalog.
#' With the default, \code{catalog.path = NULL}, the function scans
#' automatically for the presence of a catalog inside the input folder;
#' if none is found, a new catalog is created.
#' If your catalog is not in the input folder, supply its path here,
#' e.g. \code{catalog.path = ~/catalog_folder}; otherwise the catalog files are
#' expected inside the P folder along with the sample files and are detected
#' automatically.
#' If a catalog is detected in the input folder,
#' the samples in the \code{sample.list} argument
#' will be added in this catalog. The catalog is made of 3 files:
#' \code{catalog.alleles.tsv.gz, catalog.snps.tsv.gz, catalog.tags.tsv.gz}
#' @param max.gaps The number of gaps allowed between stacks before merging.
#' Default: \code{max.gaps = 2}
#' @param min.aln.len The minimum length of aligned sequence in a gapped
#' alignment.
#' Default: \code{min.aln.len = 0.8}
#' @param disable.gapped Disable gapped alignments between stacks.
#' Default: \code{disable.gapped = FALSE} (use gapped alignments).
#' @param k.len Specify k-mer size for matching between between catalog loci
#' (automatically calculated by default).
#' Advice: don't modify.
#' Default: \code{k.len = NULL}
#' @param report.mmatches Report query loci that match more than one catalog locus.
#' Advice: don't modify.
#' Default: \code{report.mmatches = FALSE}
#' @param split.catalog (integer) In how many samples you want to split the
#' catalog population map. This allows to have backup catalog every
#' \code{split.catalog} samples. Their is obviously a trade-off between the
#' integer use here, the time to initialize an existing catalog to often
#' and re-starting from zero if everything crash.
#' Default: \code{split.catalog = 20}.
#' @rdname run_cstacks
#' @export
#' @return \href{http://catchenlab.life.illinois.edu/stacks/comp/cstacks.php}{cstacks}
#' returns the catalog files:
#' \code{catalog.alleles.tsv.gz, catalog.snps.tsv.gz, catalog.tags.tsv.gz}
#' @details \strong{Computer or server problem during cstacks?} Look
#' in the log file to see which individuals remain to be included. Create a
#' new list of individuals to include and use the catalog.path argument to point
#' to the catalog created before the problem.
#' @examples
#' \dontrun{
#' # The simplest form of the function:
#' run_cstacks()
#' # that's it ! Now if you have your own workflow folders, etc. See below.
#' # Next example: let's say you only want to include 10 individuals per pop
#' # and include in the catalog samples with more than 2000000 reads. With the
#' # project info file in the global environment:
#' library(tidyverse)
#' individuals.catalog <- project.info.file %>%
#' filter(RETAINED > 2000000) %>%
#' group_by(POP_ID) %>%
#' sample_n(size = 10, replace = FALSE) %>%
#' ungroup %>%
#' arrange(desc(RETAINED)) %>%
#' distinct(INDIVIDUALS_REP, POP_ID)
#' # Write file to disk
#' readr::write_tsv(x = individuals.catalog,
#' path = "02_project_info/population.map.catalog.tsv")
#' # The next line will give you the list of individuals to include
#' individuals.catalog <- individuals.catalog$INDIVIDUALS_REP
#'
#' # To keep your info file updated with this information:
#' project.info.file <- project.info.file %>%
#' mutate(CATALOG = if_else(INDIVIDUALS_REP %in% individuals.catalog,
#' true = "catalog", false = "not_catalog")
#' )
#' write_tsv(project.info.file, "project.info.catalog.tsv")
#'
#' # Then run the command this way:
#' run_cstacks (
#' P = "06_ustacks_2_gstacks",
#' catalog.path = NULL,
#' n = 1,
#' parallel.core = 32,
#' max.gaps = 2, min.aln.len = 0.8,
#' k.len = NULL, report.mmatches = FALSE
#' )
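#'
#' # If a run crashed part-way, point catalog.path to the most recent backup
#' # catalog written by the split.catalog mechanism (hypothetical folder name):
#' # run_cstacks(catalog.path = "08_stacks_results/catalog_temp_3")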
#' }
#' @seealso
#' \href{http://catchenlab.life.illinois.edu/stacks/comp/sstacks.php}{sstacks}
#' @references Catchen JM, Amores A, Hohenlohe PA et al. (2011)
#' Stacks: Building and Genotyping Loci De Novo From Short-Read Sequences.
#' G3, 1, 171-182.
#' @references Catchen JM, Hohenlohe PA, Bassham S, Amores A, Cresko WA (2013)
#' Stacks: an analysis tool set for population genomics.
#' Molecular Ecology, 22, 3124-3140.
run_cstacks <- function(
P = "06_ustacks_2_gstacks",
o = "06_ustacks_2_gstacks",
M = "02_project_info/population.map.catalog.tsv",
catalog.path = NULL,
n = 1,
parallel.core = parallel::detectCores() - 1,
max.gaps = 2, min.aln.len = 0.8, disable.gapped = FALSE,
k.len = NULL, report.mmatches = FALSE,
split.catalog = 20
) {
# TEST
# P = "06_ustacks_2_gstacks"
# o = "06_ustacks_2_gstacks"
# M = "02_project_info/population.map.catalog.tsv"
# n = 3
# parallel.core = parallel::detectCores() - 1
# catalog.path = NULL
# catalog.path = "/Volumes/THIERRY_MAC/sturgeons_saskatchewan/sturgeon_saskatchewan/catalog_test/catalog_2"
# max.gaps = 2
# min.aln.len = 0.8
# disable.gapped = FALSE
# k.len = NULL
# report.mmatches = FALSE
# h = FALSE
# split.catalog = 20
cat("#######################################################################\n")
cat("######################## stackr::run_cstacks ##########################\n")
cat("#######################################################################\n")
timing <- proc.time()
# Check directory ------------------------------------------------------------
if (!dir.exists(P)) stop("Missing P directory")
if (!dir.exists("09_log_files")) dir.create("09_log_files")
if (!dir.exists("08_stacks_results")) dir.create("08_stacks_results")
# spliting catalog -----------------------------------------------------------
pop.map <- readr::read_tsv(file = M, col_names = c("INDIVIDUALS", "STRATA"), col_types = "cc")
if (nrow(pop.map) < split.catalog) {
split.catalog <- nrow(pop.map)
}
if (is.null(catalog.path)) catalog.path <- "06_ustacks_2_gstacks"
split.catalog <- split_pop_map(
pop.map = pop.map,
split.catalog = split.catalog,
catalog.path = catalog.path
)
message("Number of catalog split: ", length(split.catalog$list.catalog.numbers), "\n")
# run cstacks on split pop map -----------------------------------------------
purrr::pwalk(
.l = list(
catalog.id = split.catalog$list.catalog.numbers,
o = split.catalog$catalog.output,
M = split.catalog$catalog.pop.map,
catalog.path = split.catalog$catalog.path
),
.f = run_split_cstacks,
P = "06_ustacks_2_gstacks",
n = n,
parallel.core = parallel.core,
max.gaps = max.gaps,
min.aln.len = min.aln.len,
disable.gapped = disable.gapped,
k.len = k.len,
report.mmatches = report.mmatches
)
# Copy the last catalog to the output folder ---------------------------------
file.copy(
from = list.files(
path = split.catalog$catalog.output[length(split.catalog$list.catalog.numbers)],
full.names = TRUE
),
to = P,
overwrite = TRUE,
recursive = TRUE,
copy.date = TRUE
)
message("\nCatalog files written in: ", P)
timing <- proc.time() - timing
message("\nOverall computation time: ", round(timing[[3]]), " sec")
cat("############################## completed ##############################\n")
} # End run_cstacks
# split_pop_map ----------------------------------------------------------------
#' @title split_pop_map
#' @description Split the catalog map file
#' @rdname split_pop_map
#' @export
#' @keywords internal
split_pop_map <- function(pop.map, split.catalog, catalog.path) {
n.ind <- nrow(pop.map)
pop.map.temp <- tibble::add_column(
.data = pop.map,
SPLIT_VEC = sort(rep.int(x = 1:ceiling(n.ind/split.catalog), times = split.catalog))[1:n.ind]
) %>%
dplyr::group_by(SPLIT_VEC) %>%
dplyr::group_split(.tbl = .)
write_catalog_pop_map <- function(pop.map.temp) {
c.id <- unique(pop.map.temp$SPLIT_VEC)
c.path <- paste0("08_stacks_results/catalog_temp_", c.id)
if (!dir.exists(c.path)) dir.create(c.path)
c.filename <- file.path("02_project_info", paste0("pop_map_catalog_", c.id, ".tsv"))
readr::write_tsv(
x = dplyr::select(pop.map.temp, -SPLIT_VEC),
path = c.filename,
col_names = FALSE
)
return(res = list(catalog.path = c.path, catalog.pop.map = c.filename))
} #End write_catalog_pop_map
catalog.info <- purrr::map(.x = pop.map.temp, .f = write_catalog_pop_map)
return(
list(
list.catalog.numbers = 1:length(pop.map.temp),
catalog.output = purrr::map_chr(catalog.info, 1),
catalog.path = c(catalog.path, purrr::map_chr(catalog.info, 1)[c(1:(length(pop.map.temp) - 1))]),
catalog.pop.map = purrr::map_chr(catalog.info, 2)
)
)
}#End split_pop_map
# run_split_cstacks ----------------------------------------------------------------
#' @title run_split_cstacks
#' @description The function that runs cstacks
#' @rdname run_split_cstacks
#' @export
#' @keywords internal
run_split_cstacks <- function(
catalog.id = NULL,
o = "06_ustacks_2_gstacks",
M = "02_project_info/population.map.catalog.tsv",
catalog.path = NULL,
P = "06_ustacks_2_gstacks",
n = 1,
parallel.core = parallel::detectCores() - 1,
max.gaps = 2,
min.aln.len = 0.8,
disable.gapped = FALSE,
k.len = NULL,
report.mmatches = FALSE
) {
timing.catalog <- proc.time()
if (!is.null(catalog.id)) message("Generating catalog: ", catalog.id)
# o = split.catalog$catalog.path
# M = split.catalog$catalog.pop.map
# Existing catalog -----------------------------------------------------------
if (is.null(catalog.path)) { # no catalog path, searching in the input path...
old.catalog <- list.files(path = P, pattern = "catalog")
detect.rxstacks.lnls <- list.files(path = P, pattern = "rxstacks_lnls")
if (length(detect.rxstacks.lnls) == 1) {
old.catalog <- purrr::discard(.x = old.catalog, .p = old.catalog %in% detect.rxstacks.lnls)
}
if (length(old.catalog) > 0 & length(old.catalog) == 3) {
message("Found a catalog in the input folder, using files: ")
message(stringi::stri_join(old.catalog, "\n"))
catalog.path <- stringi::stri_replace_all_fixed(
str = old.catalog[1],
pattern = ".catalog.alleles.tsv.gz",
replacement = "",
vectorize_all = FALSE
)
catalog.path <- stringi::stri_join(P, "/", catalog.path)
catalog.path <- stringi::stri_join("--catalog ", shQuote(catalog.path))
}
if (length(old.catalog) > 0 & length(old.catalog) < 3) {
stop("Incomplete catalog, 3 files are required, see argument documentation")
}
if (length(old.catalog) == 0) {
message("Builing catalog for the first time")
catalog.path <- ""
}
} else {
# Building catalog for the first time --------------------------------------
old.catalog <- list.files(path = catalog.path, pattern = "catalog")
old.catalog <- purrr::keep(.x = old.catalog, .p = !old.catalog %in% "catalog.sql.ids.tsv")
if (length(old.catalog) > 0) {
if (length(old.catalog) < 3) {
stop("Incomplete catalog, 3 files are required, see argument documentation")
}
message("Existing catalog: yes")
message(stringi::stri_join(old.catalog, "\n"))
catalog.path <- file.path(
catalog.path,
stringi::stri_replace_all_fixed(
str = old.catalog[1],
pattern = ".alleles.tsv.gz",
replacement = "",
vectorize_all = FALSE
))
catalog.path <- stringi::stri_join("--catalog ", shQuote(catalog.path))
}
if (length(old.catalog) == 0) {
message("Builing catalog for the first time")
catalog.path <- ""
}
}
# cstacks options ------------------------------------------------------------
# P <- stringi::stri_join("-P ", P)
# M <- stringi::stri_join("-M ", M)
# sample for catalog
sc <- readr::read_tsv(file = M, col_names = c("INDIVIDUALS", "STRATA"), col_types = "cc") %>%
dplyr::select(INDIVIDUALS) %>%
purrr::flatten_chr(.)
sc <- stringi::stri_join("-s ", file.path(P, sc), collapse = " ")
n <- stringi::stri_join("-n ", n)
p <- stringi::stri_join("-p ", parallel.core)
o <- stringi::stri_join("-o ", o)
# gapped assembly options ---------------------------------------------------
max.gaps <- stringi::stri_join("--max-gaps ", max.gaps)
min.aln.len <- stringi::stri_join("--min-aln-len ", min.aln.len)
if (disable.gapped) {
disable.gapped <- stringi::stri_join("--disable-gapped ")
} else {
disable.gapped <- ""
}
# Advanced options -----------------------------------------------------------
if (is.null(k.len)) {
k.len <- ""
} else {
k.len <- stringi::stri_join("--k-len ", k.len)
}
if (report.mmatches) {
report.mmatches <- stringi::stri_join("--report-mmatches ")
} else {
report.mmatches <- ""
}
# logs files -----------------------------------------------------------------
file.date.time <- format(Sys.time(), "%Y%m%d@%H%M")
cstacks.log.file <- stringi::stri_join("09_log_files/cstacks_", file.date.time,".log")
message(stringi::stri_join("For progress, look in the log file:\n", cstacks.log.file))
# command args ---------------------------------------------------------------
command.arguments <- paste(
# P, M,
sc,
n, p, catalog.path, o,
max.gaps, min.aln.len, disable.gapped,
k.len, report.mmatches
)
# command
system2(command = "cstacks", args = command.arguments, stderr = cstacks.log.file)
# Summary cstacks ------------------------------------------------------------
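  # run a summary of the cstacks log (see summary_cstacks in the package);
  # the returned object is discarded on purpose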
sum <- stackr::summary_cstacks(
cstacks.log = cstacks.log.file,
verbose = FALSE
)
sum <- NULL
if (!is.null(catalog.id)) {
timing.catalog <- proc.time() - timing.catalog
message("\nComputation time to build catalog ", catalog.id, ": ", round(timing.catalog[[3]]), " sec\n")
}
}# end run_split_cstacks
|
ad51dfdc6ffe52cdeee4f8e74a042b5b6d3d1976 | a0d43f26abeafbd8c159b9afbfca2e7582636092 | /Mestre dos Derivativos/Volatilidade.R | af67b3c10167da634b35045fbccbb6f8a04838ce | [
"MIT"
] | permissive | tarsoqueiroz/Rlang | f22038a0ada392d641cafecee1a0a91ba8574110 | b2d4fdd967ec376fbf9ddb4a7250c11d3abab52e | refs/heads/master | 2021-06-02T11:47:56.188320 | 2021-04-09T20:37:38 | 2021-04-09T20:37:38 | 132,541,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,770 | r | Volatilidade.R | #
# Ensaio para Volatilidade
#
# Source in "Mestre dos Derivativos/Ensaios"
# Vol BMF&Bovespa - 2018.08.13 0.0203835155197 0.0466064087617 0.0372254792514 0.0299117135965 <=
# Base MT5 - 2018.08.08 0.02071104 0.04883672 0.03799101 0.03005564
# 2018.08.09 0.02042073 0.04778096 0.03775854 0.03005287 ***
# 2018.08.10 0.02182746 0.04767734 0.03766033 0.03014030
# Vol BMF&Bovespa - 2018.08.14 0.0220324806313 0.0466104580764 0.0371338926280 0.0299686443762 <=
# Base MT5 - 2018.08.10 0.02187254892 0.04769893185 0.03768746170 0.03015367214
# 2018.08.13 0.02150177215 0.04780651293 0.03766180030 0.03013223562
# 2018.08.14 0.02152028894 0.04760380083 0.03761695170 0.03011460879
# Vol BMF&Bovespa - 2018.08.15 0.0215387569771 0.0464104720842 0.0370694165307 0.0299651303758
# Base MT5 - 2018.08.13 0.02150177215 0.04780651293 0.03766180030 0.03013223562
# 2018.08.14 0.02152028894 0.04760380083 0.03761695170 0.03011460879
# 2018.08.15 0.02400055762 0.04780177191 0.03783459221 0.03027172255
# Vol BMF&Bovespa - 2018.08.16 0.0236100678421 0.0466216860583 0.0373072960817 0.0301250864515
# Base MT5 - 2018.08.14 0.02152029 0.04760380 0.03761695 0.03011461
# 2018.08.15 0.02400056 0.04780177 0.03783459 0.03027172
# 2018.08.16 0.02364351 0.04767408 0.03777023 0.03026831 ***
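# Annualization check: daily log-return sd * sqrt(252) * 100 -> volatility in % per year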
c(0.02042073, 0.04778096, 0.03775854, 0.03005287) * (sqrt(252) * 100)
c(0.0203835155197, 0.0466064087617, 0.0372254792514, 0.0299117135965) * (sqrt(252) * 100)
tickDayPetr4 <- read.csv(file.choose(), sep = "\t", header = TRUE)
head(tickDayPetr4)
tickClosePetr4 <- tickDayPetr4$X.CLOSE.
tickClosePetr4
plot(tickClosePetr4,
type = 'l',
main = "PETR4",
xlab = "Data",
ylab = "R$")
length(tickClosePetr4)
# rolling historical volatility: preallocate result vectors (same semantics as
# the original growing-vector loop, without appending inside the loop)
n <- length(tickClosePetr4)
diffClosePetr4   <- numeric(n)
varClosePetr4    <- numeric(n)
logVarClosePetr4 <- numeric(n)
sd021Petr4 <- numeric(n)
sd063Petr4 <- numeric(n)
sd126Petr4 <- numeric(n)
sd252Petr4 <- numeric(n)
for (nSx1 in seq_len(n)[-1]) {
  diffClosePetr4[nSx1]   <- tickClosePetr4[nSx1] - tickClosePetr4[nSx1 - 1]
  varClosePetr4[nSx1]    <- tickClosePetr4[nSx1] / tickClosePetr4[nSx1 - 1]
  logVarClosePetr4[nSx1] <- log(varClosePetr4[nSx1])
  # each window is inclusive: (nSx1 - w):nSx1 spans w + 1 observations
  if (nSx1 > 21)  sd021Petr4[nSx1] <- sd(logVarClosePetr4[(nSx1 - 21):nSx1])
  if (nSx1 > 63)  sd063Petr4[nSx1] <- sd(logVarClosePetr4[(nSx1 - 63):nSx1])
  if (nSx1 > 126) sd126Petr4[nSx1] <- sd(logVarClosePetr4[(nSx1 - 126):nSx1])
  if (nSx1 > 252) sd252Petr4[nSx1] <- sd(logVarClosePetr4[(nSx1 - 252):nSx1])
}
volPetr4 <- data.frame(tickDayPetr4$X.DATE.,
tickDayPetr4$X.CLOSE.,
diffClosePetr4,
varClosePetr4,
logVarClosePetr4,
sd021Petr4,
sd063Petr4,
sd126Petr4,
sd252Petr4)
colnames(volPetr4) <- c("Dt", "Close", "Diff", "Var", "Log", "sd21d", "sd63d", "sd126d", "sd252d")
tail(volPetr4)
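# (added sketch) A vectorized alternative to the rolling-sd loop above, using
# the 'zoo' package (assumed available); window widths match the loop's
# inclusive windows, since (nSx1 - w):nSx1 spans w + 1 observations.
library(zoo)
logRet <- c(0, diff(log(tickClosePetr4)))
sd021alt <- rollapplyr(logRet, width = 22, FUN = sd, fill = 0)
sd252alt <- rollapplyr(logRet, width = 253, FUN = sd, fill = 0)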
|
4903c3f6e5539cbc83ce1a5dc0328e2054866b32 | d033124f40b197e390209da730fb87e8639426c6 | /inst/app/simple_app.R | 6edcd6688200de48f2df57ebe6df4cea745033c1 | [
"MIT"
] | permissive | hamilton-institute/hamiltonThemes | 677ffa263cd8225386093208c1c4a8219a938672 | eacbf6b0917fd3fd7592dc28388855fb378a3654 | refs/heads/master | 2023-01-25T04:40:53.968597 | 2020-12-08T19:36:59 | 2020-12-08T19:36:59 | 311,664,902 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,025 | r | simple_app.R | ui <- bs4Dash::bs4DashPage(
sidebar_collapsed = TRUE,
sidebar_mini = FALSE,
body = bs4Dash::bs4DashBody(
hamiltonThemes::use_bs4Dash_distill_theme(),
hamiltonThemes::use_bs4Dash_distill_css(),
shiny::fluidRow(
bs4Dash::column(
width = 4,
shiny::br(),
shiny::selectInput(
"variable",
label = "Select a variable:",
choices = names(mtcars)
)
),
bs4Dash::column(
width = 8,
shiny::br(),
shiny::h4(style = "color: black;", "Figure guide:"),
shiny::p(style = "color: black;", "Each point relates to a car model."),
hamiltonThemes::distill_load_spinner(shiny::plotOutput("plot"))
)
)
),
footer = bs4dash_distill_footer()
)
server <- function(input, output, session) {
  output$plot <- shiny::renderPlot({
    Sys.sleep(2) # artificial delay so the loading spinner is visible
ggplot2::ggplot(
mtcars,
ggplot2::aes_string(x = input$variable, y = "mpg")
) +
ggplot2::geom_point()
})
}
shiny::shinyApp(ui, server)
|
7fbce5a94178b7228b11a3608f91b9ba9a3d80af | 82ff8a6cf9c4c6a871f57fc9418ca656bde9ec0a | /R/configuration.R | e32456029580948d15c2b20f03ca464e9031e8bb | [
"MIT"
] | permissive | mhkhan27/rhdx | 08800dc13e6f0a1ba9725845ddc2e96e8abf509e | c443336b2d9e2d57dc6c0f6a81ab7bc6db3a631c | refs/heads/master | 2023-08-14T09:47:20.267482 | 2021-10-13T09:09:40 | 2021-10-13T09:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,165 | r | configuration.R | #' HDX Configuration
#'
#' HDX Configuration allow to connect to an HDX server
#' and setup project where you can interact with the HDX platform
#'
#' @format NULL
#' @usage NULL
#'
#' @importFrom tools file_ext
#' @importFrom yaml read_yaml
#' @importFrom jsonlite read_json
#' @importFrom crul HttpClient
#'
#' @examples
#' \dontrun{
#' set_rhdx_config(hdx_site = "prod")
#' get_rhdx_config()
#' }
HDXConfig <- R6::R6Class(
classname = "HDXConfig",
private = list(shared = .rhdx_env),
public = list(
#' @field data all info in list.
data = list(),
#' @description
#' Create a new Configuration object.
#'
#' @importFrom tools file_ext
#' @importFrom yaml read_yaml
#' @importFrom jsonlite read_json
#' @importFrom crul HttpClient
#'
#' @param hdx_site character the server instance to use
#' @param hdx_key character, the HDX API key
#' @param hdx_config configuration in a list
#' @param hdx_config_file a character value config file.
#' default is the config supplied in the package
#' @param read_only a logical value indicating if you want to just read
    #' or also be able to write to the HDX server. You will need an API key to write.
#' @param user_agent a character value, User agent
#' @return A new Configuration object.
initialize = function(hdx_site = "prod", hdx_key = NULL,
hdx_config = NULL, hdx_config_file = NULL,
read_only = TRUE, user_agent = NULL) {
check_config_params(hdx_site = hdx_site, hdx_key = hdx_key,
hdx_config_file = hdx_config_file,
read_only = read_only, user_agent = user_agent)
if (!is.null(hdx_config_file) & !is.null(hdx_config))
stop("You need to have just one config parameter: `hdx_config_file` or `hdx_config`",
call. = FALSE)
if (is.null(hdx_config_file) & is.null(hdx_config))
hdx_config <- read_yaml(system.file("config",
"hdx_base_configuration.yml",
package = "rhdx"))
if (!is.null(hdx_config_file) & is.null(hdx_config)) {
      config_ext <- file_ext(hdx_config_file)
      if (!config_ext %in% c("yml", "json"))
        stop("Only YAML and JSON configuration files are supported for the moment!",
             call. = FALSE)
      hdx_config <- switch(config_ext,
yml = read_yaml(hdx_config_file),
json = read_json(hdx_config_file,
simplifyVector = FALSE))
}
self$data$hdx_config <- hdx_config
self$data$hdx_site <- hdx_site
hdx_site <- paste0("hdx_", hdx_site, "_site")
self$data$hdx_key <- hdx_key
self$data$read_only <- read_only
headers <- NULL
if (isFALSE(read_only))
headers <- list(`X-CKAN-API-Key` = hdx_key)
if (is.null(user_agent))
user_agent <- get_user_agent()
self$data$remoteclient <- HttpClient$new(url = self$data$hdx_config[[hdx_site]]$url,
headers = headers,
opts = list(http_version = 2L,
useragent = user_agent))
},
#' @description
#' Configuration credentials when using a HDX API key
#' @importFrom base64enc base64decode
#' @return the username and password associated to the HDX API key
get_credentials = function() {
hdx_site <- paste0("hdx_", self$data$hdx_site, "_site")
lapply(self$data$hdx_config[[hdx_site]][c("username", "password")],
function(x) {
if (is.null(x)) {
x
} else {
rawToChar(base64decode(x))
}
})
},
#' @description
#' Create or revoke read only status
#'
    #' @param read_only a logical value indicating if you want to just read or also be able to write to the HDX server. You will need an API key to write.
set_read_only = function(read_only = TRUE) {
hdx_site <- paste0("hdx_", self$data$hdx_site, "_site")
headers <- NULL
if (isFALSE(read_only))
headers <- list(`X-CKAN-API-Key` = self$data$hdx_key)
self$data$remoteclient <- crul::HttpClient$new(url = self$data$hdx_config[[hdx_site]]$url,
headers = headers,
opts = list(http_version = 2,
useragent = get_user_agent()))
},
#' @description
#' Specify a HDX API key
#'
#' @param hdx_key a character with key
set_hdx_key = function(hdx_key) {
      if (!is_valid_uuid(hdx_key))
        stop("hdx_key not valid!", call. = FALSE)
self$data$hdx_key <- hdx_key
},
#' @description
    #' Get the HDX API key in use
#'
#' @return a character, the HDX API key
get_hdx_key = function() {
self$data$hdx_key
},
#' @description
#' Specify a HDX server to use
#'
#' @importFrom crul HttpClient
#' @param hdx_site a character, the server type to use,
    #' `prod`, `demo`, `stage`, `feature` or `dev`
    #' @return no return value, called for its side effects
set_hdx_site = function(hdx_site = "prod") {
if (!hdx_site %in% c("prod", "demo", "stage", "feature", "dev"))
stop("hdx_site can be either `prod`, `demo`, `stage`, `feature`, or `dev`",
call. = FALSE)
self$data$hdx_site <- hdx_site
hdx_site <- paste0("hdx_", hdx_site, "_site")
headers <- drop_nulls(list(`X-CKAN-API-Key` = self$data$hdx_key))
self$data$remoteclient <- HttpClient$new(url = self$data$hdx_config[[hdx_site]]$url,
headers = headers,
opts = list(http_version = 2,
useragent = get_user_agent()))
},
#' @description
#' Get the HDX server in use
#' @return the server type
get_hdx_site = function() {
self$data$hdx_site
},
#' @description
#' Get the HDX server URL in use
#' @return the server URL
get_hdx_site_url = function() {
hdx_site <- paste0("hdx_", self$data$hdx_site, "_site")
self$data$hdx_config[[hdx_site]]$url
},
#' @description
#' Get the remoteclient currently used
#' @return a crul::HttpClient
remoteclient = function() {
self$data$remoteclient
},
#' @description
#' Call the client to the HDX API
#'
#' @param action a character
#' @param ... parameters for each verb used
#' @param verb a character the verb used, `post`, `get`, `put` or `patch`
#' @return list a with status code and results
call_action = function(action, ..., verb = "get") {
if (!verb %in% c("post", "get", "put", "patch"))
stop("Only `get`, `post`, `put` and `patch` are supported!")
cli <- self$data$remoteclient
action_path <- paste0("/api/3/action/", action)
res <- cli$verb(verb, path = action_path, ...)
parse_response(res)
},
#' @description
#' read and show Configuration object
#' @return Configuration object
read = function() {
self
},
#' @description
#' Setup Configuration object
#'
#' @param hdx_site a character value, the server
#' @param hdx_config a list
    #' @param configuration a Configuration (HDXConfig) object
#' @param hdx_key a character value, the API key
#' @param read_only a logical value read only
setup = function(hdx_site = "prod", hdx_key = NULL, read_only = TRUE,
hdx_config = NULL, configuration = NULL) {
if (!hdx_site %in% c("prod", "demo", "stage", "feature", "dev"))
stop("hdx_site can be either `prod`, `demo`, `stage`, `feature`, or `dev`",
call. = FALSE)
if (!is.null(configuration)) {
        if (!inherits(configuration, "HDXConfig"))
          stop("Not a 'HDXConfig' object!", call. = FALSE)
private$shared$configuration <- configuration
} else {
private$shared$configuration <- HDXConfig$new(hdx_site = hdx_site,
hdx_key = hdx_key,
read_only = read_only,
hdx_config = hdx_config)
}
},
#' @description
#' Delete a Configuration object
delete = function() {
private$shared$configuration <- NULL
},
    #' @description
    #' Access the global Configuration
#' @return list with HDX configuration information
get_global_config = function() {
self$data$hdx_config
},
#' @description
#'
#' Get general statistics about the server
#'
#' @importFrom jsonlite read_json
#'
#' @return list with statistics about the server
general_statistics = function() {
res <- self$data$remoteclient$get(path = "/api/3/action/hdx_general_statistics")
read_json(res$parse(encoding = "UTF-8"),
simplifyVector = TRUE)$result
},
#' @description
#' Convert configuration to list
#' @return configuration in list format
as_list = function() {
self$data
},
#' @description
#' Print Configuration object
print = function() {
cat("<HDX Configuration> ", sep = "\n")
cat(paste0(" HDX site: ", self$get_hdx_site()), sep = "\n")
cat(paste0(" HDX site url: ", self$get_hdx_site_url()), sep = "\n")
cat(paste0(" HDX API key: ", self$get_hdx_key()), sep = "\n")
invisible(self)
}
)
)
#' Create an HDX configuration object
#'
#' Create and HDX configuration object
#' @param hdx_site Character to specify which HDX server you want to use. Default to "prod".
#' @param hdx_key Character for the CKAN API key, it is required to push data into HDX
#' @param hdx_config List of HDX configuration
#' @param hdx_config_file Character, path of the HDX config file in JSON and YAML format
#' @param read_only Logical if `FALSE` and hdx_key provided is correct
#' you can push metadata and data to HDX
#' @return An HDX Configuration object
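#' @examples
#' \dontrun{
#' # a minimal sketch using the demo server
#' conf <- create_rhdx_config(hdx_site = "demo")
#' conf
#' }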
#' @export
create_rhdx_config <- function(hdx_site = "prod", hdx_key = NULL,
read_only = TRUE, hdx_config = NULL, hdx_config_file = NULL) {
HDXConfig$new(hdx_site = hdx_site, hdx_key = hdx_key,
read_only = read_only, hdx_config = hdx_config,
hdx_config_file = hdx_config_file)
}
#' Set rhdx config
#'
#' Sets the configuration settings for using rhdx.
#'
#' @param hdx_site Character to specify which HDX server you want to use. Default to "prod".
#' @param hdx_key Character for the CKAN API key, it is required to push data into HDX
#' @param hdx_config List of HDX configuration
#' @param hdx_config_file Character, path of the HDX config file in JSON and YAML format
#' @param read_only Logical if `FALSE` and hdx_key provided is correct
#' you can push metadata and data to HDX
#' @param configuration Configuration object.
#'
#' @rdname set_rhdx_config
#'
#' @details Setting up a configuration will help you access data from an HDX server
#'
#'
#' @return Invisibly returns the rhdx config object
#' @export
#'
#' @examples
#' \dontrun{
#' # Setting the config to use HDX default server
#' set_rhdx_config(hdx_site = "demo")
#'
#' # You can check your configuration using \code{get_rhdx_config}
#' config <- get_rhdx_config()
#' config
#' }
set_rhdx_config <- function(hdx_site = "prod", hdx_key = NULL, read_only = TRUE, hdx_config = NULL, hdx_config_file = NULL, configuration = NULL) {
  if (!is.null(configuration) && inherits(configuration, "HDXConfig")) {
.rhdx_env$configuration <- configuration
} else {
.rhdx_env$configuration <- HDXConfig$new(hdx_site = hdx_site,
hdx_key = hdx_key,
read_only = read_only,
hdx_config = hdx_config,
hdx_config_file = hdx_config_file)
}
}
#' @rdname set_rhdx_config
#' @export
get_rhdx_config <- function() {
configuration <- .rhdx_env$configuration
assert_configuration(configuration)
configuration$read()
}
#' Delete rhdx config
#'
#' Delete the configuration settings for using rhdx.
#'
#' @rdname delete_rhdx_config
#'
#' @details Delete HDX config
#'
#'
#' @return None
#' @export
#'
#' @examples
#' \dontrun{
#' # Setting the config to use HDX default server
#' set_rhdx_config(hdx_site = "prod")
#' get_rhdx_config()
#'
#' delete_rhdx_config()
#' get_rhdx_config()
#' }
delete_rhdx_config <- function() {
configuration <- get_rhdx_config()
configuration$delete()
}
#' Get general stats about HDX
#'
#' Get some stats about HDX
#'
#' @return A list
#' @export
hdx_general_statistics <- function() {
configuration <- get_rhdx_config()
configuration$general_statistics()
}
|
ed04ac0d4106893e07e54c7aae66a070c91bb18f | f6839b533bdf2aaed9ca6dcd18b33a797d2c976c | /621/Assignment3/Professors Code/Clewlow3_13.R | 0b70e82f7ab4a5040a72e9bfe4a6c8d6c059b7a3 | [] | no_license | saeed349/R-Projects | 68fe11b57145daaf166381188844c968c424bcab | 504ee4741effd8ba3a0e1b706c238e08acbdca60 | refs/heads/master | 2021-01-09T05:28:25.377394 | 2017-05-16T22:11:54 | 2017-05-16T22:11:54 | 80,773,896 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,055 | r | Clewlow3_13.R | Clewlow3_13 = function(isCall, K=100, Tm=1,
S0=100, r=0.06, sig=0.2, N=3, div=0.03, dx=0.2)
{
# Implicit Finite Difference Method: i times, 2*i+1 final nodes
# Precompute constants ----
dt = Tm/N
nu = r - div - 0.5 * sig^2
edx = exp(dx)
# got the constants formulas from clewlow 3.33,3.34,3.35
pu = -0.5 * dt * ( (sig/dx)^2 + nu/dx )
pm = 1.0 + dt * (sig/dx)^2 + r*dt
pd = -0.5 * dt * ( (sig/dx)^2 - nu/dx)
firstRow = 1
nRows = lastRow = 2*N+1
firstCol = 1
middleRow = nCols = lastCol = N+1
cp = ifelse(isCall, 1, -1)
# Intialize asset price, derivative price, primed probabilities ----
pp = pmp = V = S = matrix(0, nrow=nRows, ncol=nCols, dimnames=list(
paste("NumUps=",(nCols-1):-(nCols-1), sep=""),
paste("Time=",round(seq(0, 1, len=nCols),4),sep="")))
S[middleRow, firstCol] = S0
for (i in 1:(nCols-1)) {
for(j in (middleRow-i+1):(middleRow+i-1)) {
S[j-1, i+1] = S[j, i] * exp(dx)
S[j , i+1] = S[j, i]
S[j+1, i+1] = S[j, i] * exp(-dx)
}
}
# Intialize option values at maturity ----
for (j in firstRow:lastRow) {
V[j, lastCol] = max( 0, cp * (S[j, lastCol]-K))
}
# Compute Derivative Boundary Conditions ----
  # From equations 3.38 and 3.39 in Clewlow; with lambdaU = 0 these
  # derivative boundary conditions correspond to a put
lambdaL = -1 * (S[lastRow-1, lastCol] - S[lastRow,lastCol])
lambdaU = 0
# Step backwards through the lattice ----
for (i in (lastCol-1):firstCol) {
h = solveImplicitTridiagonal(V, pu, pm, pd, lambdaL, lambdaU, i)
pmp[,i] = h$pmp # collect the pm prime probabilities
pp [,i] = h$pp # collect the p prime probabilities
V = h$V
# Apply Early Exercise condition ----
for(j in lastRow:firstRow) {
V[j, i] = max(V[j, i], cp * (S[j, lastCol] - K))
}
}
# Return the price ----
list(Type = paste( "American", ifelse(isCall, "Call", "Put")),Price = V[middleRow,firstCol],
Probs=round(c(pu=pu, pm=pm, pd=pd),middleRow), pmp=round(pmp,4), pp=round(pp,4),
S=round(S,2), V=round(V,middleRow))
}
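# NOTE: solveImplicitTridiagonal() is not defined in this file. Below is a
# minimal hypothetical sketch (assuming nRows >= 4), following Clewlow &
# Strickland's forward-elimination / back-substitution for the system
#   pu*V[j-1,i] + pm*V[j,i] + pd*V[j+1,i] = V[j,i+1]   (row 1 = highest price)
# with derivative boundary conditions V[1,i] - V[2,i] = lambdaU and
# V[lastRow-1,i] - V[lastRow,i] = lambdaL (Clewlow eqs. 3.38 and 3.39).
solveImplicitTridiagonal = function(V, pu, pm, pd, lambdaL, lambdaU, i)
{
  nRows = nrow(V)
  pmp = pp = rep(0, nRows)
  # substitute the lower boundary condition into the row just above it
  pmp[nRows-1] = pm + pd
  pp [nRows-1] = V[nRows-1, i+1] + pd * lambdaL
  # eliminate the lower diagonal, moving up the grid
  for (j in (nRows-2):2) {
    pmp[j] = pm - pu * pd / pmp[j+1]
    pp [j] = V[j, i+1] - pd * pp[j+1] / pmp[j+1]
  }
  # combine the upper boundary condition with the eliminated j = 2 equation
  V[2, i] = (pp[2] - pu * lambdaU) / (pmp[2] + pu)
  V[1, i] = V[2, i] + lambdaU
  # back-substitute down the grid
  for (j in 3:(nRows-1)) {
    V[j, i] = (pp[j] - pu * V[j-1, i]) / pmp[j]
  }
  V[nRows, i] = V[nRows-1, i] - lambdaL
  list(V = V, pmp = pmp, pp = pp)
}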
Clewlow3_13(isCall = FALSE)
|
459df3d67008765d444b9e45b7f17ebbe5566b62 | bb58aec3d341aab8b91fd210de4ee41389ea156c | /R/Basic_EDA.R | 102c4670a80a22ed91305f8236400744b5d81752 | [] | no_license | John-Snyder/COVID-19-Modeling | a5821b9e276d3cd3da604c067371c708f737a440 | de47faaf1d6483ba9bf8cb91e1c8a532db75afae | refs/heads/master | 2021-04-10T00:51:46.197055 | 2020-03-30T02:04:37 | 2020-03-30T02:04:37 | 248,898,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,247 | r | Basic_EDA.R | library(dplyr)
library(tidyr)
library(ggplot2)
source("./R/Import_TS_data.R")
covid19_long <- read.csv("./Data/COVID19_TS_long.csv")
covid19_long <-
covid19_long %>%
filter(Confirmed>1) %>%
group_by(Province.State,Country.Region) %>%
arrange(Date) %>%
mutate(Days_Since_First = 1:n(),
Country_Province = paste(Country.Region,Province.State,sep="-"),
deriv=c(0,diff(Confirmed,lag = 1)))
covid19_long_US <- covid19_long %>%
filter(Country.Region == "US") %>%
filter(!(Province.State %in% c("Diamond Princess","Grand Princess"))) %>%
mutate(Missouri = ifelse(Province.State=="Missouri","Yes","No"))
covid19_long_Italy <- covid19_long %>%
filter(Country.Region == "Italy")
#pdf(file = "~/Desktop/plot.pdf",width = 30,height = 20)
ggplot(covid19_long_US,
aes(x=Days_Since_First,
y=log(Confirmed),
group=Country_Province,
color=Country_Province)) +
geom_line() +
theme_bw() + guides(color = FALSE,
size = FALSE)
ggplot(covid19_long_US,
aes(x=Days_Since_First,
y=sqrt(deriv),
group=Country_Province,
color=Country_Province)) + geom_line() + theme_bw() + guides(color = FALSE, size = FALSE)
#dev.off()
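# (added sketch) Example use of the Missouri flag created above: highlight the
# Missouri trajectory against the other states.
ggplot(covid19_long_US,
       aes(x=Days_Since_First,
           y=log(Confirmed),
           group=Country_Province)) +
  geom_line(aes(color = Missouri, alpha = Missouri)) +
  scale_color_manual(values = c(No = "grey70", Yes = "red")) +
  scale_alpha_manual(values = c(No = 0.4, Yes = 1)) +
  theme_bw() + guides(alpha = FALSE)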
|