blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (sequence) | license_type (2 classes) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (46 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, may be null) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (14 classes) | gha_event_created_at (timestamp[us], may be null) | gha_created_at (timestamp[us], may be null) | gha_language (60 classes) | src_encoding (24 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (20 classes) | filename (string) | content (string)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
a33f32474bfdbce06eaf8fb6e7abfc723191b062 | 116eaeafe38f8df9aaa628df6e444144339376ba | /MechaCarChallenge.R | fddb04af170ef176168ecb47d7b3425d7093a444 | [] | no_license | kaytar23/MechaCar_Statistical_Analysis | cfa6f4b02093084af3827ae80dc64c993e3c4c5d | 4c6ad3670b2b51e664be56faeeccb1c003bb4ff8 | refs/heads/main | 2023-08-18T01:19:23.926486 | 2021-09-23T03:52:22 | 2021-09-23T03:52:22 | 409,409,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 749 | r | MechaCarChallenge.R | library(dplyr)
library(readr) # read_csv() comes from readr; dplyr alone does not provide it
MechaCar_mpg <- read_csv("MechaCar_mpg.csv")
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=MechaCar_mpg))
Suspension_Coil <- read_csv("Suspension_Coil.csv")
total_summary <- Suspension_Coil %>% summarise(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI), SD=sd(PSI))
lot_summary <- Suspension_Coil %>% group_by(Manufacturing_Lot) %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI), SD=sd(PSI))
t.test(Suspension_Coil$PSI, mu=1500)
t.test(subset(Suspension_Coil,Manufacturing_Lot=="Lot1")$PSI, mu = 1500)
t.test(subset(Suspension_Coil,Manufacturing_Lot=="Lot2")$PSI, mu = 1500)
t.test(subset(Suspension_Coil,Manufacturing_Lot=="Lot3")$PSI, mu = 1500)
|
10b3df9cf919aefd3c5b361650ff4e2310499f98 | 2d7b44297ddfcedbea062dd960caa90c72841173 | /vote-rennes.R | f6d97c30914cccaed7ee264f16867c4f812dafa7 | [] | no_license | DataBzh/territoire | c7c342a4ea64697142fb2aa6c5d0134bf780362b | fb6a07f1a76dc2861cf37fb07455159948427735 | refs/heads/master | 2021-01-19T03:31:44.667322 | 2019-05-27T06:54:32 | 2019-05-27T06:54:32 | 64,167,586 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,081 | r | vote-rennes.R | library(tidyverse)
library(rgdal)
source("/home/colin/Dropbox/R/misc/data-bzh-tools-master/main.R")
rennes <- read_csv2("https://data.rennesmetropole.fr/explore/dataset/centres-de-vote/download/?format=csv&timezone=Europe/Berlin&use_labels_for_header=true")
rennes <- rennes %>%
separate(`Geo Point`, into = c("long","lat"), sep = ",")
table(rennes$c_nom) %>%
sort()
sum(rennes$burx_nb)
rennes %>%
group_by(c_nom) %>%
summarise(somme = n()) %>%
arrange(desc(somme)) %>%
ggplot(aes(x = reorder(c_nom, somme), y = somme))+
geom_bar(stat = "identity", fill = databzh$colour1) +
coord_flip() +
xlab("") +
ylab(" ") +
labs(title = "Centres de vote sur Rennes Métropole",
subtitle = "Données via : Open Data Rennes",
caption = "http://data-bzh.fr") +
databzhTheme()
ggplot(rennes, aes(burx_nb)) +
geom_bar(fill = databzh$colour2) +
xlab("") +
ylab(" ") +
labs(title = "Bureaux de vote par centre sur Rennes Métropole",
subtitle = "Données via : Open Data Rennes",
caption = "http://data-bzh.fr") +
databzhTheme()
roj <- readOGR(dsn=".", layer="emprise-de-rennes-metropole")
wmap_df <- fortify(roj)
ggplot(wmap_df, aes(long,lat, group=group)) +
geom_polygon(fill = "#e4e4e4") +
coord_map() +
geom_path(data=wmap_df, aes(long, lat, group=group), color="grey50") +
geom_point(data=rennes, aes(as.numeric(lat), as.numeric(long), group = NULL), color = databzh$colours[2]) +
scale_size(range = c(1,12)) +
xlab("") +
ylab(" ") +
labs(title = "Centres de vote sur Rennes Métropole",
subtitle = "Données via : Open Data Rennes",
caption = "http://data-bzh.fr") +
theme(title=element_text(),
plot.title=element_text(margin=margin(0,0,20,0), size=18, hjust = 0.5),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
panel.grid.major= element_line("grey50", linetype = "dashed"),
panel.background= element_blank())
|
8da72434959bf5e76f562413a904cefde8fbcf7d | f21245e27e040e5c02a3f4a1936e8e8b96f069b1 | /R/gplates_reconstruct.R | e1aa168366faed0e04190ec02c53a382eabf315f | [] | no_license | LunaSare/gplatesr | d642b59ac07353f83220419a534cf962e3b052be | 24c182c77caed9eb6125380084f56ca6d5884960 | refs/heads/master | 2022-09-13T22:37:07.489673 | 2022-08-29T16:43:44 | 2022-08-29T16:43:44 | 164,476,513 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,953 | r | gplates_reconstruct.R | #' Launch the service via docker
#' @export
launch_docker <- function() {
#docker <- stevedore::docker_client()
#docker$container$run("alpine:3.1", c("echo", "hello world"))
#system("docker run --rm -it -p 8888:80 gplates/gws")
system("docker run --rm -p 8888:80 gplates/gws", wait=FALSE)
}
#' Reconstruct a point
#' @param lon A numeric vector of length one.
#' @param lat A numeric vector of length one.
#' @param age A numeric vector of length one indicating the geologic time to reconstruct.
#' @param base_url The url to use; make sure it ends with a slash
#' @return Coordinates
#' @export
gplates_reconstruct_point <- function(lon,lat,age, base_url='http://gws.gplates.org/'){
url <- paste0(base_url,'reconstruct/reconstruct_points/')
# query <- sprintf('?points=%f,%f&time=%d&model=GOLONKA',lon,lat,as.integer(age)) #The Paleobiodb navigator uses GOLONKA, PALEOMAP extends to 750 ma, default only to 200 ma
query <- sprintf('?points=%f,%f&time=%f&model=GOLONKA',lon,lat,as.numeric(age)) #The Paleobiodb navigator uses GOLONKA, PALEOMAP extends to 750 ma, default only to 200 ma
fullrequest <- sprintf(paste0(url,query))
print(fullrequest)
rawdata <- readLines(fullrequest, warn="F")
dat <- jsonlite::fromJSON(rawdata)
rcoords = dat['coordinates'][[1]]
return(rcoords)
}
#' Reconstruct ancient coastlines
#' @inherit gplates_reconstruct_point
#' @return An S4 object of class SpatialPolygonsDataFrame
#' @export
gplates_reconstruct_coastlines <- function(age, base_url='http://gws.gplates.org/'){
url <- paste0(base_url,'reconstruct/coastlines/')
# query <- sprintf('?time=%d&model=GOLONKA',as.integer(age))
query <- sprintf('?time=%f&model=GOLONKA',as.numeric(age))
fullrequest <- sprintf(paste0(url,query))
print(fullrequest)
r <- httr::GET(fullrequest)
bin <- httr::content(r, "raw")
writeBin(bin, paste0(tempdir(), "/myfile.geojson"))
#dat <- rgdal::readOGR(dsn=paste0(tempdir(), "/myfile.geojson"), layer="OGRGeoJSON", stringsAsFactors=FALSE)
dat <- rgdal::readOGR(dsn=paste0(tempdir(), "/myfile.geojson"))
return(dat)
}
#' Reconstruct static polygons
#' @inherit gplates_reconstruct_point
#' @return An S4 object of class SpatialPolygonsDataFrame
#' @export
gplates_reconstruct_static_polygons <- function(age, base_url='http://gws.gplates.org/'){
url <- paste0(base_url,'reconstruct/static_polygons/')
#url <- 'http://gws.gplates.org/reconstruct/static_polygons/'
#query <- sprintf('?time=%d&model=GOLONKA',as.integer(age))
query <- sprintf('?time=%f&model=GOLONKA',as.numeric(age))
fullrequest <- sprintf(paste0(url,query))
print(fullrequest)
r <- httr::GET(fullrequest)
bin <- httr::content(r, "raw")
writeBin(bin, paste0(tempdir(), "/myfile.geojson"))
#dat <- rgdal::readOGR(dsn=paste0(tempdir(), "/myfile.geojson"), layer="OGRGeoJSON", stringsAsFactors=FALSE)
dat <- rgdal::readOGR(dsn=paste0(tempdir(), "/myfile.geojson"))
return(dat)
}
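# Minimal usage sketch (illustrative only, not from the package documentation;
# the coordinates and age are arbitrary, and the localhost URL assumes the docker
# service started by launch_docker(), which maps port 8888 to 80):
# launch_docker()
# pt <- gplates_reconstruct_point(lon = 151.2, lat = -33.9, age = 100,
#                                 base_url = 'http://localhost:8888/')
# coastlines_100ma <- gplates_reconstruct_coastlines(age = 100)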
|
84332a105f40e144ad66fade97051a2b3c3df58f | 127141eb7e2897126bc81caa19b6e6cb5ceb9572 | /global.R | 4680b9b7666f68b5053731462ed0df6548c1e17f | [] | no_license | yogesh1612/network_data_prep_app | c04ecadc2a506f4d3f7701e1a03bd666e4f8e9e2 | 6c4b2229b93bebe73158bdb27d90f5cfb07a7f27 | refs/heads/main | 2023-03-12T23:29:02.274201 | 2021-02-25T09:33:27 | 2021-02-25T09:33:27 | 341,204,060 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,697 | r | global.R | ## --- functionize dataframe to adjacency mat for network-an app input
df2adjacency <- function(input_df, cutoff_percentile=0.25,id_var){
rownames(input_df) <- make.names(input_df[,id_var], unique=TRUE)
#rownames(input_df) <- input_df[,id_var]
input_df[,id_var] <- NULL
# first, retain only metric colms
a0_logi = apply(input_df, 2, function(x) {is.numeric(x)}); a0_logi
df0 = input_df[, a0_logi]
n1 = nrow(df0); n1
# calc dist mat
dist_mat = dist(as.matrix(scale(df0)))
# build full square-shaped dist matrix
full_dmat = matrix(0, n1, n1)
diag(full_dmat) = 1 # created shell matrix
counter0 = 1
for (i in 1:(n1-1)){
counter1 = counter0 + (n1 - i) - 1; counter1
vals = dist_mat[counter0: counter1]; vals
full_dmat[(i+1):n1, i] = vals; full_dmat
counter0 = counter1+1; counter0
} # 0.01s for 32x32 mat
# populate upper triangular part also
for (row0 in 1:(n1-1)){
for (colm0 in (row0+1):n1){
full_dmat[row0, colm0] = full_dmat[colm0, row0] } }
## set threshold in percentile terms (can be slider based widget in shiny app)
#thresh0=0.25 # say. top 25% closest rows only form links
thresh1 = quantile(unlist(full_dmat), cutoff_percentile,na.rm = TRUE) %>% as.numeric(); thresh1
adj_mat = map_dfc(full_dmat, function(x) {1*(x < thresh1)}) # 0.03s
adj_mat1 = matrix(adj_mat, n1, n1) # adj_mat1[1:8,1:8]
rownames(adj_mat1) = rownames(input_df)
colnames(adj_mat1) = rownames(input_df)
return(adj_mat1)
} # func ends
# test-drive above
# system.time({ adj0 = df2adjacency(input_df, 0.33,"car") }) # 0.05s
# adj0[1:8,1:8] # view a few
summry_df <- function(df){
# select data type
summ_df <- data.frame()
summ_df <- df %>%
summarise_all(class) %>%
gather() %>% rename("Variable"="key","Datatype"="value")
mean <- df %>%
summarise(across(where(is.numeric), mean, na.rm= TRUE))%>%t()
sd <- df %>%
summarise(across(where(is.numeric), sd, na.rm= TRUE))%>%t()
min <- df %>%
summarise(across(where(is.numeric), min, na.rm= TRUE))%>%t()
max <- df %>%
summarise(across(where(is.numeric), max, na.rm= TRUE))%>%t()
mode <- sapply(df,function(x) getmode(x))
t1 <- as.data.frame(cbind(mean,sd,min,max,mode))
names(t1) <- c('Mean',"SD","Min","Max","Mode")
t2 <- tibble::rownames_to_column(t1, "Variable")%>% mutate_if(is.numeric, round, digits=2)
t3 <- left_join(summ_df,t2,by=c("Variable"))
return(t3)
}
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
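# Illustrative: getmode(c(1, 2, 2, 3)) returns 2; summry_df(df) combines each
# variable's class with its mean, SD, min, max and mode in a single summary table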
|
2fc358f9d4ea151f1019742fe343a99e2ea21fa2 | 137736b1a6048880f4d525e15f46ce4092239548 | /code/03d simulate sampling - stratified by zone.R | 30f1d9aa8498aa0e37068decccc1f916a93d82db | [] | no_license | BritishTrustForOrnithology/eodip5_earth_obs_power_analysis | 21a5a68f668924d40b254ed7a01bdccdf22484e7 | c7b77137d567080c7b600f6158ad8c251b8d7364 | refs/heads/master | 2022-11-06T17:59:13.021168 | 2020-06-23T07:37:52 | 2020-06-23T07:37:52 | 274,339,857 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,789 | r | 03d simulate sampling - stratified by zone.R | #simulate sampling and error - simple random sampling within classification zones
#Simon Gillings
#February 2016
library(plyr)
#set working directory
setwd('X:\\Shared Projects\\Defra80 - EODIP5\\gis analysis\\')
# LOAD DATA ------------------------------------------------------------------------------------------
#load data
df<-readRDS('parcels_with_grids.rds')
#load habitat short names
hablabs<-read.table(file='hablabels.csv',sep=',',header=T,colClasses = rep('character',2))
#lookup lists of squares by zone. Note that some parcels in the original Living Map files fall outside the zones shapefile provided
zone.500m<-read.table(file='centroids_500m_zone.csv',sep=',',header=T,colClasses = rep('character',2))
names(zone.500m)<-c('ref500m','zone500m')
zone.200m<-read.table(file='centroids_200m_zone.csv',sep=',',header=T,colClasses = rep('character',2))
names(zone.200m)<-c('ref200m','zone200m')
zone.100m<-read.table(file='centroids_100m_zone.csv',sep=',',header=T,colClasses = rep('character',2))
names(zone.100m)<-c('ref100m','zone100m')
# PREPARE DATA -----------------------------------------------------------------------------------------
#combine the two habs as agreed by JNCC
df$CLASS<-ifelse(df$CLASS=='Semi-improved (poor condition)','Semi-improved grassland',df$CLASS)
#merge zones with df for each grid resolution
df<-merge(df,zone.500m,by='ref500m',all.x=T)
df<-merge(df,zone.200m,by='ref200m',all.x=T)
df<-merge(df,zone.100m,by='ref100m',all.x=T)
#some parcels fall in squares outside the zones - mark up as zone X
df$zone500m[is.na(df$zone500m)]<-'X'
df$zone200m[is.na(df$zone200m)]<-'X'
df$zone100m[is.na(df$zone100m)]<-'X'
#get definitive list of squares at each resolution
#sqrlist.01km<-unique(df$ref01km)
sqrlist.500m<-unique(subset(df,select=c('ref500m','zone500m')))
sqrlist.200m<-unique(subset(df,select=c('ref200m','zone200m')))
sqrlist.100m<-unique(subset(df,select=c('ref100m','zone100m')))
#how many squares per zone per grid resolution
table(sqrlist.500m$zone500m)
table(sqrlist.200m$zone200m)
table(sqrlist.100m$zone100m)
#get definitive list of habitats
hablist<-data.frame(CLASS=unique(df$CLASS),stringsAsFactors = F)
#add error rates
hablist$error.low<-0.05
hablist$error.mid<-0.15
hablist$error.high<-0.30
#sort
hablist<-hablist[order(hablist$CLASS),]
hablist<-merge(hablist,hablabs,by='CLASS')
#simulate random sampling in each zone
simulate.random<-function(repnum,nsqr,gridres,errorlevel) {
#repnum = counter to show progress on reps
#nsqr = number of squares in sample
#gridres = what size grid squares to use
#errorlevel = use low, mid or high error estimate
cat(paste(repnum,nsqr,gridres,errorlevel,'\n'))
#get the relevant list of squares in the region and assign to object squares
assign("squares",get(apropos(gridres) ) )
names(squares)[2]<-'zone'
#make a sample of squares at this resolution (without replacement)
#need to check that sample size is not greater than number of squares in region; if so reduce sample size for this run
max.samp.size<-table(squares$zone)
nsqr.A<-ifelse(max.samp.size[['A']]<nsqr,max.samp.size[['A']],nsqr)
nsqr.B<-ifelse(max.samp.size[['B']]<nsqr,max.samp.size[['B']],nsqr)
nsqr.C<-ifelse(max.samp.size[['C']]<nsqr,max.samp.size[['C']],nsqr)
nsqr.D<-ifelse(max.samp.size[['D']]<nsqr,max.samp.size[['D']],nsqr)
nsqr.E<-ifelse(max.samp.size[['E']]<nsqr,max.samp.size[['E']],nsqr)
nsqr.X<-ifelse(max.samp.size[['X']]<nsqr,max.samp.size[['X']],nsqr)
#process each zone separately
this.sample.A<-as.character(sample(squares[squares$zone=='A',1], replace=F, size=nsqr.A, prob=NULL))
this.sample.B<-as.character(sample(squares[squares$zone=='B',1], replace=F, size=nsqr.B, prob=NULL))
this.sample.C<-as.character(sample(squares[squares$zone=='C',1], replace=F, size=nsqr.C, prob=NULL))
this.sample.D<-as.character(sample(squares[squares$zone=='D',1], replace=F, size=nsqr.D, prob=NULL))
this.sample.E<-as.character(sample(squares[squares$zone=='E',1], replace=F, size=nsqr.E, prob=NULL))
this.sample.X<-as.character(sample(squares[squares$zone=='X',1], replace=F, size=nsqr.X, prob=NULL))
#get all parcels in these squares, after checking which column holds the grid refs and CLASS at this scale
i<-which(names(df)==paste0('ref',gridres))
ic<-which(names(df)=='CLASS')
this.parcels.A<-df[df[,i] %in% this.sample.A,ic]
this.parcels.B<-df[df[,i] %in% this.sample.B,ic]
this.parcels.C<-df[df[,i] %in% this.sample.C,ic]
this.parcels.D<-df[df[,i] %in% this.sample.D,ic]
this.parcels.E<-df[df[,i] %in% this.sample.E,ic]
this.parcels.X<-df[df[,i] %in% this.sample.X,ic]
#tally up how many parcels of each type would be checked
this.hab.freqs.A<-as.data.frame(table(this.parcels.A),stringsAsFactors = F)
this.hab.freqs.B<-as.data.frame(table(this.parcels.B),stringsAsFactors = F)
this.hab.freqs.C<-as.data.frame(table(this.parcels.C),stringsAsFactors = F)
this.hab.freqs.D<-as.data.frame(table(this.parcels.D),stringsAsFactors = F)
this.hab.freqs.E<-as.data.frame(table(this.parcels.E),stringsAsFactors = F)
this.hab.freqs.X<-as.data.frame(table(this.parcels.X),stringsAsFactors = F)
names(this.hab.freqs.A)<-c('CLASS','n.parcels.checked.A')
names(this.hab.freqs.B)<-c('CLASS','n.parcels.checked.B')
names(this.hab.freqs.C)<-c('CLASS','n.parcels.checked.C')
names(this.hab.freqs.D)<-c('CLASS','n.parcels.checked.D')
names(this.hab.freqs.E)<-c('CLASS','n.parcels.checked.E')
names(this.hab.freqs.X)<-c('CLASS','n.parcels.checked.X')
#merge results with master list of habs and error rates and convert NAs to zeroes
this.hab.freqs<-join_all(list(hablist,this.hab.freqs.A,this.hab.freqs.B,this.hab.freqs.C,this.hab.freqs.D,this.hab.freqs.E,this.hab.freqs.X) ,by='CLASS')
this.hab.freqs[is.na(this.hab.freqs)]<-0
#simulate how many errors there might be in this many patches. First create a column of 1s to force code to create 1 answer per call of rbinom
this.hab.freqs$x<-1
#check which error column to use
e<-which(names(this.hab.freqs)==paste0('error.',errorlevel))
#simulate the number of errors
n.errors.A<-rbinom(this.hab.freqs$x,this.hab.freqs$n.parcels.checked.A,this.hab.freqs[,e])
n.errors.B<-rbinom(this.hab.freqs$x,this.hab.freqs$n.parcels.checked.B,this.hab.freqs[,e])
n.errors.C<-rbinom(this.hab.freqs$x,this.hab.freqs$n.parcels.checked.C,this.hab.freqs[,e])
n.errors.D<-rbinom(this.hab.freqs$x,this.hab.freqs$n.parcels.checked.D,this.hab.freqs[,e])
n.errors.E<-rbinom(this.hab.freqs$x,this.hab.freqs$n.parcels.checked.E,this.hab.freqs[,e])
n.errors.X<-rbinom(this.hab.freqs$x,this.hab.freqs$n.parcels.checked.X,this.hab.freqs[,e])
#bind the results together and create output
this.hab.freqs<-cbind(this.hab.freqs,n.errors.A,n.errors.B,n.errors.C,n.errors.D,n.errors.E,n.errors.X)
this.hab.freqs$error.rate.A<-this.hab.freqs$n.errors.A/this.hab.freqs$n.parcels.checked.A
this.hab.freqs$error.rate.B<-this.hab.freqs$n.errors.B/this.hab.freqs$n.parcels.checked.B
this.hab.freqs$error.rate.C<-this.hab.freqs$n.errors.C/this.hab.freqs$n.parcels.checked.C
this.hab.freqs$error.rate.D<-this.hab.freqs$n.errors.D/this.hab.freqs$n.parcels.checked.D
this.hab.freqs$error.rate.E<-this.hab.freqs$n.errors.E/this.hab.freqs$n.parcels.checked.E
this.hab.freqs$error.rate.X<-this.hab.freqs$n.errors.X/this.hab.freqs$n.parcels.checked.X
this.hab.freqs$nsqr<-nsqr
this.hab.freqs$gridres<-gridres
this.hab.freqs$errorlevel<-errorlevel
this.hab.freqs$error.expected<-this.hab.freqs[,e]
this.hab.freqs<-this.hab.freqs[,-c(2,3,4)]
return(this.hab.freqs)
}
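# Illustrative single call (argument values are examples only; the full sweep over
# sample sizes, grid resolutions and error levels is generated with expand.grid and
# mapply further below):
# one_run <- simulate.random(repnum = 1, nsqr = 100, gridres = '500m', errorlevel = 'mid')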
##################################################################################################################################################
# ALL SCALES, WIDE RANGE OF SAMPLE SIZES, 15% ERROR ---------------------------------------------------------------
##################################################################################################################################################
#create population of scenarios
sqrs.min<-100
sqrs.max<-2000
sqrs.step<-100
reps<-100
#expand.grid creates all permutations of whatever vectors are fed in.
scenarios<-expand.grid(seq(from=1,to=reps,by=1),
seq(sqrs.min,sqrs.max,by=sqrs.step),
c('500m','200m','100m'),
'mid',
stringsAsFactors = F)
#rename columns
names(scenarios)<-c('repnum','nsqr','gridres','errorlevel')
#how many scenarios
dim(scenarios)
#apply the simulate function to all scenarios
results<-mapply(simulate.random,scenarios$repnum,scenarios$nsqr,scenarios$gridres,scenarios$errorlevel,SIMPLIFY=F)
#convert list of dataframes to one big dataframe
results <- ldply(results, data.frame)
saveRDS(results,file='sim_random_sampling_by_zone_15pc_error\\sim_random_sampling_by_zone_results_3scales_100-2000sqrs_error15.rds')
#results<-readRDS(file='sim_random_sampling_by_zone_15pc_error\\sim_random_sampling_by_zone_results_3scales_100-2000sqrs_error15.rds')
#make boxplots
#how many habs to plot
nhabs<-nrow(hablist)
for(h in 1:nhabs) {
#which habitat
this.hab<-hablist$CLASS[h]
this.lab<-hablist$LABEL[h]
#create output for image for this habitat
loc.name<-paste0('sim_random_sampling_by_zone_15pc_error\\sim_random_sampling_error15_',gsub(" ", "_", this.hab),'.png')
png(loc.name,width=1000,height=1500)
#set image parameters for png output
par(mfrow=c(5,2))
par(mar=c(6,9,4,2))
par(mgp=c(0,2.5,0))
par(tcl=-1)
#get results for this habitat
this.results<-results[results$CLASS==this.hab,]
#process a single grid scale
this.results<-this.results[this.results$gridres=='500m',]
#for this habitat, convert NaN values to zeroes
this.results$error.rate.A<-ifelse(is.nan(this.results$error.rate.A),0,this.results$error.rate.A)
this.results$error.rate.B<-ifelse(is.nan(this.results$error.rate.B),0,this.results$error.rate.B)
this.results$error.rate.C<-ifelse(is.nan(this.results$error.rate.C),0,this.results$error.rate.C)
this.results$error.rate.D<-ifelse(is.nan(this.results$error.rate.D),0,this.results$error.rate.D)
this.results$error.rate.E<-ifelse(is.nan(this.results$error.rate.E),0,this.results$error.rate.E)
this.results$error.rate.X<-ifelse(is.nan(this.results$error.rate.X),0,this.results$error.rate.X)
this.error<-unique(this.results$error.expected)
boxplot(data=this.results,n.parcels.checked.A~nsqr,cex.axis=2.5,main='Zone A: No. parcels checked',cex.main=3.25,las=1)
boxplot(data=this.results,error.rate.A~nsqr,cex.axis=2.5,main='Zone A: Error estimates',cex.main=3.25,ylim=c(0,1),las=1)
abline(h=this.error,col='red')
boxplot(data=this.results,n.parcels.checked.B~nsqr,cex.axis=2.5,main='Zone B: No. parcels checked',cex.main=3.25,las=1)
boxplot(data=this.results,error.rate.B~nsqr,cex.axis=2.5,main='Zone B: Error estimates',cex.main=3.25,ylim=c(0,1),las=1)
abline(h=this.error,col='red')
boxplot(data=this.results,n.parcels.checked.C~nsqr,cex.axis=2.5,main='Zone C: No. parcels checked',cex.main=3.25,las=1)
boxplot(data=this.results,error.rate.C~nsqr,cex.axis=2.5,main='Zone C: Error estimates',cex.main=3.25,ylim=c(0,1),las=1)
abline(h=this.error,col='red')
boxplot(data=this.results,n.parcels.checked.D~nsqr,cex.axis=2.5,main='Zone D: No. parcels checked',cex.main=3.25,las=1)
boxplot(data=this.results,error.rate.D~nsqr,cex.axis=2.5,main='Zone D: Error estimates',cex.main=3.25,ylim=c(0,1),las=1)
abline(h=this.error,col='red')
boxplot(data=this.results,n.parcels.checked.E~nsqr,cex.axis=2.5,main='Zone E: No. parcels checked',cex.main=3.25,las=1)
boxplot(data=this.results,error.rate.E~nsqr,cex.axis=2.5,main='Zone E: Error estimates',cex.main=3.25,ylim=c(0,1),las=1)
abline(h=this.error,col='red')
#boxplot(data=this.results.grid,n.parcels.checked.X~nsqr,cex.axis=2.5,main='Zone X: No. parcels checked',cex.main=3.25,las=1)
#boxplot(data=this.results.grid,error.rate.X~nsqr,cex.axis=2.5,main='Zone X: Error estimates',cex.main=3.25,ylim=c(0,1),las=1)
dev.off()
}
#summarise how often the "correct" answer is found. Acceptable precision of +/-5% (arithmetic) on the known error rate
#replace NaNs with zeroes for this classification of whether error was correctly assigned
precision<-0.05
error<-0.15
precision.lower<-error-precision
precision.upper<-error+precision
results$error.rate.A<-ifelse(is.nan(results$error.rate.A),0,results$error.rate.A)
results$error.rate.B<-ifelse(is.nan(results$error.rate.B),0,results$error.rate.B)
results$error.rate.C<-ifelse(is.nan(results$error.rate.C),0,results$error.rate.C)
results$error.rate.D<-ifelse(is.nan(results$error.rate.D),0,results$error.rate.D)
results$error.rate.E<-ifelse(is.nan(results$error.rate.E),0,results$error.rate.E)
results$est.correct.A<-ifelse(results$error.rate.A>=precision.lower & results$error.rate.A<=precision.upper,100,0)
results$est.correct.B<-ifelse(results$error.rate.B>=precision.lower & results$error.rate.B<=precision.upper,100,0)
results$est.correct.C<-ifelse(results$error.rate.C>=precision.lower & results$error.rate.C<=precision.upper,100,0)
results$est.correct.D<-ifelse(results$error.rate.D>=precision.lower & results$error.rate.D<=precision.upper,100,0)
results$est.correct.E<-ifelse(results$error.rate.E>=precision.lower & results$error.rate.E<=precision.upper,100,0)
p.correct.vals.A<-aggregate(data=results,est.correct.A~CLASS+gridres+nsqr,mean)
p.correct.vals.B<-aggregate(data=results,est.correct.B~CLASS+gridres+nsqr,mean)
p.correct.vals.C<-aggregate(data=results,est.correct.C~CLASS+gridres+nsqr,mean)
p.correct.vals.D<-aggregate(data=results,est.correct.D~CLASS+gridres+nsqr,mean)
p.correct.vals.E<-aggregate(data=results,est.correct.E~CLASS+gridres+nsqr,mean)
p.correct.A<-p.correct.vals.A
p.correct.B<-p.correct.vals.B
p.correct.C<-p.correct.vals.C
p.correct.D<-p.correct.vals.D
p.correct.E<-p.correct.vals.E
p.correct.A$est.correct.A<-ifelse(p.correct.A$est.correct.A>=95,1,0)
p.correct.B$est.correct.B<-ifelse(p.correct.B$est.correct.B>=95,1,0)
p.correct.C$est.correct.C<-ifelse(p.correct.C$est.correct.C>=95,1,0)
p.correct.D$est.correct.D<-ifelse(p.correct.D$est.correct.D>=95,1,0)
p.correct.E$est.correct.E<-ifelse(p.correct.E$est.correct.E>=95,1,0)
habs.correct.A<-aggregate(data=p.correct.A,est.correct.A~nsqr+gridres,sum)
habs.correct.B<-aggregate(data=p.correct.B,est.correct.B~nsqr+gridres,sum)
habs.correct.C<-aggregate(data=p.correct.C,est.correct.C~nsqr+gridres,sum)
habs.correct.D<-aggregate(data=p.correct.D,est.correct.D~nsqr+gridres,sum)
habs.correct.E<-aggregate(data=p.correct.E,est.correct.E~nsqr+gridres,sum)
#produce summary graph per zone
zones<-c('A','B','C','D','E')
#some zones lack certain habitats - this is the max number of habitats present
max.habs<-c(12,20,16,21,27)
for(z in 1:5) {
this.zone<-zones[z]
this.max.habs<-max.habs[z]
assign("this.results",get(apropos(paste0('habs.correct.',this.zone) ) ) )
names(this.results)[3]<-'ncorrect'
loc.name<-paste0('sim_random_sampling_by_zone_15pc_error\\nhabs_correctly_assessed_zone_',this.zone,'.png')
png(loc.name,width=400,height=400)
par(mar=c(5.1,4.1,0.2,0.2))
plot(this.results$nsqr[this.results$gridres=='100m'],this.results$ncorrect[this.results$gridres=='100m'],ylim=c(0,this.max.habs),xlab='Sample size',ylab='Habitats correctly assessed',pch=16,type='l',lty=1, cex.lab=1.25)
par(new=T)
plot(this.results$nsqr[this.results$gridres=='200m'],this.results$ncorrect[this.results$gridres=='200m'],ylim=c(0,this.max.habs),xlab='Sample size',ylab='Habitats correctly assessed',pch=16,type='l',lty=2, cex.lab=1.25)
par(new=T)
plot(this.results$nsqr[this.results$gridres=='500m'],this.results$ncorrect[this.results$gridres=='500m'],ylim=c(0,this.max.habs),xlab='Sample size',ylab='Habitats correctly assessed',pch=16,type='l',lty=3, cex.lab=1.25)
abline(h=this.max.habs,col='red')
leg.text<-c('100-m','200-m','500-m')
legend('topleft',leg.text,lty=c(1,2,3))
dev.off()
}
#save results
#table.loc<-'X:\\Shared Projects\\Defra80 - EODIP5\\gis analysis\\sim_random_sampling_15pc_error\\how_often_correct.csv'
#write.table(p.correct,table.loc,row.names=F,sep=',')
#check which habitats are covered in each zone using 500m grid
zonehabstats<-read.table('n_squares_with_without_hab_byzone_ref500m.csv',sep=',',header=T, colClasses = c(rep('character',2),rep('numeric',3)))
zonehabstats$pwith<-with(data=zonehabstats,sqrswithhab/nsquares)
head(zonehabstats)
#Zone A
habs.A<-subset(zonehabstats,pwith>=0.01 & zone=='A')
#trim to hab, grid res and rows with adequate precision
p.correct.vals.A2<-subset(p.correct.vals.A,CLASS %in% habs.A$CLASS & gridres=='500m' & est.correct.A>=95)
#get largest sample size that works
p.correct.vals.A2[!duplicated(p.correct.vals.A2$CLASS),]
#print best achieved error rate for those that fail
subset(p.correct.vals.A,CLASS %in% habs.A$CLASS & gridres=='500m' & est.correct.A<95 & nsqr==2000)
#Zone B
habs.B<-subset(zonehabstats,pwith>=0.01 & zone=='B')
#trim to hab, grid res and rows with adequate precision
p.correct.vals.B2<-subset(p.correct.vals.B,CLASS %in% habs.B$CLASS & gridres=='500m' & est.correct.B>=95)
#get largest sample size that works
p.correct.vals.B2[!duplicated(p.correct.vals.B2$CLASS),]
#print best achieved error rate for those that fail
subset(p.correct.vals.B,CLASS %in% habs.B$CLASS & gridres=='500m' & est.correct.B<95 & nsqr==2000)
#Zone C
habs.C<-subset(zonehabstats,pwith>=0.01 & zone=='C')
#trim to hab, grid res and rows with adequate precision
p.correct.vals.C2<-subset(p.correct.vals.C,CLASS %in% habs.C$CLASS & gridres=='500m' & est.correct.C>=95)
#get largest sample size that works
p.correct.vals.C2[!duplicated(p.correct.vals.C2$CLASS),]
#print best achieved error rate for those that fail
subset(p.correct.vals.C,CLASS %in% habs.C$CLASS & gridres=='500m' & est.correct.C<95 & nsqr==2000)
#Zone D
habs.D<-subset(zonehabstats,pwith>=0.01 & zone=='D')
#trim to hab, grid res and rows with adequate precision
p.correct.vals.D2<-subset(p.correct.vals.D,CLASS %in% habs.D$CLASS & gridres=='500m' & est.correct.D>=95)
#get largest sample size that works
p.correct.vals.D2[!duplicated(p.correct.vals.D2$CLASS),]
#print best achieved error rate for those that fail
subset(p.correct.vals.D,CLASS %in% habs.D$CLASS & gridres=='500m' & est.correct.D<95 & nsqr==2000)
#Zone E
habs.E<-subset(zonehabstats,pwith>=0.01 & zone=='E')
#trim to hab, grid res and rows with adequate precision
p.correct.vals.E2<-subset(p.correct.vals.E,CLASS %in% habs.E$CLASS & gridres=='500m' & est.correct.E>=95)
#get largest sample size that works
p.correct.vals.E2[!duplicated(p.correct.vals.E2$CLASS),]
#print best achieved error rate for those that fail
subset(p.correct.vals.E,CLASS %in% habs.E$CLASS & gridres=='500m' & est.correct.E<95 & nsqr==2000)
|
f3b9e5e9269fdedae937182c508ba7217788de3c | 33b7262af06cab5cd28c4821ead49b3a0c24bb9d | /RegressionTests/compare.R | 8e950023a4f331ee254ae289a95455cbab7c89bf | [] | no_license | topepo/caret | d54ea1125ad41396fd86808c609aee58cbcf287d | 5f4bd2069bf486ae92240979f9d65b5c138ca8d4 | refs/heads/master | 2023-06-01T09:12:56.022839 | 2023-03-21T18:00:51 | 2023-03-21T18:00:51 | 19,862,061 | 1,642 | 858 | null | 2023-03-30T20:55:19 | 2014-05-16T15:50:16 | R | UTF-8 | R | false | false | 3,820 | r | compare.R | setwd("~/tmp")
##############################################################
Old <- "2018_03_10_21__6.0-79"
New <- "2018_05_25_22__6.0-80"
oldResults <- list.files(file.path(getwd(), Old), pattern = "RData")
newResults <- list.files(file.path(getwd(), New), pattern = "RData")
oldOrphan <- oldResults[!(oldResults %in% newResults)]
newOrphan <- newResults[!(newResults %in% oldResults)]
common <- intersect(oldResults, newResults)
checkModels <- function(model, opath, npath){
for(i in model) {
thisMod <- gsub(".RData", "", i, fixed = TRUE)
cat("############################################################\n")
cat(thisMod, "\n")
rlt <- checkResults(i,opath, npath)
if(!is.null(rlt)) print(rlt) else print(TRUE)
cat("\n")
rm(rlt)
}
}
getObj <- function(filename){
load(filename)
testObj <- ls(pattern = "^test")
testResults <- list()
for(i in seq(along = testObj)) {
tmp <- get(testObj[i])
if(!is.null(tmp)) {
testResults <- c(testResults, list(tmp))
names(testResults)[length(testResults)] <- testObj[i]
rm(tmp)
}
}
testResults
}
checkResults <- function(model, opath, npath){
oldResults <- getObj(file.path(getwd(), opath, model))
newResults <- getObj(file.path(getwd(), npath, model))
commonObj <- intersect(names(newResults), names(oldResults))
testResults <- vector(mode = "list", length = length(commonObj))
names(commonObj) <- commonObj
for(i in commonObj) {
cat("\n", i, "\n\n")
if((class(newResults[[i]])[1] == class(oldResults[[i]])[1])) {
if(!is.null(newResults[[i]]) & !is.null(oldResults[[i]])) {
if(class(newResults[[i]])[1] == "train") {
checkTrain(oldResults[[i]], newResults[[i]])
} else {
testResults[[i]] <- all.equal(newResults[[i]],
oldResults[[i]],
tolerance = 0.001)
print(testResults[[i]])
}
} else cat("skipping due to NULL", i, "\n")
} else cat("skipping due to conflicting classes", i, "\n")
}
}
notEqual <- function(x) if(class(x)[1] != "logical" || !x) TRUE else FALSE
checkTrain <- function(Old, New = NULL) {
if(!is.null(Old) & !is.null(New)) {
oldRes <- Old$results[, !grepl("SD$", names(Old$results))]
newRes <- New$results[, !grepl("SD$", names(New$results))]
param <- gsub("^\\.", "", names(Old$bestTune))
if(Old$method == "C5.0Cost") {
names(oldRes)[names(oldRes) == "Cost"] <- "cost"
param[param == "Cost"] <- "cost"
}
pNames <- Old$perfNames
names(oldRes)[names(oldRes) %in% pNames] <- paste("Old_",
names(oldRes)[names(oldRes) %in% pNames],
sep = "")
names(newRes)[names(newRes) %in% pNames] <- paste("New_",
names(newRes)[names(newRes) %in% pNames],
sep = "")
both <- merge(oldRes, newRes, all = !(Old$method %in% c("C5.0Cost", "M5Rules", "leapSeq", "leapBackward", "leapForward")))
for(i in pNames) {
cat(i, ":", sep = "")
aeq <- all.equal(both[, paste("Old_", i, sep = "")],
both[, paste("New_", i, sep = "")])
print(aeq)
if(notEqual(aeq)) {
cr <- cor(both[, paste("Old_", i, sep = "")],
both[, paste("New_", i, sep = "")],
use = "pairwise.complete.obs")
cat("\t\t\tcorr:", round(cr, 2),
"\n")
if(is.na(cr) || cr< .8) print(both[, c(param, paste("Old_", i, sep = ""),
paste("New_", i, sep = ""))])
}
}
}
}
checkModels(common, Old, New)
q("no")
|
4714a7ca4647fe3ac13414e98d8101e5bf21e34f | 4d0db5c2e04637437fc318cb50267b65341f53cc | /DataAnalysis/analysis.r | 6bdf646d3f6c6d8ac73a129b3a903602dfad9758 | [] | no_license | blukaniro/TrainingGrad190716 | a70b065cdae046d4f945cdac978ca1f8b444c23a | c727e21d2319849ac77e84b6d304298211edaf5d | refs/heads/master | 2020-06-20T01:25:52.263030 | 2019-07-19T03:42:20 | 2019-07-19T03:42:20 | 196,943,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,713 | r | analysis.r | # 190719田島亮介
# Read in and tidy the data
d<-read.table("dataset.txt", header=T)
d2<-d[c(-4,-11),] # exclude rows whose sap exudation and SFW values are extremely large, as outliers
# Analysis of variance
summary(aov(sap~treatment,d2))
summary(aov(SFW~treatment,d2))
summary(aov(panicle~treatment,d2))
summary(aov(root~treatment,d2))
## Fresh weight: conventional > organic at the 10% level; nodal root number: conventional > organic at the 5% level
## Discuss these in relation to the cultivation methods, or discuss the lack of significance (without that, the sap exudation rate cannot be addressed?)
# Check the correlations
plot(d2[,2:5])
cor(d2[,2:5])
## Fresh weight, tiller number and nodal root number are highly correlated
## Discuss these
# Compute indices from the data
root_per_panicle<-d2$root/d2$panicle # nodal roots per tiller
sap_per_panicle<-d2$sap/d2$panicle # sap exudation rate per tiller
sap_per_root<-d2$sap/d2$root # sap exudation rate per root
d2<-data.frame(d2,rp=root_per_panicle, sp=sap_per_panicle, sr=sap_per_root) # add the new indices to the data frame
# Analysis of variance
summary(aov(rp~treatment,d2))
summary(aov(sp~treatment,d2))
summary(aov(sr~treatment,d2))
## Nodal roots per tiller: conventional > organic at the 5% level
## Sap exudation rate per tiller: conventional < organic at the 5% level
## Sap exudation rate per root: conventional < organic at the 1% level
## Discuss this relationship in terms of the cultivation methods
# Figures: box plots, bar charts, correlation plots etc. should be presented clearly and referred to appropriately
par(mfrow=c(2,4))
boxplot(sap~treatment,d2)
boxplot(SFW~treatment,d2)
boxplot(panicle~treatment,d2)
boxplot(root~treatment,d2)
boxplot(rp~treatment,d2)
boxplot(sp~treatment,d2)
boxplot(sr~treatment,d2)
|
29c3e7b33a150e0dc72d1d191265937949231c7b | 3c38d8cbe00ffb6d1150682ea1f3c79acfc33d96 | /R/project_to.R | 0a267ccfe4c09591db0c68e5609248cce865d8d9 | [] | no_license | HughParsonage/grattan | c0dddf3253fc91511d122870a65e65cc918db910 | cc3e37e1377ace729f73eb1c93df307a58c9f162 | refs/heads/master | 2023-08-28T00:12:35.729050 | 2023-08-25T08:02:25 | 2023-08-25T08:02:25 | 30,398,321 | 26 | 11 | null | 2022-06-26T15:44:27 | 2015-02-06T06:18:13 | R | UTF-8 | R | false | false | 1,770 | r | project_to.R | #' Simple projections of the annual 2\% samples of Australian Taxation Office tax returns.
#'
#' @param sample_file A \code{data.table} matching a 2\% sample file from the ATO.
#' See package \code{taxstats} for an example.
#' @param to_fy A string like "1066-67" representing the financial year for which forecasts of the sample file are desired.
#' @param fy.year.of.sample.file The financial year of \code{sample_file}. See \code{\link{project}} for the default.
#' @param ... Other arguments passed to \code{\link{project}}.
#' @return A sample file with the same number of rows as \code{sample_file} but
#' with inflated values as a forecast for the sample file in \code{to_fy}.
#' If \code{WEIGHT} is not already a column of \code{sample_file}, it will be added and its sum
#' will be the predicted number of taxpayers in \code{to_fy}.
#' @export
project_to <- function(sample_file, to_fy, fy.year.of.sample.file = NULL, ...) {
if (is.null(fy.year.of.sample.file)) {
fy.year.of.sample.file <-
match(nrow(sample_file), c(254318L, 258774L, 263339L, 269639L, 277202L))
if (is.na(fy.year.of.sample.file)) {
stop("`fy.year.of.sample.file` was not provided, yet its value could not be ",
"inferred from nrow(sample_file) = ", nrow(sample_file), ". Either use ",
"a 2% sample file of the years 2012-13, 2013-14, 2014-15, 2015-16, 2016-17, or ",
"supply `fy.year.of.sample.file` manually.")
}
fy.year.of.sample.file <-
c("2012-13", "2013-14", "2014-15", "2015-16", "2016-17")[fy.year.of.sample.file]
}
h <- as.integer(fy2yr(to_fy) - fy2yr(fy.year.of.sample.file))
project(sample_file = sample_file, h = h,
fy.year.of.sample.file = fy.year.of.sample.file,
...)
}
|
c3c49b4d20e99321bae60826f2ca7a5066b71ef7 | fe734fd0801d41d36a223b66851e0b1d2a6b1eed | /data_analytics.R | c9c4a628fe22338f2a51c65de553a92b4055644e | [] | no_license | anyelacamargo/babycorn | a6ef1053d4842c8f351608c58530d784a273f046 | fbf90d0ffe5514ab5c1b57d21af1378a1f9ca682 | refs/heads/master | 2020-09-24T02:18:09.139475 | 2020-03-04T10:30:16 | 2020-03-04T10:30:16 | 225,638,213 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,222 | r | data_analytics.R | library(openxlsx)
library(ggplot2)
library(reshape2)
#library(dplyr)
library(caret)
library(outliers)
library(glmnet)
library(e1071)
library(xgboost)
parm_search_xgboost <- function(dtrain){
searchGridSubCol <- expand.grid(subsample = c(0.5, 0.75, 1),
colsample_bytree = c(0.6, 0.8, 1),
lambda = seq(0, 1, by = 0.5),
lambda_bias = seq(0, 1, by = 0.5),
alpha = seq(0, 1, by = 0.5),
eta = seq(0, 1, by = 0.1))
ntrees <- 100
rmseErrorsHyperparameters <- apply(searchGridSubCol, 1, function(parameterList){
#Extract Parameters to test
currentSubsampleRate <- parameterList[["subsample"]]
currentColsampleRate <- parameterList[["colsample_bytree"]]
lambda <- parameterList[["lambda"]]
lambda_bias <- parameterList[["lambda_bias"]]
alpha <- parameterList[["alpha"]]
eta <- parameterList[["eta"]]
xgboostModelCV <- xgb.cv(data = dtrain, nrounds = ntrees, nfold = 5,
showsd = TRUE,
metrics = "rmse", verbose = FALSE, "eval_metric" = "rmse",
"objective" = "reg:linear", 'booster' = "gblinear",
"max.depth" = 15,
"subsample" = currentSubsampleRate,
"colsample_bytree" = currentColsampleRate,
'lambda' = lambda,
'lambda_bias' = lambda_bias,
'alpha' = alpha, 'eta' = eta)
xvalidationScores <- data.frame(xgboostModelCV$evaluation_log)
#print(xvalidationScores)
#Save rmse of the last iteration
rmse <- tail(xvalidationScores$test_rmse_mean, 1)
return(c(rmse, currentSubsampleRate, currentColsampleRate, lambda,
lambda_bias, alpha, eta))
})
return(rmseErrorsHyperparameters[,which(rmseErrorsHyperparameters[1,] ==
min(rmseErrorsHyperparameters[1,]))])
}
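# Illustrative call (assumes an xgb.DMatrix such as the `dtrain` built inside
# run_gboosting below); returns the hyperparameter combination(s) with the lowest CV RMSE:
# best_pars <- parm_search_xgboost(dtrain)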
run_gboosting <- function(sdata, class_name, descriptor_list){
par(mfrow = c(2,2))
for(kw in c('SPAD', 'NDVI', 'LCC')){
i <- grep(kw, descriptor_list)
subdata <- sdata[, c(descriptor_list[c(1:4, i)], class_name)]
index <- createDataPartition(subdata[[class_name]], p = 0.7, list = FALSE)
train_data <- subdata[index, ]
test_data <- subdata[-index, ]
dtrain <- xgb.DMatrix(data=as.matrix(train_data[, -match(class_name,
colnames(train_data))]),
label=train_data$UPYLD, missing=NA)
dtest <- xgb.DMatrix(data=as.matrix(test_data[, -match(class_name,
colnames(test_data))]),
label=test_data$UPYLD, missing=NA)
param <- list(booster = "gblinear",
objective = "reg:linear",
subsample = 0.7,
max_depth = 2,
colsample_bytree = 0.7,
eval_metric = 'mae',
base_score = 0.012,
min_child_weight = c(1,2),
lambda = 1,
lambda_bias = 0.5,
alpha = 0,
eta = 0.1)
xgb_cv <- xgb.cv(data=dtrain,
params=param,
nrounds=100,
prediction=TRUE,
maximize=FALSE,
#folds=foldsCV,
nfold = 5,
early_stopping_rounds = 30,
print_every_n = 5
)
# Check best results and get best nrounds
print(xgb_cv$evaluation_log[which.min(xgb_cv$evaluation_log$test_mae_mean)])
nrounds <- xgb_cv$best_iteration
xgb <- xgb.train(params = param
, data = dtrain
# , watchlist = list(train = dtrain)
, nrounds = nrounds
, verbose = 1
, print_every_n = 5
#, feval = amm_mae
)
xgb.pred <- predict(xgb, dtest)
plot(test_data[[class_name]], xgb.pred , col = "blue", pch=4, main = 'GB')
res <- caret::postResample(as.numeric(test_data[[class_name]]), xgb.pred)
print(res)
}
}
tune_svm <- function(sdata, f){
tc <- tune.control(cross = 5)
tuned_par <- tune.svm(f, data = sdata,
gamma = 10^(-5:-1), cost = seq(1,3, by = 0.5),
epsilon = seq(0, 0.2, by = 0.01), tunecontrol = tc)
return(tuned_par)
}
run_SVM <- function(sdata, class_name, descriptor_list){
#index <- sample(1:nrow(sdata))
par(mfrow = c(2,2))
for(kw in c('SPAD', 'NDVI', 'LCC')){
i <- grep(kw, descriptor_list)
subdata <- sdata[, c(descriptor_list[c(1:4, i)], class_name)]
index <- createDataPartition(subdata[[class_name]], p = 0.7, list = FALSE)
train_data <- subdata[index, ]
test_data <- subdata[-index, ]
set.seed(42)
f <- as.formula(paste(class_name, '~ .'))
o <- tune_svm(train_data, f)
tuned_par <- list()
tuned_par$gamma <- o$best.parameters$gamma
tuned_par$epsilon <- o$best.parameters$epsilon
tuned_par$cost <- o$best.parameters$cost
save(o, file = 'o.dat')
svm_model <- svm(f, train_data, gamma = tuned_par$gamma,
epsilon = tuned_par$epsilon,
cost=tuned_par$cost,
type="eps-regression", kernel = 'radial')
w <- t(svm_model$coefs) %*% svm_model$SV
b <- -svm_model$rho
# in this 2D case the hyperplane is the line w[1,1]*x1 + w[1,2]*x2 + b = 0
predictYsvm <- predict(svm_model, test_data[, -match(class_name, colnames(test_data))])
res <- caret::postResample(as.numeric(test_data[[class_name]]), predictYsvm)
plot(test_data[[class_name]], predictYsvm, col = "green", pch=4, main =
paste(class_name, ' prediction using ', kw, sep = ''),
ylab= 'predicted', xlab = 'observed')
abline(a=-b/w[1,2], b=-w[1,1]/w[1,2], col="blue", lty=3, lwd=1)
text(10.5, 8, paste('R2 = ', round(res[[2]],2), sep =''))
print(res)
}
}
file_names <- list.files(path = '.', pattern = '.xlsx')
fnames <- read.csv('field_names.csv')
raw_trial_data <- c()
for(fname in file_names){
sname <- getSheetNames(fname)
sname <- sname[-grep('details', sname)]
for(tname in sname){
raw_trial_data <- rbind(raw_trial_data,
cbind(year = strsplit(fname, '\\ ')[[1]][3],
exp = strsplit(tname, '\\ ')[[1]][2],
readWorkbook(fname, tname)))
}
}
# raw_trial_data[['Var']] <- as.factor(raw_trial_data[['Var']])
# raw_trial_data[['N']] <- as.factor(raw_trial_data[['N']])
# raw_trial_data[['P']] <- as.factor(raw_trial_data[['P']])
# raw_trial_data[['S']] <- as.factor(raw_trial_data[['S']])
raw_trial_data <- raw_trial_data[, 1:50]
descriptor_list <- c("Var", "N", "P", "S",
"SPAD1", "SPAD2", "SPAD3",
"NDVI1", "NDVI2", "NDVI3",
"LCC1","LCC2","LCC3",
"LFW2", "LFW3",
"LDW2", "LDW3",
"LeafN2","LeafN3",
"LNU2", "LNU3",
"PFW1", "PDW1", "PlN1",
"PNU1", "PFW2", "PDW2", "PlN2", "PNU2",
"PFW3", "PDW3", "PlN3", "PNU3")
class_name <- 'UPYLD'
run_SVM(raw_trial_data, class_name, descriptor_list)
break
v <- c("LFW", "LDW", "LeafN", "LNU","SPAD","LCC","PFW","PDW","PlN","PNU","NDVI")
#m <- raw_trial_data
predictor <- v[1]
i <- grep(predictor, colnames(raw_trial_data))
m <- melt(raw_trial_data, id.vars = names(raw_trial_data)[1:17],
measure.vars = paste(predictor, 1:3, sep = ''),
variable.name = 'sample',
value.name = predictor)
colnames(m)[ncol(m)] = predictor
for(predictor in v[2:length(v)]){
print(predictor)
#i <- grep('sample', colnames(m))
#if(length(i) != 0){
# m <- m[, -i]
#}
i <- grep(predictor, colnames(raw_trial_data))
mm <- melt(raw_trial_data, id.vars = names(raw_trial_data)[1:17],
measure.vars = paste(predictor, 1:3, sep = ''),
variable.name = 'sample',
value.name = predictor)
m <- cbind(m, mm[,19])
colnames(m)[ncol(m)] = predictor
}
#Unpeeled Baby Corn Rs
UBC <- 9000
#The price of N is Rs. per ton.
PoN <- 12400
i <- grep('sample', colnames(m))
m$sample <- as.numeric(m$sample)
m[['sample']] <- as.factor(m[['sample']])
m[['Rep']] <- as.factor(m[['Rep']])
#' Output 1
m1 <- subset(m, exp != '3')
#m1 <- mutate(m1, Nv = (((as.numeric(N)-1) * 30) * PoN)/100)
#m1 <- mutate(m1, Yv = as.numeric(UPYLD) * UBC)
#m1[['Nv']] <- as.factor(m1[['Nv']])
break
pdf('babycorn_prelplots.pdf')
caption_n <- 'Col on right, left-to-right, indicate exp, Phosphorus level,
Spacing level, Var = Seed variety'
for(tname in names(m1)[8:17]){
tiff(paste(tname, '.tiff', sep = ''), res = 100)
i <- match(tname, fnames$tname)
p <- ggplot(m1, aes_string(x = 'N', y = tname, color = 'Var', fill = 'Var')) +
geom_boxplot(outlier.colour = "red", outlier.shape = 1, fatten = 1,
notch = FALSE, color="black") +
#geom_point(aes_string(x = 'Nv', y = 'Yv')) +
#geom_smooth() +
facet_grid(S + P + exp ~ year) +
theme(panel.background = element_rect(fill = "white", colour = "grey50"),
axis.text.x = element_text(size = 6),
axis.text.y = element_text(size = 6),
axis.title.x = element_text(size = 8),
axis.title.y = element_text(size = 8)) +
labs(title = "Babycorn data analytics preliminary results by year",
#subtitle = "Carat weight by Price",
caption = caption_n) +
xlab('Nitrogen levels') + ylab(fnames[i, 2])
print(p)
dev.off()
}
caption_n <- 'Cols on right, left-to-right, indicate exp, Phosphorus level,
Spacing level, Var = Seed variety'
for(tname in names(m1)[19:29]){
tiff(paste(tname, '.tiff', sep = ''), res = 100)
i <- match(tname, fnames$tname)
p <- ggplot(m1, aes_string(x = 'N', y = tname, color = 'Var', fill = 'Var')) +
geom_boxplot(outlier.colour = "red", outlier.shape = 1, fatten = 1,
notch = FALSE, color="black") +
#geom_point(aes_string(x = 'Nv', y = 'Yv')) +
#geom_smooth() +
facet_grid(S + P + exp ~ sample) +
theme(panel.background = element_rect(fill = "white", colour = "grey50"),
axis.text.x = element_text(size = 6),
axis.text.y = element_text(size = 6),
axis.title.x = element_text(size = 8),
axis.title.y = element_text(size = 8)) +
labs(title = "Babycorn data analytics preliminary results by sample",
#subtitle = "Carat weight by Price",
caption = caption_n) +
xlab('Nitrogen levels') + ylab(fnames[i, 2])
print(p)
dev.off()
}
dev.off()
|
877768eef339e735c20e57793986e2549ccab1d5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/emmeans/examples/rbind.emmGrid.Rd.R | 22ec369431f535cfdc2fe53a2f21b625e53df904 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | rbind.emmGrid.Rd.R | library(emmeans)
### Name: rbind.emmGrid
### Title: Combine or subset 'emmGrid' objects
### Aliases: rbind.emmGrid +.emmGrid [.emmGrid
### ** Examples
warp.lm <- lm(breaks ~ wool * tension, data = warpbreaks)
warp.rg <- ref_grid(warp.lm)
# Show only 3 of the 6 cases
summary(warp.rg[c(2,4,5)])
# Do all pairwise comparisons within rows or within columns,
# all considered as one faily of tests:
w.t <- pairs(emmeans(warp.rg, ~ wool | tension))
t.w <- pairs(emmeans(warp.rg, ~ tension | wool))
rbind(w.t, t.w, adjust = "mvt")
update(w.t + t.w, adjust = "fdr") ## same as abve except for adjustment
|
4801fda0b755959f97bef3118952599618e86bcd | 97106176566468697903370a30a9b68661167af3 | /ps4ex2.R | 9970808d5f71f83a344672023803a4d128779b69 | [] | no_license | rossihabibi/Econometrics4Law16 | 91d120dcba6793b432b49432b21ae8a8d795977a | a251c9de65edcb6a87a2e1ed41c4dc5268958c5d | refs/heads/master | 2020-12-24T06:13:50.626704 | 2016-12-09T20:45:33 | 2016-12-09T20:45:33 | 73,163,681 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,318 | r | ps4ex2.R | rm(list = ls())
library(tidyr)
library(dplyr) # for easy data manipulation
library(AER) # ivreg
library(foreign) # to use stata data formats
options(digits = 2)
download.file('http://fmwww.bc.edu/ec-p/data/wooldridge/mroz.dta', 'data/mroz.dta', mode="wb")
mroz <- read.dta("data/mroz.dta")
head(mroz)
dim(mroz) # check the number of observations and variables
# We want to study only the intensive margin, so keep only women who are in the labour force at the time of the study
mroz <- filter(mroz, inlf == 1)
## 1
iv1 <- ivreg(lwage ~ educ + exper + expersq | fatheduc + motheduc + exper + expersq, data = mroz)
summary(iv1)
# The effect of 1 extra year of education is 6% increase in wage, on average. Statistically different than 0, at the 10% level. Effect of experience is much more pronounced and estimated more precisely (Why is that? In the data, there is enough variation in exper and wage to (statistically) identify precisely the coefficient.)
# Effect of 1 more year of experience : a1 + 2*a2
## 2 - 3 - 4 Tests
s_iv1 <- summary(iv1, diagnostics = TRUE)
s_iv1$diagnostics
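# (illustrative) the diagnostics table reports the weak-instruments F test, the
# Wu-Hausman endogeneity test and the Sargan overidentification test; a single row
# can be pulled out with, e.g.:
# s_iv1$diagnostics["Weak instruments", ]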
## 2 - same test as weak instruments, alternatively, run the first stage regression and (jointly or not) test nullity of coefficients of instruments
stage1 <- lm(educ ~ fatheduc + motheduc + exper + expersq, data = mroz)
summary(stage1)
|
b4f35f2f74fe27cfe04404d434c8842349082b00 | 62cfdb440c9f81b63514c9e545add414dc4d5f63 | /man/qat_plot_noc_rule_1d.Rd | c36529c5b33c7e15e6e1a56cdf545f6755e1bbab | [] | no_license | cran/qat | 7155052a40947f6e45ba216e8fd64a9da2926be4 | 92975a7e642997eac7b514210423eba2e099680c | refs/heads/master | 2020-04-15T16:53:45.041112 | 2016-07-24T01:26:59 | 2016-07-24T01:26:59 | 17,698,828 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,942 | rd | qat_plot_noc_rule_1d.Rd | \name{qat_plot_noc_rule_1d}
\alias{qat_plot_noc_rule_1d}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot a NOC rule result}
\description{
A plot of the result of a NOC rule check will be produced.
}
\usage{
qat_plot_noc_rule_1d(flagvector, filename, measurement_vector = NULL,
max_return_elements = 0, measurement_name = "", directoryname = "",
plotstyle = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{flagvector}{The resulting flagvector of qat\_analyse\_noc\_rule\_1d}
\item{filename}{Name of the file without extension.}
\item{measurement_vector}{The measurement vector, which should be plotted}
\item{max_return_elements}{The maximum number of returning elements, which was used in the test.}
\item{measurement_name}{Name of the measurement.}
\item{directoryname}{Directory where the resulting file should be stored.}
\item{plotstyle}{A list with a qat color scheme.}
}
\details{
A plot will be produced, based on the resulting flagvector of qat\_analyse\_noc\_rule\_1d. With additional information on the parameters which were used while performing the test, this function will produce a more detailed plot. When no plotstyle is defined, the standard color scheme will be used. The resulting plot will be stored in the directory given by directoryname, under the given filename with the extension png.
}
\value{
No return value.
}
\author{Andre Duesterhus}
\seealso{\code{\link{qat_analyse_noc_rule_1d}}}
\examples{
vec <- c(1,2,3,4,4,4,5,5,4,3,NaN,3,2,1)
result <- qat_analyse_noc_rule_1d(vec, 1)
# this example produces a file exampleplot_noc.png in the current directory
qat_plot_noc_rule_1d(result$flagvector, "exampleplot_noc", measurement_vector=vec,
max_return_elements=result$max_return_elements, measurement_name="Result of Check")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ts} |
d3d1280a59b973d26e56a3da712bf413c13b65c0 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /ROI.plugin.qpoases/man/Example_01.Rd | 44e21bb85c5c9aaf38db09917d56f1d09c7d77f9 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 701 | rd | Example_01.Rd | \name{Example-1}
\title{Quadratic Problem 1}
\description{
\deqn{minimize \ \ 1/2 (x_1^2 + x_2^2 + x_3^2) - 5 x_2}
\deqn{subject \ to:}
\deqn{-4 x_1 - 3 x_2 \geq -8}
\deqn{ 2 x_1 + x_2 \geq 2}
\deqn{ - 2 x_2 + x_3 \geq 0}
\deqn{x_1, x_2, x_3 \geq 0}
}
\examples{
require("ROI")
A <- cbind(c(-4, -3, 0),
c( 2, 1, 0),
c( 0, -2, 1))
x <- OP(Q_objective(diag(3), L = c(0, -5, 0)),
L_constraint(L = t(A),
dir = rep(">=", 3),
rhs = c(-8, 2, 0)))
opt <- ROI_solve(x, solver="qpoases")
opt
## Optimal solution found.
## The objective value is: -2.380952e+00
solution(opt)
## [1] 0.4761905 1.0476190 2.0952381
}
|
cc6b3fb041b323628fffd80903b8170301139514 | 946f724c55b573ef4c0d629e0914bb6bca96f9e9 | /man/Posterior_phi.Rd | f32ab6c4b6ed2f611ef26e7c35dd991e1ab276ba | [] | no_license | stla/brr | 84fb3083383e255a56812f2807be72b0ace54fd6 | a186e16f22b9828c287e3f22891be22b89144ca6 | refs/heads/master | 2021-01-18T22:59:46.721902 | 2016-05-30T09:42:14 | 2016-05-30T09:42:14 | 35,364,602 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,208 | rd | Posterior_phi.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/posteriors.R
\name{Posterior_phi}
\alias{Posterior_phi}
\alias{dpost_VE}
\alias{dpost_phi}
\alias{ppost_VE}
\alias{ppost_phi}
\alias{qpost_VE}
\alias{qpost_phi}
\alias{rpost_phi}
\alias{spost_phi}
\title{Posterior distribution on the relative risk and the vaccine efficacy}
\usage{
dpost_phi(phi, a, b, c, d, S, T, x, y, ...)
dpost_VE(VE, a, b, c, d, S, T, x, y, ...)
ppost_phi(q, a, b, c, d, S, T, x, y, ...)
ppost_VE(q, a, b, c, d, S, T, x, y, ...)
qpost_phi(p, a, b, c, d, S, T, x, y, ...)
qpost_VE(p, a, b, c, d, S, T, x, y, ...)
rpost_phi(n, a, b, c, d, S, T, x, y)
spost_phi(a, b, c, d, S, T, x, y, ...)
}
\arguments{
\item{phi,VE,q}{vector of quantiles}
\item{a,b}{non-negative shape parameter and rate parameter of the prior Gamma distribution on the control incidence rate}
\item{c,d}{non-negative shape parameters of the prior distribution on the relative risk}
\item{S,T}{sample sizes in control group and treated group}
\item{x,y}{counts in the treated group and control group}
\item{...}{other arguments passed to \code{\link{Beta2Dist}}}
\item{p}{vector of probabilities}
\item{n}{number of observations to be simulated}
}
\value{
\code{dpost_phi} gives the density, \code{ppost_phi} the distribution function, \code{qpost_phi} the quantile function,
\code{rpost_phi} samples from the distribution, and \code{spost_phi}
gives a summary of the distribution.
}
\description{
Density, distribution function, quantile function and random
generation for the posterior distribution on relative risk or the vaccine efficacy.
}
\details{
The prior distribution on the relative risk \eqn{\phi} is the Beta2 distribution
with shape parameters \eqn{c} and \eqn{d} and scale parameter \eqn{(T+b)/S}.
}
\note{
\code{Posterior_phi} is a generic name for the functions documented.
}
\examples{
a <- 2; b <- 2; c <- 3; d <- 4; S <- 1; T <- 1; x <- 2; y <- 6
spost_phi(a, b, c, d, S, T, x, y, output="pandoc")
require(magrittr)
phi <- seq(0, 6, length.out=100)
phi \%>\% { plot(., dpost_phi(., a, b, c, d, S, T, x, y), type="l") }
phi \%>\% { lines(., dprior_phi(., b, c, d, S, T), col="red") }
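## illustrative extra check: posterior probability that the relative risk is below 1
ppost_phi(1, a, b, c, d, S, T, x, y)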
}
|
8f54b2deb1719945d90cf5168aa98b4d3c659614 | 4e38799dd969f4c2fef6b34e9539f63f8b17d666 | /example/archived_modules/scope_02_TMT_int.R | d5000ce61b825bf94f6d2f552743c27950cc2139 | [
"MIT"
] | permissive | SlavovLab/DO-MS | f97eb2626dd272156e19a25fe50488a036c59f4d | e637a3bd5bfd32558f33e0a23b7302109e3088f7 | refs/heads/master | 2023-08-08T01:37:54.457583 | 2023-07-30T16:30:25 | 2023-07-30T16:30:25 | 141,730,083 | 21 | 8 | MIT | 2023-07-30T16:30:27 | 2018-07-20T15:42:00 | HTML | UTF-8 | R | false | false | 2,807 | r | scope_02_TMT_int.R | init <- function() {
type <- 'plot'
box_title <- 'Reporter ion intensity'
help_text <- 'Plotting the TMT reporter intensities for a single run.'
source_file <- 'evidence'
.validate <- function(data, input) {
validate(need(data()[['evidence']], paste0('Upload evidence.txt')))
# require reporter ion quantification data
validate(need(any(grepl('Reporter.intensity.corrected', colnames(data()[['evidence']]))),
paste0('Loaded data does not contain reporter ion quantification')))
}
.plotdata <- function(data, input) {
TMT_labels <- c('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11')
plotdata <- data()[['evidence']] %>%
dplyr::filter(Type != "MULTI-MATCH") %>%
dplyr::select(Raw.file, starts_with('Reporter.intensity.corrected')) %>%
# rename TMT channels - match the integer at the end of the column name
dplyr::rename_at(vars(starts_with('Reporter.intensity.corrected')),
funs(TMT_labels[as.numeric(str_extract(., '\\d+$'))])) %>%
tidyr::gather('Channel', 'Intensity', -c(Raw.file)) %>%
# reorder manually instead of alphabetically so it doesn't put 10 and 11 before 2
# also reverse so the carriers are at the top
dplyr::mutate(Channel=factor(Channel, levels=rev(TMT_labels))) %>%
dplyr::mutate(Intensity=log10(Intensity)) %>%
#dplyr::filter(!is.infinite(Intensity) & !is.na(Intensity))
dplyr::filter(!is.na(Intensity))
plotdata$Intensity <- ifelse(is.infinite(plotdata$Intensity),2,plotdata$Intensity)
return(plotdata)
}
.plot <- function(data, input) {
.validate(data, input)
plotdata <- .plotdata(data, input)
# compute median channel intensities
channel_medians <- plotdata %>%
group_by(Channel) %>%
summarise(m=median(Intensity, na.rm=T))
# map back to plotdata
plotdata$median_intensity <- channel_medians[plotdata$Channel,]$m
ggplot(plotdata) +
geom_violin(aes(x=Channel, y=Intensity, group=Channel, fill=Channel), alpha=0.6,
kernel='gaussian') + # passes to stat_density, makes violin rectangular
geom_point(aes(x=Channel, y=median_intensity), color='red', shape=3, size=2) +
facet_wrap(~Raw.file, nrow = 1) +
coord_flip() +
scale_fill_discrete(guide=F) +
scale_x_discrete(name='TMT Channel') +
labs(title=NULL, x='TMT Channel', y=expression(bold('Log'[10]*' RI Intensity'))) +
theme_bw() + # make white background on plot
theme_base(input=input)
}
return(list(
type=type,
box_title=box_title,
help_text=help_text,
source_file=source_file,
validate_func=.validate,
plotdata_func=.plotdata,
plot_func=.plot
))
}
|
6971a84bfe83115c0abb88684860ac2a1f19e79a | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /perccal/R/RcppExports.R | d4257b5a7db74fea311e411436c1a8b4abd4c017 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 479 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
sample_rcpp <- function(N, nsamp) {
.Call('_perccal_sample_rcpp', PACKAGE = 'perccal', N, nsamp)
}
Cquantile <- function(xx, p) {
.Call('_perccal_Cquantile', PACKAGE = 'perccal', xx, p)
}
Cdboot_multi <- function(xxyy, lgridlo, lgridhi, B, B2, G) {
.Call('_perccal_Cdboot_multi', PACKAGE = 'perccal', xxyy, lgridlo, lgridhi, B, B2, G)
}
|
4ee650ac361fd960f8ee8d002735722d35833868 | 5cae7a96ee29b4561c5c9e8c50106b6e3f8c3151 | /cachematrix.R | ea3b68eadb23ca480229a5c9bc57da7974feca23 | [] | no_license | HadidVera/ProgrammingAssignment2 | 09916d8742d3cbffc03b209dd5269abc1da2fce9 | 04f9f01bc7c677afb67df0662284cfc4090d5a51 | refs/heads/master | 2021-04-28T10:32:31.701697 | 2018-02-19T16:38:40 | 2018-02-19T16:38:40 | 122,069,152 | 0 | 0 | null | 2018-02-19T13:51:18 | 2018-02-19T13:51:18 | null | UTF-8 | R | false | false | 1,465 | r | cachematrix.R | # Coursera - R programming - Programming assignment (week 3)
# Assignment: Caching the Inverse of a Matrix
# HVera
# February 2018
####
# Matrix inversion is usually a costly computation and there may be some benefit to caching
# the inverse of a matrix rather than compute it repeatedly.
# The following pair of functions cache the inverse of a matrix.
#### function makeCacheMatrix
# This function creates a special "list" object that can cache its inverse and contain
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the inverse of the matrix
# 4. get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()){
m_inv <- NULL
set <- function(y) {
x <<- y
m_inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) m_inv <<- inverse
getinverse <- function() m_inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
#### function cacheSolve
# This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
# If the inverse has already been calculated (and the matrix has not changed), then the function
# should retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
m_inv <- x$getinverse()
if(!is.null(m_inv)) {
message("getting cached data")
return(m_inv)
}
data <- x$get()
m_inv <- solve(data)
x$setinverse(m_inv)
m_inv
}
|
62cd1d3ec02946d05c28a410ea76be66a28d3de1 | a8c00b380003dd12b4957ba02501574f53ef46a6 | /run_analysis.R | 7ff10c4418907a74abc36b1d761d4835e5d2148a | [] | no_license | problemsny/Coursera-Week3-CourseProject | 802366d07dc86b0298b99ad50cfe413cb5d7be02 | 2798ceeaf57d7f5e417a6ad18c3d6ea403d7d192 | refs/heads/master | 2021-01-01T20:05:32.555052 | 2015-05-19T00:54:01 | 2015-05-19T00:54:01 | 35,848,819 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,749 | r | run_analysis.R | # This is for Coursera - Week 3 - Course Project
# Assignment to get and clean data collected on 30 subjects doing 6 different activities
# Of those 30, 70% were selected for training (the "train" files) and 30% as test (the "test" files)
# Refer to the README.txt for further explanation.
# First part - Reading all files into R
#reading the test data
xtest <- read.table("test/X_test.txt")
ytest <- read.table("test/y_test.txt")
subjecttest <- read.table("test/subject_test.txt")
#reading the train data
xtrain <- read.table("train/X_train.txt")
ytrain <- read.table("train/y_train.txt")
subjecttrain <- read.table("train/subject_train.txt")
#reading identifiers for later use
columnnames <- read.table("features.txt") ## This will be used to rename the column names
activities <- read.table("activity_labels.txt") ## This will be used to rename the Activity from numbers
#bind the datasets together
xdata <- rbind(xtest, xtrain)
ydata <- rbind(ytest, ytrain)
subjectdata <- rbind(subjecttest, subjecttrain)
# Second part - extract only mean and std from datasets
#look for the words mean() or std() in columnnames
meanstd <- grep("-(mean|std)\\(\\)", columnnames$V2)
#subset the dataset by mean and std
xdata <- xdata[,meanstd]
# Third part - Use descriptive names for activities
#rename the activity number with associated activity description
ydata[,1] <- activities[ydata[,1],2]
# Fourth part - Label the variable names appropriately
#rename ydata and subjectdata with single test - rename xdata with columnnames
names(subjectdata) <- "subject"
names(ydata) <- "activity"
names(xdata) <- columnnames[meanstd, 2]
# Put them all together nicely :)
finaloutput1 <- cbind(subjectdata, ydata, xdata) |
8686d26f321b36d916fe0bc21547d9c7bba00d4b | ddc2b096e681398f576a95e40c7fd366b65f50a2 | /SDPSimulations/AssortHetHeatMapMK.R | 84949f446e2c2bdc592dd90e304765b977efee08 | [] | no_license | sbellan61/SDPSimulations | f334d96743c90d657045a673fbff309106e45fce | cfc80b116beafabe3e3aed99429fb03d58dc85db | refs/heads/master | 2021-03-27T20:48:25.857117 | 2017-09-19T20:13:37 | 2017-09-19T20:13:37 | 21,144,447 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,020 | r | AssortHetHeatMapMK.R | ####################################################################################################
## Makes control files for each analysis within which each line giving one R CMD BATCH command line
## to run on a cluster.
####################################################################################################
rm(list=ls()) # clear workspace
if(grepl('tacc', Sys.info()['nodename'])) setwd('/home1/02413/sbellan/DHSProject/SDPSimulations/')
if(grepl('nid', Sys.info()['nodename'])) setwd('/home1/02413/sbellan/SDPSimulations/SDPSimulations/')
if(grepl('stevenbellan', Sys.info()['login'])) setwd('~/Documents/R Repos/SDPSimulations/SDPSimulations/')
source("SimulationFunctions.R") # load simulation functions from script
hazs <- c('bmb','bfb','bme','bfe','bmp','bfp') # transmission coefficient names, for convenience
nc <- 48 # core per simulation
## source('AssortHetHeatMapMK.R')
####################################################################################################
## Simulate Tanzania but with assortativity/heterogeneity & varied amounts of pre-couple extra-couple
####################################################################################################
## cc <- which(ds.nm=='Tanzania')
countries <- 15 ## 1:length(ds.nm)
each.val <- 200 ## equates to ~100,000 couples
## blocks.beh <- expand.grid(country = countries, het.beh=T, het.beh.sd=seq(0,3, by = .1), het.beh.cor=seq(0,1, by = .05), bmb.sc = 1)
## blocks.beh$bfb.sc <- blocks.beh$bme.sc <- blocks.beh$bfe.sc <- blocks.beh$bmb.sc ## all contact coefficients scaled same way
## blocks.beh$bmp.sc <- blocks.beh$bfp.sc <- 1
## blocks.beh$het.gen <- F; blocks.beh$het.gen.sd <- blocks.beh$het.gen.cor <- 0
## blocks.beh <- blocks.beh[with(blocks.beh, order(country, het.beh.sd, het.beh.cor, bmb.sc)),]
blocks.gen <- expand.grid(country = countries, het.gen=T, het.gen.sd=seq(0,3, by = .1), het.gen.cor=seq(0,1, by = .05), bmb.sc = 1)
blocks.gen$bfb.sc <- blocks.gen$bme.sc <- blocks.gen$bfe.sc <- blocks.gen$bmb.sc ## all contact coefficients scaled same way
blocks.gen$bmp.sc <- blocks.gen$bfp.sc <- 1
blocks.gen$het.beh <- F; blocks.gen$het.beh.sd <- blocks.gen$het.beh.cor <- 0
blocks.gen <- blocks.gen[with(blocks.gen, order(country, het.beh.sd, het.beh.cor, bmb.sc)),]
## blocks <- as.data.table(rbind(blocks.beh, blocks.gen))
blocksg <- data.table(blocks.gen)
blocksg$jobnum <- 1:nrow(blocksg)
blocksg
nn <- nrow(blocksg)
out.dir <- file.path('results','AssortHetHeatMap2')
blocksg[,c('group','s.epic','s.demog','scale.by.sd','scale.adj','infl.fac','maxN','sample.tmar','psNonPar','each'):= .(country,country, country, T, 1, 200, 10^5, F, F, each.val)]
blocksg[,c('seed','out.dir','sim.nm','doSubs'):=.(1,out.dir, 'AHH', F)]
blocksg[,c('tmar','tint'):=.('tmar=(65*12):(113*12)',113*12)]
blocksg[,c('acute.sc'):=.(5)]
load(file.path(out.dir,'CFJobsToDo.Rdata'))
blocksgTD <- blocksg[jobnum %in% jtd]
if(!file.exists(out.dir)) dir.create(out.dir) # create directory if necessary
if(!file.exists(file.path(out.dir,'Rdatas'))) dir.create(file.path(out.dir,'Rdatas')) # create directory if necessary
if(!file.exists(file.path(out.dir,'Routs'))) dir.create(file.path(out.dir,'Routs')) # create directory if necessary
sink("AssortHetHeatMap.txt") # create a control file to send to the cluster
for(ii in blocksgTD[,jobnum]) { #blocksgTD[,jobnum]) {
cmd <- "R CMD BATCH '--no-restore --no-save --args"
cmd <- addParm(cmd, blocksgTD, ii) ## remove lab since it has spaces & isn't used in psrun
cmd <- paste0(cmd, " ' SimulationStarter.R ", file.path(out.dir,'Routs', paste0('AHHsim', sprintf("%06d", ii),'.Rout')),
sep='')
cat(cmd) # add command
cat('\n') # add new line
}
sink()
save(blocksg, file = file.path(out.dir,'blocksg.Rdata')) # these are country-acute phase specific blocks
print(nrow(blocksg))
print(nrow(blocksgTD))
|
f6fd1826e043fd39b25e9ac007e176765710a905 | b12c8b99b619a1f084cc85ed316f287b2ef29274 | /boxer_data_cleaning.R | dc29d853443fb5011f7098d5ee6b15194a5a823f | [] | no_license | LocalSymmetry/ChampionBoxers | a8cc094b521f0e072a21d35bc01b54ac1aac9382 | 4eae55c17b39460b0a82fa089a9196389cc5bdcf | refs/heads/master | 2021-01-21T21:15:36.632241 | 2017-06-21T14:36:14 | 2017-06-21T14:36:14 | 94,803,710 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,215 | r | boxer_data_cleaning.R | # Data cleaning for "A Search for Champion Boxers".
# Data were scraped from BoxRec.com for title fights
# of all 5 major boxing organizations (WBO, WBA, WBC, IBO, IBF).
#
#
# Data source: BoxRec.com
# Raw variable information for each league
# division: weight class of title fight (17 factors)
# boxer1id: Boxrec.com id of the first listed boxer
# boxer1: name of the first listed boxer
# boxer2id: Boxrec.com id of the second listed boxer
# boxer2: name of the second listed boxer
# date: date of the title fight in "YYYY-MM-DD" format
# fightid: Boxrec.com id of the fight
# place: place the fight took place in
# WinLoss: determination of if boxer1 won against boxer2
# decision: how the match was decided
# othertext: other text placed with the match on BoxRec.com. Contains raw
# HTML.
library(dplyr)
library(tidyr)
# We will use readr due to names being encoded in UTF-8.
library(readr)
# Note, there will be duplicate matches in the data sets
# as title fights can be for multiple leagues.
wbo.df <- read_csv("Data/WBO.csv", na = c(""))
wba.df <- read_csv("Data/WBA.csv", na = c(""))
wbc.df <- read_csv("Data/WBC.csv", na = c(""))
ibo.df <- read_csv("Data/IBO.csv", na = c(""))
ibf.df <- read_csv("Data/IBF.csv", na = c(""))
# Data Preprocessing
# All five data sets have the same column information.
# We will concatenate all five data frames, and then use
# fightid to remove duplicate fights for title fights
# in multiple leagues at once.
boxing.df <- rbind(wbo.df, wba.df, wbc.df, ibo.df, ibf.df)
boxing.df <- boxing.df[!duplicated(boxing.df$fightid), ]
# Data Cleaning
# Remove the HTML from "othertext" column.
HTMLRemoval <- function(string){
outstring <- gsub("<.*?>", "", string)
# There needs to be a space after "title", or it runs into the next sentence.
outstring <- gsub("title", "title ", outstring)
return(outstring)
}
# First, we gather a list of boxer id's to scrape boxer information off of
# BoxRec.com
boxers <- stack(lapply(boxing.df[, c('boxer1id', 'boxer2id')], as.character))
boxers <- unique(boxers$values)
write.csv(boxers, "Data/BoxerIDs.csv", row.names = FALSE)
# Now we will clean the variables in the boxing.df data frame.
# First, we will remove rows for fights that have not yet been decided.
boxing.df <- boxing.df[!is.na(boxing.df$decision), ]
# We will now clean HTML.
# We will remove the HTML from the boxer ID's and only leave the number.
boxing.df$boxer1id <- gsub("http://boxrec.com/boxer/", "",
x = boxing.df$boxer1id)
boxing.df$boxer1id <- factor(boxing.df$boxer1id)
boxing.df$boxer2id <- gsub("http://boxrec.com/boxer/", "",
x = boxing.df$boxer2id)
boxing.df$boxer2id <- factor(boxing.df$boxer2id)
# We will do the same for the fight IDs.
boxing.df$fightid <- gsub("http://boxrec.com/show/", "",
x = boxing.df$fightid)
boxing.df$fightid <- factor(boxing.df$fightid)
boxing.df$othertext <- sapply(boxing.df$othertext, HTMLRemoval)
# We will now turn the date field into timestamps.
boxing.df$date <- as.Date(boxing.df$date, format="%Y-%m-%d")
# Fix Losses in WinLoss.
boxing.df$WinLoss <- sapply(boxing.df$WinLoss,
function(string) gsub("L ", "L", string))
boxing.df$WinLoss <- factor(boxing.df$WinLoss)
# Save a copy of the full unique data frame.
write_csv(boxing.df, "Data/AllLeagues.csv")
# Cleaning of Boxer Information Scrape
#
# Data source: www.BoxRec.com
# Raw variable information
# boxerid: Boxrec.com id of the boxer
# boxer: name of the boxer (has escape characters)
# attributetype: The type of attribute from the boxer's biography (raw HTML)
# attributevalue: The value of attribute from the boxer's biography (raw HTML)
# Remove escape characters and raw HTML.
AttributeClean <- function(string){
# Remove HTML
outstring <- gsub("<.*?>", "", string)
# Remove escape characters
outstring <- gsub("\n", "", outstring)
outstring <- gsub("\t", "", outstring)
outstring <- gsub("Â", "", outstring)
# Remove long spaces
outstring <- gsub(" ", "", outstring)
return(outstring)
}
# Clean length strings.
LengthClean <- function(string){
outstring <- gsub("½", ".5", string)
outstring <- gsub("cm", "", outstring)
outstring <- gsub("″", "", outstring)
outstring <- gsub("′", "", outstring)
outstring <- gsub("\\s", "", outstring)
return(outstring)
}
# Convert imperial height units to feet.
ImpToFeet <- function(string){
# The first character is feet, the remaining characters are inches.
outnumber <- (as.numeric(substring(string,1,1))
+ as.numeric(substring(string,3))/12.0)
return(outnumber)
}
# Strip the percent sign off of KOs.
StripPercent <- function(string){
outstring <- gsub("%", "", string)
return(outstring)
}
# Remove website url from boxerid.
CleanBoxerID <- function(boxerID){
return(gsub("http://boxrec.com/boxer/", "", boxerID))
}
boxer.info.df <- read_csv("Data/BoxerInfo.csv")
# Clean the boxername, attributetype, and attributevalue variables.
boxer.info.df$boxername <- sapply(boxer.info.df$boxername, AttributeClean)
boxer.info.df$attributetype <- sapply(boxer.info.df$attributetype,
AttributeClean)
boxer.info.df$attributevalue <- sapply(boxer.info.df$attributevalue,
AttributeClean)
# Convert the variables back to factors.
boxer.info.df$boxername <- factor(boxer.info.df$boxername)
boxer.info.df$attributetype <- factor(boxer.info.df$attributetype)
boxer.info.df$attributevalue <- factor(boxer.info.df$attributevalue)
# Save the clean version.
write_csv(boxer.info.df, "Data/BoxerInfoClean.csv")
# Using read.csv to change strings to NA.
boxer.info.df <- read_csv("Data/BoxerInfoClean.csv",
na = c("", "NA", " "))
# DROP NAs in attributetype.
boxer.info.df <- boxer.info.df[!is.na(boxer.info.df$attributetype), ]
# DROP NAs in attributevalue.
boxer.info.df <- boxer.info.df[!is.na(boxer.info.df$attributevalue), ]
# Resave the clean version sans NA's.
write_csv(boxer.info.df, "Data/BoxerInfoClean.csv")
# Now, we will make every attribute type its own column and aggregate the values
# for each boxer.
boxer.info.spread <- spread(boxer.info.df, attributetype, attributevalue)
# Convert bouts and rounds to numeric
boxer.info.spread$bouts <- as.numeric(boxer.info.spread$bouts)
boxer.info.spread$rounds <- as.numeric(boxer.info.spread$rounds)
# Clean boxerid so it is without the website handle.
boxer.info.spread$boxerid.num <- sapply(boxer.info.spread$boxerid, CleanBoxerID)
# Fix born and debut dates.
boxer.info.spread$debut <- as.Date(boxer.info.spread$debut, format="%Y-%m-%d")
boxer.info.spread$born <- as.Date(boxer.info.spread$born, format="%Y-%m-%d")
# Split variables with multiple forms of information.
# Death date also contains age at death. Two age values are "not known" which
# will be replaced with NA.
boxer.info.spread <- separate(boxer.info.spread, 'death date',
c("date.of.death", "age.of.death"), sep=" / age ")
boxer.info.spread$date.of.death <- as.Date(boxer.info.spread$date.of.death,
format="%Y-%m-%d")
boxer.info.spread$age.of.death <- as.numeric(boxer.info.spread$age.of.death)
# Reach has inches and cm.
boxer.info.spread <- separate(boxer.info.spread, reach,
c("reach.in", "reach.cm"), sep=" / ")
boxer.info.spread$reach.cm <- sapply(boxer.info.spread$reach.cm, LengthClean)
boxer.info.spread$reach.in <- sapply(boxer.info.spread$reach.in, LengthClean)
boxer.info.spread$reach.cm <- as.numeric(boxer.info.spread$reach.cm)
boxer.info.spread$reach.in <- as.numeric(boxer.info.spread$reach.in)
# Height has feet, inches, and cm.
boxer.info.spread <- separate(boxer.info.spread, height,
c("height.imp", "height.cm"), sep=" / ")
boxer.info.spread$height.cm <- sapply(boxer.info.spread$height.cm, LengthClean)
boxer.info.spread$height.imp <- sapply(boxer.info.spread$height.imp, LengthClean)
boxer.info.spread$height.imp <- sapply(boxer.info.spread$height.imp, ImpToFeet)
boxer.info.spread$height.cm <- as.numeric(boxer.info.spread$height.cm)
# Turn KOs into a percentage.
boxer.info.spread$KO.percent <- sapply(boxer.info.spread$KOs, StripPercent)
boxer.info.spread$KO.percent <- as.numeric(boxer.info.spread$KO.percent)/100
# Save this version.
write_csv(boxer.info.spread, "Data/BoxerInfoByAttributes.csv")
# Variable Information
# boxerid: BoxRec.com url for each boxer.
# boxername: The name of the boxer.
# alias: The alias used for the boxer in promotions.
# birth name: The given birth name for the boxer.
# birth place: The place the boxer was born.
# born: The year the boxer was born.
# bouts: The number of bouts a boxer has fought in.
# date.of.death: Date of boxer's death.
# age.of.death: Boxer's age at death.
# debut: Date the boxer debuted.
# division: Weight class of the boxer.
# global ID: The Global ID for the boxer.
# height.imp: The height of the boxer in feet.
# height.cm: The height of the boxer in cm.
# KOs: Character format for percentage of wins for a boxer that are KO's.
# manager/agent: Manager for the boxer.
# promoter: Promoter for the boxer.
# ranking: BoxRec rankings. This column has not been cleaned.
# reach.in: The length of outstretched arms measured from arm to arm in inches.
# reach.cm: The length of outstretched arms measured from arm to arm in cm.
# residence: Current place of residence.
# role: The role in boxing the boxer plays. May include Manager, Promoter, etc.
# rounds: The number of rounds the boxer has fought in.
# stance: The boxing stance style: orthodox or southpaw.
# titles held: A list of titles held by the boxer.
# US ID: The US ID for the boxer.
# VADA CBP: If the boxer is enrolled in an anti-doping organization.
# KO.percent: Numerical format for percentage of wins for a boxer that are KO's.
# boxerid.num: Numerical ID for the boxer. Aligned with BoxRec.com. |
2defde7738b77642d8bbc9261c0e2024a039c8c1 | bb397c245cbe62a30db267f0f8ea54f1fd164119 | /Rfunctions/GllimFitIID.R | b9c8eefd6974cd93b3ca0923bf43a9605e63c5c2 | [] | no_license | Trung-TinNGUYEN/GLLiM-ABC-v0 | 503b0734c6669d9aad40b041718bd19fe9b86bec | f30ed29982b7244fe3d505ae9fe17c81b17c91e8 | refs/heads/main | 2023-07-26T01:21:50.087487 | 2021-08-21T23:22:46 | 2021-08-21T23:22:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,390 | r | GllimFitIID.R | GllimFitIID<-function(thetadata, ydata,iR, K, constr=list(Sigma="")){
# %%%%%%%% Gaussian Locally Linear Mapping and pre-computed quantities %%%%%%%%%
# % Description: Fit a GLLiM for iR IID data model using the xLLiM package with constraints
# % cstr= constr on covariance matrices Sigmak, can be
# constr=list(Sigma="") for FULL covariances Sigmak (DEFAULT)
# Remark: this constraint is almost equivalent to bloc diagonal covariances with blocs
# of size D if y is of size DxiR.
# constr = list(Sigma="d") for diagonal covariances
# constr = list(Sigma="i") for isotropic covariances (but for this one, used the ISO version in the standard GLLiM, faster)
# etc... see xlllim: 'd'=diag., 'i'=iso., '*'=equal for all k, 'v'=equal det. for all k
# using N associated observations
# % ydata and parameters are in thetadata.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%% Input %%%%. NOTE that R=iR here
# %- thetadata (LxN) % N training parameters values (L)
# %- ydata (DRxN) % N training observations of dimension DR (the R iid obi of dim D are stacked at first)
#### DR x N , R iid replications (R=iR just because R is already used)
#### iR = number of iid replications in each sample
# %- K (integer) % number of components in the mixtures
# %%%% Output %%%%
# % -mod : the output of the gllim function in xllim that contains
# % the parameter Psi: mod$c, etc...
# % - c (LxK) % Gaussian means of theta
# % - Gamma (LxLxK) % Gaussian covariances of theta
# % - pi (1xK) % Gaussian weights
# % - A (DxLxK) % Affine transformation matrices
# % - b (DxK) % Affine transformation vectors
# % - Sigma (DxDxK) % Error covariances assumed FULL here
# % -invGamma (LxLxK) % inverses of matrices Gammak , do not depend on y or z
# % -invSigma DxDxK inverses of Sigmak, do not depend on y or z
##### % -covstar (LxLxK) % matrices Sigma_k^* covariance matrices of the posterior
##### the log determinants can be precomputed too
##### the Sigmak* and log det Vk in the iid case, quantites independent of y
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# modified fromm xllim to provide a GLLiM iid estimation of the direct
# parameters, constraint on covariances can be chosen here
# NB:
# The case D=1 has to be treated separately due to the fact that 1xM matrices are turned into
# a vector by R.. To be merged into a single function later
# gllimIID1D not checked, working?
D=nrow(ydata)/iR
if (D==1) {resgllim <- gllimIID1D(thetadata, ydata,iR, in_K=K,cstr=constr,verb=0)
# pre-computation (to save time) of quantities that are independant on y (or z),
# same covstar and log det and inverse Gamma matrices for all y
Aa<-resgllim$A # D x L x K
L<-dim(Aa)[2]
#D<-dim(Aa)[1]
# in case some clusters disappear...
K<-dim(Aa)[3]
#K<-dim(Aa)[3]
Sigmaa<-resgllim$Sigma
Gammaa<-resgllim$Gamma
invSigmaa<-Sigmaa
invGammaa<-Gammaa
covstarRa<-Gammaa
# just for initial allocation
# dim L x L x K
logdetVa<-NULL
# inverse of Gammak and Sigmakstar and its log-determinant
for (k in 1:K){
invGammaa[,,k]<-chol2inv(chol(Gammaa[,,k]))
invSigmaa[,,k]<-chol2inv(chol(Sigmaa[,,k]))
# attention numerical issue when mat non symmetric before inversion
# no transpose because D=1 make it a vector anyway
tempMat<-iR*Aa[,,k]%*%t(Aa[,,k])/invSigmaa[,,k]
covstarRa[,,k]=chol2inv(chol(invGammaa[,,k]+tempMat))
#tempMat<-diag(L)+iR*t(Aa[,,k])%*%invSigmaa[,,k]%*%Aa[,,k]%*%Gammaa[,,k]
##covstarRa[,,k]=chol2inv(chol(invGammaa[,,k]+ R*t(Aa[,,k])%*%Aa[,,k]/Sigmaa[1,1,k]))
#covstarRa[,,k]=Gammaa[,,k]%*%chol2inv(chol(tempMat))
dettemp<-det(diag(L)+tempMat%*%Gammaa[,,k])
# since Sigma is 1x1
logdetVa<-c(logdetVa, iR*log(Sigmaa[,,k])+log(dettemp))
#logdetVa<-c(logdetVa, iR*log(det(Sigmaa[,,k]))+log(dettemp))
} # end for
} # end if
else{
resgllim <- gllimIID(thetadata, ydata,iR, in_K=K,cstr=constr,verb=0)
# pre-computation (to save time) independant on y (or z),
# same covstar and log-determinant and inverse Gamma matrices for all y
Aa<-resgllim$A
L<-dim(Aa)[2]
D<-dim(Aa)[1]
# in case some cluster disappears...
K<-dim(Aa)[3]
#K<-dim(Aa)[3]
Sigmaa<-resgllim$Sigma
Gammaa<-resgllim$Gamma
invSigmaa<-Sigmaa
invGammaa<-Gammaa
covstarRa<-Gammaa
# just for initial allocation
# dim L x L x K
logdetVa<-NULL
# inverse of Gammak and Sigmakstar and its log det
for (k in 1:K){
invGammaa[,,k]<-chol2inv(chol(Gammaa[,,k]))
invSigmaa[,,k]<-chol2inv(chol(Sigmaa[,,k]))
# attention numerical issue when mat non symmetric before inversion
tempMat<-iR*t(Aa[,,k])%*%invSigmaa[,,k]%*%Aa[,,k]
covstarRa[,,k]=chol2inv(chol(invGammaa[,,k]+tempMat))
#tempMat<-diag(L)+iR*t(Aa[,,k])%*%invSigmaa[,,k]%*%Aa[,,k]%*%Gammaa[,,k]
##covstarRa[,,k]=chol2inv(chol(invGammaa[,,k]+ R*t(Aa[,,k])%*%Aa[,,k]/Sigmaa[1,1,k]))
#covstarRa[,,k]=Gammaa[,,k]%*%chol2inv(chol(tempMat))
dettemp<-det(diag(L)+tempMat%*%Gammaa[,,k])
logdetVa<-c(logdetVa, iR*log(det(Sigmaa[,,k]))+log(dettemp))
}
} # end else
list("mod"=resgllim, "invGamma"=invGammaa ,"invSigma"=invSigmaa , "covstarR"=covstarRa,"logdetVR"=logdetVa)
}
|
b6bbce5339dfdaa0e8a666675ce1065bbaa009a5 | a7cef5b06a271bbe30affa1b45235ae2e814b87b | /man/xcusum.sf.Rd | cc96192831f4b93500a3e7ba15e4a4ad21d0c2a3 | [] | no_license | cran/spc | 23075576d12adf3641607accce3e2d4149d55e5c | 296041a2d3a3b082415fa676bceaded3f0d39f08 | refs/heads/master | 2022-11-10T04:04:21.939750 | 2022-10-24T11:30:02 | 2022-10-24T11:30:02 | 17,699,981 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,741 | rd | xcusum.sf.Rd | \name{xcusum.sf}
\alias{xcusum.sf}
\title{Compute the survival function of CUSUM run length}
\description{Computation of the survival function of the Run Length (RL) for CUSUM control charts monitoring normal mean.}
\usage{xcusum.sf(k, h, mu, n, hs=0, sided="one", r=40)}
\arguments{
\item{k}{reference value of the CUSUM control chart.}
\item{h}{decision interval (alarm limit, threshold) of the CUSUM control chart.}
\item{mu}{true mean.}
\item{n}{calculate sf up to value \code{n}.}
\item{hs}{so-called headstart (enables fast initial response).}
\item{sided}{distinguishes between one- and two-sided CUSUM control chart by choosing \code{"one"} and \code{"two"}, respectively.}
\item{r}{number of quadrature nodes, dimension of the resulting linear equation system is equal to \code{r+1}.}
}
\details{
The survival function P(L>n) and derived from it also the cdf P(L<=n) and the pmf P(L=n) illustrate
the distribution of the CUSUM run length. For large n the geometric tail could be exploited. That is,
with reasonable large n the complete distribution is characterized.
The algorithm is based on Waldmann's survival function iteration procedure.
}
\value{Returns a vector which resembles the survival function up to a certain point.}
\references{
K.-H. Waldmann (1986),
Bounds for the distribution of the run length of one-sided and two-sided CUSUM quality control schemes,
\emph{Technometrics 28}, 61-67.
}
\author{Sven Knoth}
\seealso{
\code{xcusum.q} for computation of CUSUM run length quantiles.
}
\examples{
## Waldmann (1986), one-sided CUSUM, Table 2
k <- .5
h <- 3
mu <- 0 # corresponds to Waldmann's -0.5
SF <- xcusum.sf(k, h, 0, 1000)
plot(1:length(SF), SF, type="l", xlab="n", ylab="P(L>n)", ylim=c(0,1))
#
}
\keyword{ts}
|
94df418cd249f58467c2f67b961b7db96182b0b7 | 585a9c9b373cc7e0e048201f3d78ebf26b303e3d | /bin/sorts.R | e9a0fc098301860498ddaad3a6620df2b1b87413 | [] | no_license | rlowe/stalk2graph | 80cf07c852c012cb282ab2d7f8e3fa17324b12b9 | 11cc70b0a85d32b46efdf792e6b27151e9bfdd37 | refs/heads/master | 2020-04-10T16:29:30.567014 | 2016-05-01T06:56:50 | 2016-05-01T06:56:50 | 8,306,354 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,714 | r | sorts.R | args<-commandArgs(trailingOnly = FALSE)
require(ggplot2)
sorts_data<-read.csv(args[length(args)-1])
smp<-diff(sorts_data$merge_passes)
srange<-diff(sorts_data$range)
srows<-diff(sorts_data$rows)
ss<-diff(sorts_data$scan)
sorts_data<-data.frame(Time="", Type="", Value=as.numeric(""))
sorts_data<-sorts_data[-1,]
for (i in 1:length(ss)) {
sorts_data<-rbind( sorts_data,
data.frame(Time=as.character(i),
Type="Merge Passes",
Value=as.numeric(smp[i])))
sorts_data<-rbind( sorts_data,
data.frame(Time=as.character(i),
Type="Range",
Value=as.numeric(srange[i])))
sorts_data<-rbind( sorts_data,
data.frame(Time=as.character(i),
Type="Rows",
Value=as.numeric(srows[i])))
sorts_data<-rbind( sorts_data,
data.frame(Time=as.character(i),
Type="Scan",
Value=as.numeric(ss[i])))
}
png(args[length(args)],width=800)
options(scipen=20)
ggplot(sorts_data, aes(x=Time, y=Value, fill=Type, color=Type, group=Type, xaxt='n')) + geom_area(position = "stack", stat="identity") + ggtitle("MySQL Sorts") + theme(axis.title.y = element_blank(), axis.title.x = element_blank(), axis.text.x = element_blank(), axis.ticks = element_blank(), panel.grid.major = element_blank(), panel.grid.minor=element_blank())
dev.off()
|
a0b1e3ea0f3e244a96c0f57cf7342ac91eb79f00 | 8117e00c26fd2906e1542aec58a88c3967c2fb2d | /tests/testthat/test-makeSuggList.R | 0c84f5e17fc2dceb90026ba110025dc355d4785e | [] | no_license | RGLab/corpusFreq | fbf280fa91f5905d2a5c5af12f6dbef11458cd50 | f44551c40ab82934db3496d135faa00316f8219e | refs/heads/main | 2021-05-02T15:06:50.119428 | 2021-04-26T20:46:49 | 2021-04-26T20:46:49 | 120,689,873 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,431 | r | test-makeSuggList.R | # Helpers-------------------------------------------------------
tmp <- c("lymphocyte is a dose of reality in a harsh world.",
"Doses would be too much.",
"And is critical for this test.")
freqTbl <- makeFreqTbl(tmp)
getRes <- function(vec, freqTbl){
metaData <- list()
metaData$medLength <- stats::median(sapply(vec, nchar))
metaData$numStrings <- length(vec)
words <- vec2words(vec)
res <- corpusFreq:::makeSuggList(words = words, freqTbl = freqTbl, metaData = metaData)
}
# Tests --------------------------------------------------------
context("makeSuggList")
test_that("Many short strings use internalFt with stopwords", {
vec <- c("1 dose", "2 doses", "5 doses", "3 doses", "1 does", "1 dose", "4 doses")
res <- getRes(vec, freqTbl)
expect_true("dose" %in% res$does)
})
test_that("Corrects non-freqTbl but regular english word. i.e. 'adn' to 'and'", {
vec <- c("the fox adn the hen are not friends.")
res <- getRes(vec, freqTbl)
expect_true("and" %in% res$adn)
})
test_that("Corrects biological word present in freqTbl", {
vec <- c("Why did the lymphocyt cross the road?")
res <- getRes(vec, freqTbl)
expect_true("lymphocyte" %in% res$lymphocyt)
})
test_that("Single string with few words, no internalFt work", {
vec <- c("Why did the lymphocyt cross the road?")
res <- getRes(vec, freqTbl)
expect_false("why" %in% names(res) )
})
|
b71c02919c11019befce3bd7e8a147bd8a6b14d4 | 1b388061103d48f7e9fab752ff794e16222a3116 | /R/measure_missing.R | 73149ab8882c2b502cdc43090ed8f7229960adea | [] | no_license | neyhartj/gws | b5fc9257075a162fadebd8ea6a3634638555e929 | cb52791be50d96b3cb4b733c3aa08476b7c05249 | refs/heads/master | 2021-01-19T06:46:14.146652 | 2020-03-12T21:25:46 | 2020-03-12T21:25:46 | 63,906,263 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 252 | r | measure_missing.R | #' Measure the missingness of a SNP or entry
#'
#'
measure.missing <- function(x, type = "numeric") {
if (type == "numeric") {
return( sum(is.na(x)) / length(x) )
}
if (type == "nucleotide") {
return( sum(x == "NN") / length(x) )
}
}
|
2ff2a2d7f46bb7bfa07e1ab0fce9258e8baa8bf2 | 5ba559ffe9bea4744986265aaf0abb900f318232 | /R/pruning.r | 3b7c1ac9cf73907e4ec6f5edbabe23916a88476a | [
"BSD-3-Clause"
] | permissive | BenJamesbabala/autoBagging | e021721b5467f5bff267279256e76692912ffd44 | af6bca0552917953555c778e3c4964d41698f65f | refs/heads/master | 2020-12-02T12:46:51.029484 | 2017-06-23T12:45:35 | 2017-06-23T12:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,694 | r | pruning.r | #' Boosting-based pruning of models
#'
#' @param form formula
#' @param preds predictions in training data
#' @param data training data
#' @param cutPoint ratio of the total number of models to keepFORA
#'
#' @export
bb <- function (form, preds, data, cutPoint) {
class <- get_target(form)
prunedN <- ceiling(ncol(preds) - (ncol(preds) * cutPoint))
weights <- rep(1/nrow(data), nrow(data))
ordem <- NULL
for (l in 1:prunedN) {
errors <- apply(preds, 2, function(x) {sum(((!(x == data[,class])) * 1) * weights)})
# hammer time! works fine, though
errors[ordem] <- max(errors) * 2
ordem[l] <- which.min(errors)
errorU <- min(errors)
predU <- preds[,ordem[l]] == data[,class]
if (errorU > 0.5) {
weights <- rep(1/nrow(data), nrow(data))
} else {
for (w in 1:length(weights)) {
weights[w] <- ifelse(predU[w], weights[w] / (2*errorU) , weights[w] / (2 * (1-errorU)) )
}
}
}
return(ordem)
}
#' MDSQ
#'
#' @inheritParams bb
#'
#' @export
mdsq <- function (form, preds, data, cutPoint) {
class <- get_target(form)
prunedN <- ceiling(ncol(preds) - (ncol(preds) * cutPoint))
ordem <- as.vector(NULL)
pred <- ifelse(preds == data[,class], 1, -1)
ens <- rep(0, length(data[,class]))
colnames(pred) <- 1:ncol(pred)
o <- rep(0.075, length(data[,class]))
for (l in 1:prunedN) {
dist <- apply(as.matrix(pred), 2, function (x) {sqrt( sum( ( ((x+as.vector(ens))/l) - o )^2 ) )})
ens <- as.matrix(ens) + as.matrix(pred[,c(names(which.min(dist)))])
pred <- as.matrix(pred[,setdiff(colnames(pred),names(which.min(dist)))])
ordem[l] <- as.integer(names(which.min(dist)))
}
return(ordem)
}
|
d7bd4f0fccf05846848dd4c20aa8a8e475a9566f | 6a2f6ab46c35441db0288fbde4be1a5188f2ec30 | /R/ti_celltrails.R | 4a780d652ba02f2e91aa5a239f4ef99cf5c3dc77 | [] | no_license | herrinca/dynmethods | f7595c8ce4f06cb2cb4b809c49ceebd705330940 | 0a5768cf4452b2b745ee675bbd013140d54029da | refs/heads/master | 2020-03-26T22:19:11.513964 | 2018-08-21T18:03:51 | 2018-08-21T18:03:51 | 145,448,352 | 0 | 0 | null | 2018-08-20T17:17:18 | 2018-08-20T17:17:18 | null | UTF-8 | R | false | false | 3,331 | r | ti_celltrails.R | ######################################### DO NOT EDIT! #########################################
#### This file is automatically generated from data-raw/2-generate_r_code_from_containers.R ####
################################################################################################
#' @title Inferring a trajectory inference using CellTrails
#'
#' @description
#' Will generate a trajectory using
#' [CellTrails](https://doi.org/10.1016/j.celrep.2018.05.002).
#'
#' This method was wrapped inside a
#' [container](https://github.com/dynverse/dynmethods/tree/master/containers/celltrails).
#' The original code of this method is available
#' [here](https://github.com/dcellwanger/CellTrails).
#'
#' @references Ellwanger, D.C., Scheibinger, M., Dumont, R.A., Barr-Gillespie,
#' P.G., Heller, S., 2018. Transcriptional Dynamics of Hair-Bundle Morphogenesis
#' Revealed with CellTrails. Cell Reports 23, 2901–2914.e14.
#'
#' @param threshold_dl integer; Minimum number of samples; if value < 1 it is
#' interpreted as fraction, otherwise as absolute sample count (default: `2L`;
#' range: from `0L` to `100L`)
#' @param threshold_cov numeric; Minimum coefficient of variation; numeric value
#' between 0 and 1 (default: `0.05`; range: from `0L` to `1L`)
#' @param threshold_ff numeric; A Z-score cutoff (default: `1.7`; range: from `0L`
#' to `5L`)
#' @param min_expr numeric; Minimum average feature expression (default: `0L`;
#' range: from `0L` to `2L`)
#' @param frac numeric; Fraction or number (if frac > 1) of eigengaps used to
#' perform linear fit. (default: `100L`; range: from `1L` to `1000L`)
#' @param min_size numeric; The initial cluster dedrogram is cut at an height such
#' that the minimum cluster size is at least min_size; if min_size < 1 than the
#' fraction of total samples is used, otherwise it is used as absoulte count
#' (default: `0.01`; range: from `0.001` to `1L`)
#' @param min_feat integer; Minimum number of differentially expressed features
#' between siblings. If this number is not reached, two neighboring clusters
#' (siblings) in the pruned dendrogram get joined. (default: `5L`; range: from
#' `1L` to `100L`)
#' @param max_pval numeric; Maximum P-value for differential expression
#' computation. (default: `1e-04`; range: from `1e-07` to `1L`)
#' @param min_fc numeric; Mimimum fold-change for differential expression
#' computation (default: `2L`; range: from `0L` to `5L`)
#' @param l integer; Neighborhood size (default: `10L`; range: from `1L` to `50L`)
#' @inheritParams dynwrap::create_container_ti_method
#'
#' @return A TI method wrapper to be used together with
#' \code{\link[dynwrap:infer_trajectories]{infer_trajectory}}
#' @export
ti_celltrails <- function(
threshold_dl = 2L,
threshold_cov = 0.05,
threshold_ff = 1.7,
min_expr = 0L,
frac = 100L,
min_size = 0.01,
min_feat = 5L,
max_pval = 1e-04,
min_fc = 2L,
l = 10L,
run_environment = NULL
) {
create_container_ti_method(
docker_repository = "dynverse/celltrails",
run_environment = run_environment,
threshold_dl = threshold_dl,
threshold_cov = threshold_cov,
threshold_ff = threshold_ff,
min_expr = min_expr,
frac = frac,
min_size = min_size,
min_feat = min_feat,
max_pval = max_pval,
min_fc = min_fc,
l = l
)
}
|
7a529848fb404863a72c8e3114929f9b03c1542d | 2c4de2241e8567a64987ab99983c49447691cff8 | /Samsung Smartphone Data Project/Samsung Data Code.R | b3def8fd46f801f3af6a43cf2b453c5c9d48b5a9 | [] | no_license | BenPiggot/Projects | 53b47081f7912d22d9cdeee9981dfcbc4db0a95a | 5dbb812b8bdf4bbf079130d9bb96fbbc2136f5b4 | refs/heads/master | 2020-05-30T23:01:02.807114 | 2014-06-24T05:26:02 | 2014-06-24T05:26:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,938 | r | Samsung Data Code.R | # Set working directory
setwd("~/Desktop/UCI HAR Dataset/")
# Read in "Features.txt" file and clean the variable labels with the sub function.
# This file will be used as the column labels for my new data frame.
features_df <- read.table("~/Desktop/UCI HAR Dataset/features.txt")
features <- as.character(features_df[,2])
install.packages("gsubfn")
library(gsubfn)
features <- gsub("[^[:alnum:]]","", features)
features <- gsub(pattern="mean", replacement=".mean", features)
features <- gsub(pattern="std", replacement=".std", features)
# Read in other relevant files from working directory.
# These files will provide the subject IDs, activities, and data
# for my new data frame.
subject_test_data <- read.table("./test/subject_test.txt")
subject_train_data <- read.table("./train/subject_train.txt")
xtest_data <- read.table("./test/X_test.txt")
xtrain_data <- read.table("./train/X_train.txt")
ytest_data <- read.table("./test/y_test.txt")
ytrain_data <- read.table("./train/y_train.txt")
# Bind togther the data, subject IDs, and activities for the "test"
# portion of the experiment. Then affix columns names from the "features"
# file cleaned above.
test_data <- cbind(ytest_data, subject_test_data, xtest_data)
names(test_data) <- c("activity", "subject", features)
# Bind togther the data, subject IDs, and activities for the "train"
# portion of the experiment. Then affix columns names from "features"
# file cleaned above.
train_data <- cbind(ytrain_data, subject_train_data, xtrain_data)
names(train_data) <- c("activity", "subject", features)
# Bind together the "test" and "train" sub data sets into a new data frame
# called "samsung_data".
samsung_data <- rbind(test_data, train_data)
# Subset the "samsung_data" data frame using the grep function to select
# only those columns measuring "mean" or "std" (standard deviation).
mean_data <- samsung_data[,grep("mean",colnames(samsung_data))]
std_data <- samsung_data[,grep("std",colnames(samsung_data))]
# Recreate the "subject" and activity" columns from the "samsung_data"
# data frame that were filtered out by the grep function.
ysubject_data <- data.frame(rbind(cbind(ytest_data, subject_test_data),
cbind(ytrain_data, subject_train_data)))
# Bind the subject and activity mini data frame with the subsetted "mean"
# and "std" data frames subsetted above using the grep function. This data frame -
# temporarily titled "mean_std_data" - is the basis for my final "tidy data" data frame.
mean_std_data <- cbind(ysubject_data, mean_data, std_data)
# Rename the the first and second columns in the "mean_std_data" data frame
names(mean_std_data)[1] <- "activity"
names(mean_std_data)[2] <- "subject"
# Provide meaningful names for the "activity" variable using the "feature_info.txt"
# file as a guide. This .txt file is found in the working directory (but is not read into R).
mean_std_data$activity[mean_std_data$activity==1] <- "walking"
mean_std_data$activity[mean_std_data$activity==2] <- "walking downstairs"
mean_std_data$activity[mean_std_data$activity==3] <- "walking upstairs"
mean_std_data$activity[mean_std_data$activity==4] <- "sitting"
mean_std_data$activity[mean_std_data$activity==5] <- "standing"
mean_std_data$activity[mean_std_data$activity==6] <- "laying"
# Re-number the mean_std_data rows to start at 1.
row.names(mean_std_data) <- 1:nrow(mean_std_data)
# Create the final "tidy.data" data frame using the aggregate function.
tidy.data <- aggregate(. ~ activity + subject, data=mean_std_data, mean)
# Reorder the "tidy.data" data frame's columns so that "subject" heads the first column
# variable and "acitivity" heads the second column
tidy.data <- tidy.data[,c(2,1,3:81)]
# Install xlsx package and write the "tidy.data" data frame as Excel file.
install.packages("xlsx")
library(xlsx)
write.xlsx(x = tidy.data, file = "Human Activity as Measured by Samsung Smartphone.xlsx", row.names = FALSE)
|
6cd39a04cdc9b16a00e82e165798373156bbf0d7 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /meteor/inst/testfiles/E_Penman/libFuzzer_E_Penman/E_Penman_valgrind_files/1612738781-test.R | 114b208e3f6b787a79fef2c2d246c8d4d95379d2 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 605 | r | 1612738781-test.R | testlist <- list(Rext = numeric(0), Rs = NaN, Z = c(NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 2.96763823300824e+280, 3.65588327285767e+233, 4.71235854849405e+257, 1.0639991435071e+248, NaN, 4.78479882533389e-304, 5.44361528587885e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), alpha = numeric(0), atmp = NaN, relh = NaN, temp = c(NaN, NaN), u = numeric(0))
result <- do.call(meteor:::E_Penman,testlist)
str(result) |
8e79d4dd686c3f9243491bfebb348d8aba71484d | 6b4fe2baa84e74af637f319ea5d887cb2fd6f9a2 | /kevin/rimod-analysis/rnaseq/burgholderia_infection_analysis.R | 65a604f46174d5d2b6391d0af32dcd35f83f449c | [] | no_license | dznetubingen/analysis_scripts | 1e27ca43a89e7ad6f8c222507549f72b1c4efc20 | 4fcac8a3851414c390e88b4ef4ac461887e47096 | refs/heads/master | 2021-06-25T10:47:40.562438 | 2021-01-04T16:02:34 | 2021-01-04T16:02:34 | 187,789,014 | 1 | 0 | null | 2020-09-03T11:37:25 | 2019-05-21T07:55:17 | Jupyter Notebook | UTF-8 | R | false | false | 11,403 | r | burgholderia_infection_analysis.R | ###########################################################
# Analysis of Salmon quantified RiMod frontal RNA-seq data
# !!! Analysis of Pathogengroup with apparenty Burgholderia infection !!!
#
#
##########################################################
library(tximport)
library(DESeq2)
library(GenomicFeatures)
library(stringr)
library(pheatmap)
library(IHW)
library(biomaRt)
library(fgsea)
setwd("~/rimod/RNAseq/results_salmon/")
#### Hard-coded section
script_name = "rnaseq_salmon_analysis_rimod_frontal.R"
date = Sys.Date()
current_time = gsub(":", ".", gsub(" ", "_", Sys.time()))
####
# parameters parsing
row_sum_cutoff = 10
row_sum_samples_nr = 5
metadata = "/home/kevin/rimod/files/FTD_Brain.csv"
analysis_dir = "/home/kevin/rimod/RNAseq/analysis/"
region <- "fro"
salmon_files = "/home/kevin/rimod/RNAseq/analysis/txi_salmon/frontal_lengthScaledTPM_counts.txt"
#====================================================================#
# set working directory
setwd(analysis_dir)
# Create sub-folder for current analysis
dir.create(paste("burgholderia_analysis","_",region, "_", current_time, sep=""))
setwd(paste("burgholderia_analysis", "_", region, "_",current_time, sep=""))
# Save parameters in config file
params <- c(current_time, as.character(row_sum_cutoff), metadata, salmon_files, analysis_dir, script_name, region)
param.names <- c("Time", "Row_sum_cutoff", "Metadata", "salmon_files", "Analysis_directory", "Script_name", "Region")
params <- data.frame(param.name = param.names, param = params)
write.table(params, paste("config_file", current_time, sep="_"), quote = F, row.names = F, sep="\t")
#=================================================================#
# Load metadata
md <- read.csv(metadata, stringsAsFactors = FALSE)
md$SAMPLEID <- as.character(sapply(md$SAMPLEID, function(x){strsplit(x, split="_")[[1]][[1]]}))
md$SAMPLEID <- str_pad(md$SAMPLEID, width = 5, side = "left", pad = "0") # fill sample ids to 5 digits
# load counts
cts <- read.table(salmon_files, sep="\t", header=T, row.names=1)
# bring counts and md in similar format
rna.samples <- as.character(sapply(colnames(cts), function(x){strsplit(x, split="_")[[1]][[1]]}))
rna.samples <- str_pad(gsub("X", "", rna.samples), width=5, side='left', pad='0')
md <- md[md$SAMPLEID %in% rna.samples,]
md <- md[match(rna.samples, md$SAMPLEID),]
#### REMOVE ALL SPORADIC CASES ####
disease.codes <- c("FTD-C9", "FTD-MAPT", "FTD-GRN", "control")
keep <- md$DISEASE.CODE %in% disease.codes
md <- md[keep,]
# subset TXI
cts <- cts[,keep]
# remove sample 05180 (new Analysis 14.01.2020)
keep <- !grepl("5108", colnames(cts))
md <- md[keep,]
cts <- cts[,keep]
md$DISEASE.CODE <- gsub("-", "_", md$DISEASE.CODE) # make disease code names safe
# Split Age covariate into bins
age_bins = 4
md$AGE.BIN <- make.names(cut(md$AGE, breaks=age_bins))
# pmd
md$PMD.MIN. <- as.numeric(md$PMD.MIN.)
pmd.mean <- mean(na.omit(md$PMD.MIN.))
md$PMD.MIN.[is.na(md$PMD.MIN.)] <- pmd.mean
md$pmd <- md$PMD.MIN.
# PH
ph <- as.numeric(md$PH)
ph.mean <- mean(na.omit(ph))
ph[is.na(ph)] <- ph.mean
md$PH <- ph
## Make Burgholderia infected group
burgholderia <- c("04245", "09070", "07106", "09218", "09126", "10058", "02218", "10200", "97231" , "11054")
md$BS <- rep("free", nrow(md))
md$BS[md$SAMPLEID %in% burgholderia] <- "infected"
## Remove all controls
keep <- !md$DISEASE.CODE == "control"
md <- md[keep,]
cts <- cts[,keep]
#===========================================#
# DESeq2 analysis
# Generate DDS object
cts <- round(cts) # round to integer counts
dds <- DESeqDataSetFromMatrix(cts,
colData = md,
design = ~ PH + GENDER + BS)
# Specify control group
dds$BS <- relevel(dds$BS, ref = "free")
# apply prefiltering
dds <- estimateSizeFactors(dds)
keep <- rowSums((counts(dds, normalized=TRUE) >= row_sum_cutoff)) >= row_sum_samples_nr
dds <- dds[keep,]
# Run DESeq
dds <- DESeq(dds)
resnames <- resultsNames(dds)
#== Extract results ==#
pval_cut <- 0.05
### infected - free
res <- results(dds, c("BS", "infected", "free"), filterFun = ihw)
res <- na.omit(res)
rownames(res) <- str_split(rownames(res), pattern="[.]", simplify = T)[,1]
deg <- res[res$padj <= pval_cut,]
print(dim(deg))
###########
## Save results
# Adjust rownames
write.table(res, paste("deseq_result_infected.free", "_", region, "_",current_time, ".txt", sep=""), sep="\t", quote=F, col.names = NA)
# Save only significant genes for online tools
write.table(rownames(deg), paste("DEGs_infected.free", "_", region, "_",current_time, ".txt", sep=""), sep="\t", quote=F, row.names=F)
# Divde in up and down regulated genes
# MAPT
mapt.up <- deg[deg$log2FoldChange > 0,]
mapt.down <- deg[deg$log2FoldChange < 0,]
write.table(rownames(mapt.up), "DEGs_UP_mapt.ndc.txt", quote=F, row.names=F)
write.table(rownames(mapt.down), "DEGs_Down_mapt.ndc.txt", quote=F, row.names=F)
########################################
## Generate count table and vst table
########################################
# normalized count values
norm.counts <- counts(dds, normalized=TRUE)
write.table(norm.counts, paste("deseq_normalized_counts", "_", current_time, ".txt", sep=""), sep="\t", quote=F, col.names = NA)
# reg log transformed values
rld <- vst(dds, blind=FALSE)
rld.mat <- assay(rld)
write.table(rld.mat, paste("deseq_vst_values","_", current_time, ".txt", sep=""), sep="\t", quote=F, col.names = NA)
################################
## Plotting section ############
## PCA
pca <- plotPCA(rld, intgroup = "DISEASE.CODE")
png(paste("pca_group_deseq_rLogvals", "_", current_time, ".png", sep=""), width = 1200, height = 900)
pca
dev.off()
pca <- plotPCA(rld, intgroup = "GENDER")
png(paste("pca_gender_deseq_rLogvals", "_", current_time, ".png", sep=""), width = 1200, height = 900)
pca
dev.off()
##### HEATMAPs ################
## MAPT
mapt.vst <- rld.mat[rownames(deg.mapt),]
pheatmap(mapt.vst, scale="row")
#==============================#
# Make more PCAs
# separate PCAs for the different mutation
library(factoextra)
dc <- md$DISEASE.CODE
mapt.rld <- rld.mat[,dc %in% c('control', 'FTD_MAPT')]
grn.rld <- rld.mat[,dc %in% c('control', 'FTD_GRN')]
c9.rld <- rld.mat[,dc %in% c('control', 'FTD_C9')]
# MAPT - control
mapt.pca <- prcomp(t(mapt.rld), retx=T)
mapt.dc <- dc[dc %in% c('control', 'FTD_MAPT')]
mapt.gene <- md$GENE[dc %in% c('control', 'FTD_MAPT')]
mapt.gene[mapt.dc == 'control'] <- 'control'
fviz_eig(mapt.pca)
mapt.x <- as.data.frame(mapt.pca$x)
mapt.x$Disease_code <- mapt.gene
mpca <- ggplot(mapt.x, aes(x=PC1, y=PC2, color=Disease_code)) +
geom_point(size=3) +
stat_ellipse()
png(paste("pca_mapt_rlog", "_", current_time, ".png", sep=""), width = 1200, height = 900)
mpca
dev.off()
# GRN - control
grn.pca <- prcomp(t(grn.rld), retx=T)
grn.dc <- dc[dc %in% c('control', 'FTD_GRN')]
fviz_eig(grn.pca)
grn.x <- as.data.frame(grn.pca$x)
grn.x$Disease_code <- grn.dc
gpca <- ggplot(grn.x, aes(x=PC1, y=PC2, color=Disease_code)) +
geom_point(size=3) +
stat_ellipse()
png(paste("pca_grn_rlog", "_", current_time, ".png", sep=""), width = 1200, height = 900)
gpca
dev.off()
# C9 - control
c9.pca <- prcomp(t(c9.rld), retx=T)
c9.dc <- dc[dc %in% c('control', 'FTD_C9')]
fviz_eig(c9.pca)
c9.x <- as.data.frame(c9.pca$x)
c9.x$Disease_code <- c9.dc
cpca <- ggplot(c9.x, aes(x=PC1, y=PC2, color=Disease_code)) +
geom_point(size=3) +
stat_ellipse()
png(paste("pca_c9orf72_rlog", "_", current_time, ".png", sep=""), width = 1200, height = 900)
cpca
dev.off()
# PCA for all samples
all.pca <- prcomp(t(rld.mat), retx = T)
fviz_eig(all.pca)
all.x <- as.data.frame(all.pca$x)
all.x$Disease_code <- dc
pca <- ggplot(all.x, aes(x=PC1, y=PC2, color=Disease_code)) +
geom_point(size=3)
pca
#####################################
## fGSEA analysis
#####################################
ensembl <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
pathways <- gmtPathways("~/resources/genesets/h.all.v6.1.entrez.gmt")
pval_filter <- 0.05
## MAPT FGSEA
mapt <- as.data.frame(res.mapt)
bm <- getBM(attributes = c("ensembl_gene_id", "entrezgene_id"), filters = "ensembl_gene_id", values = rownames(mapt), mart = ensembl)
mapt <- merge(mapt, bm, by.x='row.names', by.y='ensembl_gene_id')
mapt <- mapt[order(mapt$log2FoldChange),]
ranks <- mapt[,3]
names(ranks) <- mapt$entrezgene
mapt.gsea <- fgsea(pathways, ranks, minSize=15, maxSize=500, nperm=1000)
mapt.gsea <- mapt.gsea[order(mapt.gsea$pval)]
mapt.gsea <- as.data.frame(mapt.gsea)
mapt.gsea <- mapt.gsea[, -ncol(mapt.gsea)] # get rid of last column
write.table(mapt.gsea, "fGSEA_results_hallmark_MAPT.txt", sep="\t", quote=F)
## GRN FGSEA
grn <- as.data.frame(res.grn)
bm <- getBM(attributes = c("ensembl_gene_id", "entrezgene_id"), filters = "ensembl_gene_id", values = rownames(grn), mart = ensembl)
grn <- merge(grn, bm, by.x='row.names', by.y='ensembl_gene_id')
grn <- grn[order(grn$log2FoldChange),]
ranks <- grn[,3]
names(ranks) <- grn$entrezgene
grn.gsea <- fgsea(pathways, ranks, minSize=15, maxSize=500, nperm=1000)
grn.gsea <- grn.gsea[order(grn.gsea$pval)]
grn.gsea <- as.data.frame(grn.gsea)
grn.gsea <- grn.gsea[, -ncol(grn.gsea)] # get rid of last column
write.table(grn.gsea, "fGSEA_results_hallmark_GRN.txt", sep="\t", quote=F)
## C9 FGSEA
c9 <- as.data.frame(res.c9)
bm <- getBM(attributes = c("ensembl_gene_id", "entrezgene_id"), filters = "ensembl_gene_id", values = rownames(c9), mart = ensembl)
c9 <- merge(c9, bm, by.x='row.names', by.y='ensembl_gene_id')
c9 <- c9[order(c9$log2FoldChange),]
ranks <- c9[,3]
names(ranks) <- c9$entrezgene
c9.gsea <- fgsea(pathways, ranks, minSize=15, maxSize=500, nperm=1000)
c9.gsea <- c9.gsea[order(c9.gsea$pval),]
c9.gsea <- as.data.frame(c9.gsea)
c9.gsea <- c9.gsea[, -ncol(c9.gsea)] # get rid of last column
write.table(c9.gsea, "fGSEA_results_hallmark_c9orf72.txt", sep="\t", quote=F)
# Remove Gender effect with Limma
library(limma)
design <- model.matrix(~ md$DISEASE.CODE)
x_noBatch <- removeBatchEffect(rld.mat, batch = md$GENDER, design=design)
nb <- rld
assay(nb) <- x_noBatch
plotPCA(nb, intgroup = "DISEASE.CODE")
png("PCA_RNA_rimod_frontal_GenderCorrected.png", width=800, height=600)
plotPCA(nb, intgroup = "DISEASE.CODE")
dev.off()
## Export for YETI
vst_vals <- rld.mat
rownames(vst_vals) <- str_split(rownames(vst_vals), pattern="[.]", simplify = T)[,1]
# MAPT
mapt.vst <- vst_vals[rownames(deg.mapt),]
write.table(mapt.vst, "MAPT_DEGs_vst_yeti.txt" ,sep="\t", col.names= NA, quote=F)
# GRN
grn.vst <- vst_vals[rownames(deg.grn),]
write.table(grn.vst, "GRN_DEGs_vst_yeti.txt", sep="\t", col.names = NA, quote=F)
# C9
c9.vst <- vst_vals[rownames(deg.c9),]
write.table(c9.vst, "C9_DEGs_vst_yeti.txt" ,sep="\t", col.names=NA, quote=F)
#== Prioritize Genes for Humanbase Use ==#
# Save only significant genes for online tools
hb.mapt <- deg.mapt[abs(deg.mapt$log2FoldChange) > 0.8,]
hb.grn <- deg.grn[abs(deg.grn$log2FoldChange) > 0.8,]
hb.c9 <- deg.c9[abs(deg.c9$log2FoldChange) > 0.8,]
write.table(rownames(hb.mapt), paste("DEGs_HB_lfc0.8_mapt.ndc", "_", region, "_",current_time, ".txt", sep=""), sep="\t", quote=F, row.names=F)
write.table(rownames(hb.grn), paste("DEGs_HB_lfc0.8_grn.ndc", "_", region, "_",current_time, ".txt", sep=""), sep="\t", quote=F, row.names=F)
write.table(rownames(hb.c9), paste("DEGs_HB_lfc0.8_c9.ndc", "_", region, "_",current_time, ".txt", sep=""), sep="\t", quote=F, row.names=F)
|
ff2b6f6cb9cb88c4f0bd8aa47c03582fa139551b | f91d3993ecdceba7e19fcdd16ac31fc30121e114 | /man/model_exponential_gamma.Rd | b43a2a80d1ff06a2a1f8ef7069b5641f44252ef7 | [] | no_license | eliobartos/bayeselio | 5e5b0e5e0bc0dd8808a93969da41d58fc0843234 | 97bba1bca1d54a83d22300ac14794dece7e3eb04 | refs/heads/master | 2023-02-26T22:52:33.625846 | 2021-02-02T11:50:37 | 2021-02-02T11:50:37 | 270,571,898 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,648 | rd | model_exponential_gamma.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_exponential_gamma.R
\name{model_exponential_gamma}
\alias{model_exponential_gamma}
\title{Run bayesian exponential-gamma model for estimating non-zero positive variable}
\usage{
model_exponential_gamma(shape, rate, n_sample, sum_sample, n_post = 1e+05)
}
\arguments{
\item{shape}{Parameter for prior distribution representing the number of samples from before - 1}
\item{rate}{Parameter for prior distribution representing the sum of samples from before}
\item{n_sample}{Total number of cases in your data}
\item{sum_sample}{Sum of variable for each successful case.}
\item{n_post}{Size of sample from posterior distribution}
}
\value{
Vector of samples from posterior distribution representing mean of exponential distribution (1/lambda).
Posterior distribution is Gamma(shape + n_sample, rate + sum_sample)
}
\description{
Runs a Bayesian estimation of a non-zero positive variable using the exponential distribution as the likelihood and the gamma distribution as the conjugate prior.
Posterior distribution is gamma distribution.
Prior used is Gamma(shape, rate)
}
\details{
Mean of prior distribution is shape/rate. Remember Exp(lambda) has mean of 1/lambda.
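
A minimal sketch of the conjugate update described here (illustrative only; the packaged implementation may differ in detail):
\preformatted{
lambda_post <- rgamma(n_post, shape = shape + n_sample, rate = rate + sum_sample)
post <- 1 / lambda_post  # returned samples represent the exponential mean 1/lambda
}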
}
\examples{
post = model_exponential_gamma(0, 0, 20, 100) # No prior information, prior is uniform
post2 = model_exponential_gamma(5, 95, 3, 50) # Prior success rate is around 5\% with estimation strength as if it had been estimated on a sample of 100
mean(post)
quantile(post, probs = c(0.05, 0.95)) # 90\% equal-tailed posterior credible interval
mean(post2)
quantile(post2, probs = c(0.05, 0.95))
}
\author{
Elio Bartoš
}
|
d3732fd612b5e968d8d6c59fa86bf6cfa8e99e13 | 3a80a99645855f421ecf7fd902ff92f18d1afec8 | /codes/Analysis3.R | ecef3f42588184955f28723406e7dc2728735ded | [] | no_license | hjkim88/Mohamed_Flu | 729c0391900518d3b851bbc14f82a025adc90c3f | 13d33b396d05b0dd840e1d9ee0b1352ebf075b75 | refs/heads/master | 2021-04-09T20:44:51.058756 | 2020-04-09T20:36:44 | 2020-04-09T20:36:44 | 248,878,517 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,092 | r | Analysis3.R | ###
# File name : Analysis3.R
# Author : Hyunjin Kim
# Date : Apr 9, 2020
# Email : [email protected]
# Purpose : There may be a concern that age can affect the cytokine analysis.
# e.g., we see cytokine level difference between IV+ and IV- but
# it can happen due to age difference.
# Therefore, I would like to make pie charts to see age differences
# among the groups and also generate age-corrected cytokine level data,
# then will produce line graphs with the age-corrected data.
#
# Instruction
# 1. Source("Analysis3.R")
# 2. Run the function "flu09_analysis3" - specify the input path and the output directory
# 3. The results will be generated under the output directory
#
# Example
# > source("The_directory_of_Analysis3.R/Analysis3.R")
# > flu09_analysis3(data_path="./data/flu09_cytokine.rda",
# output_dir="./results/analysis3/")
###
flu09_analysis3 <- function(data_path="./data/flu09_cytokine.rda",
output_dir="./results/analysis3/") {
### load libraries
if(!require(ggbeeswarm, quietly = TRUE)) {
install.packages("ggbeeswarm")
require(ggbeeswarm, quietly = TRUE)
}
if(!require(gridExtra, quietly = TRUE)) {
install.packages("gridExtra")
require(gridExtra, quietly = TRUE)
}
### load the data
load(data_path)
### set types of cytokines
### defined by Mohamed's presentation file
th1_cytokines <- c("IL10",
"GRO",
"IL8",
"GCSF",
"IFNa2",
"FKN",
"MCP1",
"MCP3",
"IL1a",
"IL1Ra2",
"TNFa",
"VEGF")
th2_cytokines <- c("IL4",
"IL5",
"IL6",
"IL9",
"IL13",
"IL15",
"IL17")
### clinical factors to be tested
### must be in colnames(cyto_sample)
factor_list <- c("FLU.Strain.Designation",
"Age.at.Enrollment",
"Race",
"Gender",
"Respiratory.Disease",
"Tobacco.Use")
### data to be tested
plot_df <- cyto_sample[,factor_list]
### annotate additional info - IV+/Resp+, IV+/Resp-, IV-/Resp+, IV-/Resp-
plot_df$IV.Resp <- sapply(1:nrow(plot_df), function(x) {
if(plot_df[x,"FLU.Strain.Designation"] == "Negative" && plot_df[x,"Respiratory.Disease"] == "Yes") {
return("IV-/Resp+")
} else if(plot_df[x,"FLU.Strain.Designation"] == "Negative" && plot_df[x,"Respiratory.Disease"] == "No") {
return("IV-/Resp-")
} else if(plot_df[x,"FLU.Strain.Designation"] != "Negative" && plot_df[x,"Respiratory.Disease"] == "Yes") {
return("IV+/Resp+")
} else if(plot_df[x,"FLU.Strain.Designation"] != "Negative" && plot_df[x,"Respiratory.Disease"] == "No"){
return("IV+/Resp-")
} else {
return(NA)
}
})
### numerize the numeric column
plot_df[,"Age.at.Enrollment"] <- as.numeric(plot_df[,"Age.at.Enrollment"])
cyto_nw[,"Study.Day"] <- as.numeric(cyto_nw[,"Study.Day"])
cyto_plasma[,"Study.Day"] <- as.numeric(cyto_plasma[,"Study.Day"])
### N/A -> NA in Tobacco.Use
plot_df[which(plot_df[,"Tobacco.Use"] == "N/A"),"Tobacco.Use"] <- "NA"
### annotate clinical info to the cytokine level data
plot_df_nw <- merge(cyto_nw[,c("ID", "Study.Day", th1_cytokines, th2_cytokines)], plot_df,
by.x = "ID", by.y = "row.names")
plot_df_ps <- merge(cyto_plasma[,c("ID", "Study.Day", th1_cytokines, th2_cytokines)], plot_df,
by.x = "ID", by.y = "row.names")
### remove samples that are categorized into a group with less than 10 samples
for(factor in setdiff(c(factor_list, "Study.Day"), "Age.at.Enrollment")) {
### NW
unique_vals <- unique(plot_df_nw[,factor])
remove_ind <- NULL
for(val in unique_vals) {
target_ind <- which(plot_df_nw[,factor] == val)
if(length(target_ind) < 10) {
remove_ind <- c(remove_ind, target_ind)
}
}
if(length(remove_ind) > 0) {
plot_df_nw <- plot_df_nw[-remove_ind,]
}
### plasma
unique_vals <- unique(plot_df_ps[,factor])
remove_ind <- NULL
for(val in unique_vals) {
target_ind <- which(plot_df_ps[,factor] == val)
if(length(target_ind) < 10) {
remove_ind <- c(remove_ind, target_ind)
}
}
if(length(remove_ind) > 0) {
plot_df_ps <- plot_df_ps[-remove_ind,]
}
}
### add new column - Age.over.50
plot_df_nw$Age.over.50 <- "FALSE"
plot_df_nw$Age.over.50[plot_df_nw$Age.at.Enrollment >= 50] <- "TRUE"
plot_df_ps$Age.over.50 <- "FALSE"
plot_df_ps$Age.over.50[plot_df_ps$Age.at.Enrollment >= 50] <- "TRUE"
### 1. pie charts
### create result directory
outDir <- paste0(output_dir, "1_Pie_Chart/")
dir.create(path = outDir, showWarnings = FALSE, recursive = TRUE)
### NW
### create an analysis-ready data for the pie charts
pie_data <- vector("list", length = length(unique(plot_df_nw$IV.Resp)))
names(pie_data) <- unique(plot_df_nw$IV.Resp)
for(group in unique(plot_df_nw$IV.Resp)) {
pie_data[[group]] <- data.frame(Age.over.50=c("FALSE", "TRUE"),
n=c(length(intersect(which(plot_df_nw$IV.Resp == group),
which(plot_df_nw$Age.over.50 == "FALSE"))),
length(intersect(which(plot_df_nw$IV.Resp == group),
which(plot_df_nw$Age.over.50 == "TRUE")))),
stringsAsFactors = FALSE, check.names = FALSE)
}
### get pie charts
p <- vector("list", length = length(unique(plot_df_nw$IV.Resp)))
names(p) <- unique(plot_df_nw$IV.Resp)
for(group in unique(plot_df_nw$IV.Resp)) {
total_sample_num <- sum(pie_data[[group]]$n)
pct <- sapply(pie_data[[group]]$n, function(x) signif(x*100/total_sample_num, digits = 3))
pct <- paste0(pie_data[[group]]$n, "(", pct, "%)")
p[[group]] <- ggplot(data = pie_data[[group]], aes(x = "", y = n, fill = Age.over.50)) +
geom_bar(stat = "identity", width = 1) +
coord_polar(theta="y") +
geom_text(label = pct, position = position_stack(vjust = 0.5), size = 6) +
labs(x = NULL, y = NULL, title = group) +
theme_classic(base_size = 16) +
theme(plot.title = element_text(hjust = 0.5, color = "black"),
axis.line = element_blank(),
axis.ticks = element_blank())
}
### arrange the plots and print out
fName <- paste0("Pie_Chart_NW_IV.Resp_Age")
g <- arrangeGrob(grobs = p,
nrow = 2,
ncol = 2,
top = fName)
ggsave(file = paste0(outDir, fName, ".png"), g, width = 15, height = 9, dpi = 300)
### PS
### create an analysis-ready data for the pie charts
pie_data <- vector("list", length = length(unique(plot_df_ps$IV.Resp)))
names(pie_data) <- unique(plot_df_ps$IV.Resp)
for(group in unique(plot_df_ps$IV.Resp)) {
pie_data[[group]] <- data.frame(Age.over.50=c("FALSE", "TRUE"),
n=c(length(intersect(which(plot_df_ps$IV.Resp == group),
which(plot_df_ps$Age.over.50 == "FALSE"))),
length(intersect(which(plot_df_ps$IV.Resp == group),
which(plot_df_ps$Age.over.50 == "TRUE")))),
stringsAsFactors = FALSE, check.names = FALSE)
}
### get pie charts
p <- vector("list", length = length(unique(plot_df_ps$IV.Resp)))
names(p) <- unique(plot_df_ps$IV.Resp)
for(group in unique(plot_df_ps$IV.Resp)) {
total_sample_num <- sum(pie_data[[group]]$n)
pct <- sapply(pie_data[[group]]$n, function(x) signif(x*100/total_sample_num, digits = 3))
pct <- paste0(pie_data[[group]]$n, "(", pct, "%)")
p[[group]] <- ggplot(data = pie_data[[group]], aes(x = "", y = n, fill = Age.over.50)) +
geom_bar(stat = "identity", width = 1) +
coord_polar(theta="y") +
geom_text(label = pct, position = position_stack(vjust = 0.5), size = 6) +
labs(x = NULL, y = NULL, title = group) +
theme_classic(base_size = 16) +
theme(plot.title = element_text(hjust = 0.5, color = "black"),
axis.line = element_blank(),
axis.ticks = element_blank())
}
### arrange the plots and print out
fName <- paste0("Pie_Chart_PS_IV.Resp_Age")
g <- arrangeGrob(grobs = p,
nrow = 2,
ncol = 2,
top = fName)
ggsave(file = paste0(outDir, fName, ".png"), g, width = 15, height = 9, dpi = 300)
}
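## The header above also mentions producing age-corrected cytokine levels before plotting.
## The function below is a minimal, illustrative sketch of one way to do that; it is not
## called anywhere in this script, and its argument names are placeholders rather than
## objects created by flu09_analysis3().
age_correct_cytokines <- function(cyto_values, age) {
  # regress each cytokine column on age and keep the residuals, re-centred on the column mean
  apply(cyto_values, 2, function(y) {
    fit <- lm(y ~ age, na.action = na.exclude)
    residuals(fit) + mean(y, na.rm = TRUE)
  })
}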
|
a3be76283e4e6477def67d9d2007b462afc1aa60 | 82eeee2eaf170541c9336a00d14e6ac1f3d6f4fa | /ADSP - 로지스틱 회귀모형.R | 093b834d7b65ba8dc95610462e779b4169631c81 | [] | no_license | wpzero1/RStudy | 58d19493f001f3a2d06e4864eeeb6ac44389c0a2 | 521bd66fcc8982303040b501190c348d7eb6140d | refs/heads/master | 2020-06-27T06:14:08.227373 | 2019-08-16T12:54:30 | 2019-08-16T12:54:30 | 174,159,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,407 | r | ADSP - 로지스틱 회귀모형.R | #로지스틱 회귀모형
# 8장 정형 데이터 마이닝
# 01. 데이터 마이닝 개요 : 거대한 양의 데이터 속에서 쉽게 드러나지 않는 유용한 정보를 찾아내는 과정
# (1) 분류 : 의사결정나무, memory-based reasoning 등
# (2) 추정 : 연속된 변수값 추정. 신경망 모형
# (3) 예측 : 장바구니 분석, 의사결정나무, 신경망 등
# (4) 연관분석 : 장바구니분석
# (5) 군집 : 데이터마이닝이나 모델링의 준비 단계
# (6) 기술
# 데이터마이닝 5단계 : 목적 정의 - 데이터 준비 - 데이터 가공 - 데이터 마이닝 기법 적용 - 검증
# 02. 분류분석
# 1. 로지스틱 회귀모형 *** : 반응변수가 범주형인 경우. 즉 두가지 범주로 되어있을 때, 종속변수와 독립변수간의 관계식을 이용해 두 집단 분류
# 회귀 분석이기에 지도학습으로 분류(Supervised Learning). 선호되는 이유? 독립변수에 대해 어떠한 가정도 필요x, 연속성 및 이산형 모두 가능함.
# 오즈비(Odds ratio) : 변수가 성공/실패로 구성된다면, 한 집단이 다른 집단에 비해 성공할 승산의 비에 대한 측정량
# 성공률/실패율 = Pi/(1-Pi)
# 오즈비는 음이 아닌 실숫값. 성공 가능성 높은 경우 1.0보다 큰 값, 실패가 일어날 가능성이 높으면 1.0보다 작은 값 가짐
# 온도에 따른 거북이 알의 수컷, 암컷 결과 실험
x<-c(27.2,27.7,28.3,28.4,29.9) #온도
male<-c(2,17,26,19,27) #수컷 수
female<-c(25,7,4,8,1) #암컷 수
total=male+female #합계
pmale<-male/total #수컷 비율
# 독립변수 : 온도, 종속변수 : 수컷비율
# 단순선형회귀식
z<-lm(pmale~x) #회귀분석(종속~독립)
summary(z)
#추정 회귀식 : 수컷비율 = -6.9020+0.2673*온도
p<-coefficients(z)[1]+coefficients(z)[2]*x
p
# 1보다 큰 값이 존재함
#로짓변환 값 log(pmale/(1-pmale))을 종속변수로 한 단순성형회귀식 추정.
# 적절한 변환 통해 곡선을 직선 형태로 바꿀 수 있음
logit<-log(pmale/(1-pmale))
z1<-lm(logit~x)
summary(z1)
# 예측값 수컷비 출력
logit2<-coefficients(z1)[1]+coefficients(z1)[2]*x
rmalehat<-exp(logit2)/(1+exp(logit2))
rmalehat
# 로짓변환하여 온도별 예측 확률값은 0~1 사이 값 가짐
# 최대우도추정법 : 관측값들이 가정된 모집단에서 하나의 표본으로 추출될 가능성이 가장 크게 되도록 하는 회귀계수 추정법
logit<-glm(pmale~x,family = "binomial",weights = total)
summary(logit)
# 추정 회귀식 : 수컷 비율 = -61.3183+2.211*온도. 27.3도에서 암컷과 수컷을 구분짓는 경계값이 됨
# 회귀 계수 해석
exp(-61.3183)*exp(2.211*27) #27도에서 오즈 예측값. 0.2
exp(-61.3183)*exp(2.211*28) #28도에서 오즈 예측값. 1.8
#28도에서 오즈 예측값은 27도에서의 오즈 예측값보다 exp(2.211)차이.
#28도에서 암컷에서 수컷으로 부화할 가능성은 0.2*9.125=1.825
# iris 데이터를 이용한 로지스틱 회귀
# Logistic regression using the iris data
colnames(iris)<-tolower(colnames(iris)) #컬럼명 소문자로 변환
a<-subset(iris,species=="setosa"|species=="versicolor")
#로지스틱 회귀를 하기 위해 범주가 2개인 setosa=1과 versicolor=2만 추출.
a$species<-factor(a$species) #2개 레벨을 가진 새로운 factor(범주)형
#glm() : 선형회귀분석 lm과 유사. glm(모형,data,family="binomial")
b<-glm(species~sepal.length,data=a,family = binomial)
summary(b)
# sepal.length p-value 유의수준보다 낮아 매우 유의한 변수다.
coef(b)
#sepal.length : 5.140336 (회귀계수)
#로지스틱 회귀계수 값은 exp(5.140336)의 값이므로 약 170배가 된다.
exp(coef(b))["sepal.length"]
#sepal.length가 한 단위 증가함에 따라 Vericolor일 오즈가 10배 증가
fitted(b)[c(1:3,98:100)]
#로지스틱 모델은 0 또는 1로 값을 예측하기에, 0.5이하면 setosa, 0.5이상이면 versicolor 예측값을 의미
predict(b,newdata = a[c(1,50,51,100),],type = "response")
#type을 response로 지정하고 예측 수행하면, 0~1 사이 확률 구해줌
cdplot(species~sepal.length,data=a)
#연속형 변수의 변화에 따른 범주형 변수의 조건부 분포를 보여줌.
#즉 sepal.length가 증가함에 따라 veriscolor의 확률이 증가함을 보여준다.
#다항 로지스틱 회귀분석 : 32종류의 자동차에 대한 11개 변수값 측정 자료
attach(mtcars) #attach는 코드에서 필드이름만으로 데이터에 바로 접근 가능
str(mtcars)
#이항변수 vs(0:flat engine, 1:straight engine)를 반응변수로,
#mpg와 am(Transmission: automatic=0, manual=1)을 예측변수로 하는 로지스틱 회귀모형 추정
vs
mpg
am
glm.vs<-glm(vs~mpg+am,data=mtcars,family = "binomial")
summary(glm.vs)
# 해석
#am이 주어질 때, mpg값이 한 단위 증가할 때 vs=1 오즈가 exp(0.6809)=1.98(98%) 증가
#mpg가 주어질 때 오즈가 대한 am의 효과는 exp(-3.0073)=0.05배. 변속기가 수동인 경우 자동에 비해 vs=1의 오즈가 95% 감소
anova(glm.vs,test="Chisq") #모형의 적합 단계별로 이탈로의 감소량과 유의성 검정 결과 제시
#Mcfadden R square로 모델 fit 확인 가능
install.packages("pscl")
library(pscl)
pR2(glm.vs)
#R square 값이 0.69로, 데이터셋의 분산의 약 69.1% 설명
|
948f246ba577b1f77ecb5c3b12adc9fc1b8c0196 | 99f3d631d68638ea24f16e3008fd48d883606982 | /Car_crash.R | 0b7e3a9f897b0a7f1598b516ca1136722ad5253a | [] | no_license | BN-project/BN-project | d1d9552a7acb9af479298dede8b4123503416620 | 0c6582270a666b66317a75eee95297e249cb9100 | refs/heads/master | 2021-09-03T07:03:14.065512 | 2018-01-06T17:36:31 | 2018-01-06T17:36:31 | 109,718,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,740 | r | Car_crash.R |
#### Probabilistic Modelling and Bayesian Networks ####
#### The Project: Car Crashes ####
#############################################################
# Reading the document:
crashes <-read.csv("Car_crash_data_project.csv",header=T,sep=",")
#### Preparation and exploration of data ####
# Number of variables and observations:
dim(crashes) # 684 rows (observations) and 27 columns (variables)
anyNA(crashes) # True
# Name of the variables
names(crashes)
# [1] "Month" "Year" "Week_day"
# [4] "Type_day" "Casualties" "Num_vehicles"
# [7] "Lanes" "Road_width" "Lane_width"
# [10] "Hard_shoulder" "Paved_hard_shoulder" "Security_barriers"
# [13] "Aditional_panels" "Edge_landmarks" "Reflectors"
# [16] "Surface" "Luminosity" "Atmosferic_factors"
# [19] "Limited_visibility" "Danger_signs" "Crash_type"
# [22] "Traffic" "Conc_distraction" "Conc_alcohol_durgs"
# [25] "Conc_speed" "Conc_tyred_sleepy_ill" "Conc_weather"
# 1.- Month
anyNA(crashes$Month) # FALSE
class(crashes$Month) # Factor
levels(crashes$Month) # Months
# length(levels(crashes$Month)) --> 12
summary(crashes$Month) # Frequency tables
# Apr Aug Dec Feb Jan Jul Jun Mar May Nov Oct Sep
# 39 52 67 55 41 67 66 70 47 64 64 52
plot(crashes$Month)
# Categorical, 12 categories, no NA, no 0 frequencies
# 2.- Year
anyNA(crashes$Year) # FALSE
class(crashes$Year) # INTEGER !!
crashes$Year <- as.factor(crashes$Year) # Factor transformation !!
levels(crashes$Year) # 2006, 2015
summary(crashes$Year) # Frequency tables
# 2006 2015
# 431 253
plot(crashes$Year)
# Categorical, 2 categories, no NA, no 0 frequencies
# 3.- Week day
anyNA(crashes$Week_day) # FALSE
class(crashes$Week_day) # INTEGER !!
crashes$Week_day <- as.factor(crashes$Week_day) # Factor transformation !!
levels(crashes$Week_day) # Days in numeric representation
summary(crashes$Week_day) # Frequency tables
# 1 2 3 4 5 6 7
# 86 108 92 90 102 107 99
plot(crashes$Week_day)
# Categorical, 7 categories, no NA, no 0 frequencies
# 4.- Type day
anyNA(crashes$Type_day) # FALSE
class(crashes$Type_day) # Factor
levels(crashes$Type_day) # Holiday, post-holiday, pre-holiday, work-day
summary(crashes$Type_day) # Frequency tables
# Holiday Post_holiday Pre_holiday Work_day
# 125 93 120 346
plot(crashes$Type_day)
# Categorical, 4 categories, no NA, no 0 frequencies
# 5.- Casualties (Damnificados)
anyNA(crashes$Casualties) # FALSE
class(crashes$Casualties) # Integer, N
min(crashes$Casualties); max(crashes$Casualties) # Bounds: [1:33]
table(crashes$Casualties) # Values: 1-10,20,33 ; Frequencies.
# 1 2 3 4 5 6 7 8 9 10 20 33
# 292 221 89 42 20 9 5 2 1 1 1 1
summary(crashes$Casualties) # Summary of statisticals
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1.000 1.000 2.000 2.114 2.250 33.000
boxplot(crashes$Casualties)
# Integer ordinal, Values: 1-10,20,33, no NA
# Poisson (lambda): Lambda estimation
library(ggplot2) # ggplot() is used below; ggplot2 is otherwise only attached much later in the script
f<-function(lambda) {return(sum(log10(dpois(crashes$Casualties, lambda))))}
lambda.values <- seq(1, 10, 0.1)
loglikelihood <- sapply(lambda.values , FUN=f)
df <- data.frame(Lambda=lambda.values , Loglikelihood=loglikelihood)
ggplot(df, aes(x=Lambda, y=Loglikelihood)) + geom_line(size=1.1) + labs(x=expression(lambda),
y="Log-likelihood") + geom_vline(xintercept=2.1)
# df$Lambda[which(df$Loglikelihood ==max(df$Loglikelihood))] --> 2.1
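## Cross-check (illustrative): for a Poisson model the maximum-likelihood estimate of
## lambda is simply the sample mean, which should agree with the grid search above.
mean(crashes$Casualties) # ~2.11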
# 6.- Num_vehicles
anyNA(crashes$Num_vehicles) # FALSE
class(crashes$Num_vehicles) # Integer, N
min(crashes$Num_vehicles); max(crashes$Num_vehicles) # Bounds: [1:6]
table(crashes$Num_vehicles) # Values: 1-6 ; Frequencies.
# 1 2 3 4 5 6
# 329 284 57 9 4 1
summary(crashes$Num_vehicles) # Summary of statisticals
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1.000 1.000 2.000 1.652 2.000 6.000
boxplot(crashes$Num_vehicles)
# Integer ordinal, Values: 1-6, no NA
# Poisson (lambda): Lambda estimation
f<-function(lambda) {return(sum(log10(dpois(crashes$Num_vehicles, lambda))))}
lambda.values <- seq(1, 10, 0.1)
loglikelihood <- sapply(lambda.values , FUN=f)
df <- data.frame(Lambda=lambda.values , Loglikelihood=loglikelihood)
ggplot(df, aes(x=Lambda, y=Loglikelihood)) + geom_line(size=1.1) + labs(x=expression(lambda),
y="Log-likelihood") + geom_vline(xintercept=1.7)
# df$Lambda[which(df$Loglikelihood ==max(df$Loglikelihood))] --> 1.7
# 7.- Lanes
anyNA(crashes$Lanes) # FALSE
class(crashes$Lanes) # Integer
min(crashes$Lanes); max(crashes$Lanes) # Bounds: [0:5]
table(crashes$Lanes) # Values: 0-5 ; Frequencies.
# 0 1 2 3 4 5
# 42 26 558 50 6 2
summary(crashes$Lanes) # Summary of statisticals
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 0.000 2.000 2.000 1.939 2.000 5.000
boxplot(crashes$Lanes)
# Integer ordinal, Values: 0-5, no NA
# 8.- Road_width
anyNA(crashes$Road_width) # TRUE
which(is.na(crashes$Road_width) ==T) # NA values. All of them in 2015 data
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
length(which(is.na(crashes$Road_width) ==T)) # 42
class(crashes$Road_width) # Factor
levels(crashes$Road_width) # "6m_to_7m", "Less_6m", "More_7m"
summary(crashes$Road_width) # Frequency tables
# 6m_to_7m Less_6m More_7m NA's
# 375 29 238 42
plot(crashes$Road_width)
# Categorical, 3 categories, NA:42 (All in 2015), no 0 frequencies
# 9.- Lane_width
anyNA(crashes$Lane_width) # TRUE
which(is.na(crashes$Lane_width) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Lane_width) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Lane_width) ==T)) --> 42
class(crashes$Lane_width) # Factor
levels(crashes$Lane_width) # "325m_to_375m", "Less_325m", "More_375m"
summary(crashes$Lane_width) # Frequency tables
# 325m_to_375m Less_325m More_375m NA's
# 509 88 45 42
plot(crashes$Lane_width)
# Categorical, 3 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 10.- Hard_shoulder (Arcen)
anyNA(crashes$Hard_shoulder) # TRUE
which(is.na(crashes$Hard_shoulder) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Hard_shoulder) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Hard_shoulder) ==T)) --> 42
class(crashes$Hard_shoulder) # Factor
levels(crashes$Hard_shoulder) # "150m_to_250m", "Less_150m", "More_250m", "None"
summary(crashes$Hard_shoulder) # Frequency tables
# 150m_to_250m Less_150m More_250m None NA's
# 47 266 5 324 42
plot(crashes$Hard_shoulder)
# Categorical, 4 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 11.- Paved_hard_shoulder
anyNA(crashes$Paved_hard_shoulder) # TRUE
which(is.na(crashes$Paved_hard_shoulder) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Paved_hard_shoulder) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Paved_hard_shoulder) ==T)) --> 42
class(crashes$Paved_hard_shoulder) # Factor
levels(crashes$Paved_hard_shoulder) # "N", "Y"
summary(crashes$Paved_hard_shoulder) # Frequency tables
# N Y NA's
# 235 407 42
plot(crashes$Paved_hard_shoulder)
# Categorical, 2 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 12.- Security_barriers
anyNA(crashes$Security_barriers) # TRUE
which(is.na(crashes$Security_barriers) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Security_barriers) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Security_barriers) ==T)) --> 42
class(crashes$Security_barriers) # Factor
levels(crashes$Security_barriers) # "N", "Y"
summary(crashes$Security_barriers) # Frequency tables
# N Y NA's
# 391 251 42
plot(crashes$Security_barriers)
# Categorical, 2 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 13.- Additional_panels
anyNA(crashes$Aditional_panels) # TRUE
which(is.na(crashes$Aditional_panels) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Aditional_panels) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Aditional_panels) ==T)) --> 42
class(crashes$Aditional_panels) # Factor
levels(crashes$Aditional_panels) # "N", "Y"
summary(crashes$Aditional_panels) # Frequency tables
# N Y NA's
# 452 190 42
plot(crashes$Aditional_panels)
# Categorical, 2 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 14.- Edge_landmarks
anyNA(crashes$Edge_landmarks) # TRUE
which(is.na(crashes$Edge_landmarks) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Edge_landmarks) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Edge_landmarks) ==T)) --> 42
class(crashes$Edge_landmarks) # Factor
levels(crashes$Edge_landmarks) # "N", "Y"
summary(crashes$Edge_landmarks) # Frequency tables
# N Y NA's
# 280 362 42
plot(crashes$Edge_landmarks)
# Categorical, 2 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 15.- Reflectors
anyNA(crashes$Reflectors) # TRUE
which(is.na(crashes$Reflectors) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Reflectors) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Reflectors) ==T)) --> 42
class(crashes$Reflectors) # Factor
levels(crashes$Reflectors) # "N", "Y"
summary(crashes$Reflectors) # Frequency tables
# N Y NA's
# 324 318 42
plot(crashes$Reflectors)
# Categorical, 2 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 16.- Surface
anyNA(crashes$Surface) # FALSE
class(crashes$Surface) # Factor
levels(crashes$Surface) # "Dry", "Flooded", "Mud_or_gravel", "Oil", "Other", "Snow", "Wet"
# length(levels(crashes$Surface)) --> 7
summary(crashes$Surface) # Frequency tables
# Dry Flooded Mud_or_gravel Oil Other Snow Wet
# 372 4 3 10 17 2 276
plot(crashes$Surface)
# Categorical, 7 categories, no NA, ~ no 0 frequencies
# 17.- Luminosity
anyNA(crashes$Luminosity) # FALSE
class(crashes$Luminosity) # Factor
levels(crashes$Luminosity) # "Artificial_enough", "Artificial_not_enough", "Daylight", "Dusk",
# "Dusk_artificial", "Dusk_no_artificial", "No_light"
# length(levels(crashes$Luminosity)) --> 7
summary(crashes$Luminosity) # Frequency tables
# Artificial_enough Artificial_not_enough Daylight Dusk Dusk_artificial Dusk_no_artificial No_light
# 48 30 468 29 6 14 89
plot(crashes$Luminosity)
# Categorical, 7 categories, no NA, ~ no 0 frequencies
# 18.- Atmospheric_factors
anyNA(crashes$Atmosferic_factors) # TRUE
which(is.na(crashes$Atmosferic_factors) ==T) # NA values. All of them in 2006 data.
# 24 25 26 49 53 55 128 368 380 405 414 419 421
length(which(is.na(crashes$Atmosferic_factors) ==T)) # 13
class(crashes$Atmosferic_factors) # Factor
levels(crashes$Atmosferic_factors) # "Clear", "Cloudy", "Hail" , "Heavy_rain", "Light_fog",
# "Light_rain", "Snowing", "Strong_wind"
# length(levels(crashes$Atmosferic_factors)) --> 8
summary(crashes$Atmosferic_factors) # Frequency tables
# Clear Cloudy Hail Heavy_rain Light_fog Light_rain Snowing Strong_wind NA's
# 395 37 1 59 7 167 2 3 13
plot(crashes$Atmosferic_factors)
# Categorical, 8 categories, NA:13 (All in 2006), no 0 frequencies
# 19.- Limited_visibility
anyNA(crashes$Limited_visibility) # FALSE
class(crashes$Limited_visibility) # Factor
levels(crashes$Limited_visibility) # "N" "Y"
summary(crashes$Limited_visibility) # Frequency tables
# N Y
# 276 408
plot(crashes$Limited_visibility)
# Categorical, 2 categories, no NA, no 0 frequencies
# 20.- Danger_signs
anyNA(crashes$Danger_signs) # TRUE
which(is.na(crashes$Danger_signs) ==T) # NA values. All of them in 2015 data.
# [1] 436 437 448 454 455 467 468 477 480 483 486 489 495 502 518 527 528 537 540 541
# [21] 543 552 558 563 568 570 578 585 598 600 611 616 628 640 641 649 654 657 662 669
# [41] 675 676
which(is.na(crashes$Danger_signs) ==T)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
# length(which(is.na(crashes$Danger_signs) ==T)) --> 42
class(crashes$Danger_signs) # Factor
levels(crashes$Danger_signs) # "No", "Not_needed", "Yes"
# length(levels(crashes$Danger_signs)) --> 3
summary(crashes$Danger_signs) # Frequency tables
# No Not_needed Yes NA's
# 190 141 311 42
plot(crashes$Danger_signs)
# Categorical, 3 categories, NA:42 (All in 2015)(same as Road_width), no 0 frequencies
# 21.- Crash_type
anyNA(crashes$Crash_type) # FALSE
class(crashes$Crash_type) # Factor
levels(crashes$Crash_type) #"Collision", "Crash", "Fall", "Off_the_road", "Other","Ped_run_over", "Rollover"
# length(levels(crashes$Crash_type)) --> 7
summary(crashes$Crash_type) # Frequency tables
# Collision Crash Fall Off_the_road Other Ped_run_over Rollover
# 361 28 23 91 156 14 11
plot(crashes$Crash_type)
# Categorical, 7 categories, no NA, ~ no 0 frequencies
# 22.- Traffic
anyNA(crashes$Traffic) # TRUE
which(is.na(crashes$Traffic) ==T) # NA values. All of them in 2015 data.
# [1] 433 436 437 438 443 445 446 447 448 449 450 451 452 453 454 455 456 459 461 462 463
# [22] 467 468 472 473 476 477 478 479 480 481 482 483 486 488 489 492 493 494 495 497 500
# [43] 502 503 506 508 509 511 512 513 518 519 520 527 528 533 534 537 539 540 541 542 543
# [64] 552 554 555 556 558 559 560 561 563 564 566 567 568 570 571 573 575 576 577 578 580
# [85] 581 583 584 585 586 591 592 593 595 596 598 599 600 601 602 603 604 606 607 610 611
# [106] 612 613 614 616 620 622 625 626 628 629 630 631 632 633 635 636 637 639 640 641 643
# [127] 647 648 649 650 651 654 656 657 658 662 665 669 671 672 673 674 675 676 680 681 682
# [148] 683
intersect(which(is.na(crashes$Traffic) ==T),which(is.na(crashes$Road_width) ==T)
)==which(is.na(crashes$Road_width) ==T) # ALL TRUE
length(which(is.na(crashes$Traffic) ==T)) # 148
class(crashes$Traffic) # Factor
levels(crashes$Traffic) # "Heavy", "Jam", "Light"
# length(levels(crashes$Traffic)) --> 3
summary(crashes$Traffic) # Frequency tables
# Heavy Jam Light NA's
# 33 13 490 148
plot(crashes$Traffic)
# Categorical, 3 categories, NA:148 (All in 2015)(included Road_width), no 0 frequencies
# 23.- Conc_distraction
anyNA(crashes$Conc_distraction) # FALSE
class(crashes$Conc_distraction) # Factor
levels(crashes$Conc_distraction) # "N", "Y"
summary(crashes$Conc_distraction) # Frequency tables
# N Y
# 481 203
plot(crashes$Conc_distraction)
# Categorical, 2 categories, no NA, no 0 frequencies
# 24.- Conc_alcohol_durgs
anyNA(crashes$Conc_alcohol_durgs) # FALSE
class(crashes$Conc_alcohol_durgs) # Factor
levels(crashes$Conc_alcohol_durgs) # "N", "Y"
summary(crashes$Conc_alcohol_durgs) # Frequency tables
# N Y
# 658 26
plot(crashes$Conc_alcohol_durgs)
# Categorical, 2 categories, no NA, no 0 frequencies
# 25.- Conc_speed
anyNA(crashes$Conc_speed) # FALSE
class(crashes$Conc_speed) # Factor
levels(crashes$Conc_speed) # "N", "Y"
summary(crashes$Conc_speed) # Frequency tables
# N Y
# 579 105
plot(crashes$Conc_speed)
# Categorical, 2 categories, no NA, no 0 frequencies
# 26.- Conc_tyred_sleepy_ill
anyNA(crashes$Conc_tyred_sleepy_ill) # FALSE
class(crashes$Conc_tyred_sleepy_ill) # Factor
levels(crashes$Conc_tyred_sleepy_ill) # "N", "Y"
summary(crashes$Conc_tyred_sleepy_ill) # Frequency tables
# N Y
# 588 96
plot(crashes$Conc_tyred_sleepy_ill)
# Categorical, 2 categories, no NA, no 0 frequencies
# 27.- Conc_weather
anyNA(crashes$Conc_weather) # FALSE
class(crashes$Conc_weather) # Factor
levels(crashes$Conc_weather) # "N", "Y"
summary(crashes$Conc_weather) # Frequency tables
# N Y
# 588 96
plot(crashes$Conc_weather)
# Categorical, 2 categories, no NA, no 0 frequencies
#### Separate the samples of 2006 and 2015
crashes2015 <- crashes[which(crashes["Year"] == '2015' ),][,-2]
crashes2006 <- crashes[which(crashes["Year"] == '2006' ),][,-2]
#### Estimation of values for NA of 2006
# Decision tree and estimate values
library(rpart)
crashes2006.test <- crashes2006[which(is.na(crashes2006[,17])),-17]
crashes2006.train <- crashes2006[which(!(is.na(crashes2006[,17]))),]
names(crashes2006.train)
d.tr <- rpart(Atmosferic_factors~., data = crashes2006.train)
plot(d.tr, branch=0, margin=0.25, uniform=TRUE)
text(d.tr, use.n=TRUE, splits=TRUE, pretty=6)
predict(d.tr, crashes2006.test)
# We'll take the highest probability (Minimum max value > .7)
predictions <- predict(d.tr, crashes2006.test, "vector")
atmNAs2006 <- which(is.na(crashes2006[,17]))
# We've tried estimating the values using all the data except from variable Traffic and "the 42 NA instances".
# Just one significant change. We've decided to use the 2006 prediction.
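## Illustrative check of the statement above: the largest class probability for each
## imputed row (values near 1 mean the tree is confident about the imputed category).
apply(predict(d.tr, crashes2006.test), 1, max)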
#### Estimation of values for NA of 2015
# Remove Traffic + Remove NA instances
crashes2015 <- crashes2015[,-21]
#names(crashes2015)
crashes2015 <- crashes2015[-(which(is.na(crashes2015$Danger_signs) ==T)),]
# Verification: anyNA(crashes2015)
## Creation of crashes2 (combine the estimation values 1 of 2006 and 2015) to see
# the relation between variables.
crashes2 <-crashes[,-22]
crashes2 <- crashes2[-(which(is.na(crashes2$Danger_signs) ==T)),]
crashes2$Atmosferic_factors[atmNAs2006] <- levels(crashes2[,18])[predictions] # atmNAs2006 defined above; the 2006 rows are the first rows of crashes2, so the indices line up
#### Converting numeric attributes to categorical
# 1. More than 5 casualties -> new category
crashes2[which(crashes2[,5]>5),5]="more_than_5"
crashes2[,5] <-as.factor(crashes2[,5])
# 2. More than 2 vehicles -> new category
crashes2[which(crashes2[,6]>2),6]="more_than_2"
crashes2[,6] <-as.factor(crashes2[,6])
#table(crashes2[,6])
# 3. More than 2 lanes -> new category
crashes2[which(crashes2[,7]>2),7]="more_than_2"
crashes2[,7] <-as.factor(crashes2[,7])
#### Modification of the data observing the first attempt of the model
# 1. Extreme category surface created -> new category
library(car)
crashes2$Surface <- recode(crashes2$Surface,"'Dry'='Dry';'Wet'='Wet';else='Extreme'")
# 2. Surface: grouping Dusk categories -> new category (diff between 2006 and 2015)
crashes2$Luminosity <- recode(crashes2$Luminosity,"'Artificial_enough'='Artificial_enough';'Artificial_not_enough'='Artificial_not_enough';'Daylight'='Daylight';'No_light'='No_light';else='Dusk'")
# Variable relations: Chi-square matrix p-value
chisqmatrix <- function(x) {
names = colnames(x); num = length(names)
m = matrix(nrow=num,ncol=num,dimnames=list(names,names))
for (i in 1:(num-1)) {
for (j in (i+1):num) {
m[i,j] = chisq.test(x[,i],x[,j])$p.value
}
}
return (m)
}
mat = chisqmatrix(crashes2)
mat
# Some frequency table observations
table(crashes2$Type_day,crashes2$Week_day)
table(crashes2$Surface,crashes2$Atmosferic_factors)
table(crashes2$Num_vehicles,crashes2$Casualties)
table(crashes2$Hard_shoulder,crashes2$Paved_hard_shoulder)
table(crashes2$Edge_landmarks,crashes2$Reflectors)
table(crashes2$Atmosferic_factors,crashes2$Conc_weather)
# Remove Week_day, Casualties, Paved_hard_shoulder, Edge landmarks, Atmosferic_factors,
# Crash Type (because of low p-values)
crashes2 <- crashes2[,-c(3,5,11,14,18,21)]
## Finally, we separate data from 2006 and 2015
crashes2.2006 <- crashes2[which(crashes2["Year"] == '2006' ),][,-2]
crashes2.2015 <- crashes2[which(crashes2["Year"] == '2015' ),][,-2]
#### Modelling the data
library(bnlearn)
library(Rgraphviz)
library(ggplot2)
## 2006 data
# Sampling data
set.seed(937) #937
rs <- sample(1:431, 300, replace=F)
crashes2.2006.train <- crashes2.2006[rs,]
crashes2.2006.test <- crashes2.2006[-rs,]
## 2015 data
# Sampling data
rs2 <- sample(1:211, 150, replace=F)
crashes2.2015.train <- crashes2.2015[rs2,]
crashes2.2015.test <- crashes2.2015[-rs2,]
### Modelling (always optimized) 2006
# 1. gs
model2006.gs <- gs(crashes2.2006.train)
# 2. fast.iamb
model2006.iamb <-fast.iamb(crashes2.2006.train)
# 3. mmpc
model2006.mmpc <- mmpc(crashes2.2006.train)
# 4. hc 10/7
model2006.hc.bic <- hc(crashes2.2006.train,restart = 10, perturb = 7)
model2006.hc.k2 <- hc(crashes2.2006.train,restart = 10, perturb = 7,score = "k2")
model2006.hc.bde <- hc(crashes2.2006.train,restart = 10, perturb = 7,score = "bde")
bnlearn::score(x=model2006.hc.bic, data=crashes2.2006.train, type="bic") # -4519.796
bnlearn::score(x=model2006.hc.k2, data=crashes2.2006.train, type="bic") # -4698.222
bnlearn::score(x=model2006.hc.bde, data=crashes2.2006.train, type="bic") # -4519.244
# We will use model2006.hc.bde
# 5. tabu
model2006.tabu.bic <- tabu(crashes2.2006.train)
model2006.tabu.k2 <- tabu(crashes2.2006.train,score = "k2")
model2006.tabu.bde <- tabu(crashes2.2006.train,score = "bde")
bnlearn::score(x=model2006.tabu.bic, data=crashes2.2006.train, type="bic") # -4519.796
bnlearn::score(x=model2006.tabu.k2, data=crashes2.2006.train, type="bic") # -4899.863
bnlearn::score(x=model2006.tabu.bde, data=crashes2.2006.train, type="bic") # -4519.796
# We will use model2006.tabu.bic
# Plotting
graphviz.plot(x=model2006.gs, layout="dot", shape="ellipse")
graphviz.plot(x=model2006.iamb, layout="dot", shape="ellipse")
graphviz.plot(x=model2006.mmpc, layout="dot", shape="ellipse")
graphviz.plot(x=model2006.hc.bde, layout="dot", shape="ellipse")
graphviz.plot(x=model2006.tabu.bic, layout="dot", shape="ellipse")
# Our final model: HC.BDE --> model2006.hc.bde
### Modelling (always optimized) 2015
# 1. gs
model2015.gs <- gs(crashes2.2015.train)
# 2. fast.iamb
model2015.iamb <-fast.iamb(crashes2.2015.train)
# 3. mmpc
model2015.mmpc <- mmpc(crashes2.2015.train)
# 4. hc 10/7
model2015.hc.bic <- hc(crashes2.2015.train,restart = 10, perturb = 7)
model2015.hc.k2 <- hc(crashes2.2015.train,restart = 10, perturb = 7,score = "k2")
model2015.hc.bde <- hc(crashes2.2015.train,restart = 10, perturb = 7,score = "bde")
bnlearn::score(x=model2015.hc.bic, data=crashes2.2015.train, type="bic") # -2241.088
bnlearn::score(x=model2015.hc.k2, data=crashes2.2015.train, type="bic") # -7487.2192
bnlearn::score(x=model2015.hc.bde, data=crashes2.2015.train, type="bic") # -2241.442
# We will use model2015.hc.bde
# 5. tabu
model2015.tabu.bic <- tabu(crashes2.2015.train)
model2015.tabu.k2 <- tabu(crashes2.2015.train,score = "k2")
model2015.tabu.bde <- tabu(crashes2.2015.train,score = "bde")
bnlearn::score(x=model2015.tabu.bic, data=crashes2.2015.train, type="bic") # -2241.088
bnlearn::score(x=model2015.tabu.k2, data=crashes2.2015.train, type="bic") # -7181.836
bnlearn::score(x=model2015.tabu.bde, data=crashes2.2015.train, type="bic") # -2241.442
# We will use model2015.tabu.bic
# Plotting
graphviz.plot(x=model2015.gs, layout="dot", shape="ellipse")
graphviz.plot(x=model2015.iamb, layout="dot", shape="ellipse")
graphviz.plot(x=model2015.mmpc, layout="dot", shape="ellipse")
graphviz.plot(x=model2015.hc.bde, layout="dot", shape="ellipse")
graphviz.plot(x=model2015.tabu.bic, layout="dot", shape="ellipse")
# Our final model: HC.BDE --> model2015.hc.bde
### Generalisation and Overfitting
eval_struct <- function(bn, train, test){
n = dim(train)[1] + dim(test)[1]
results = c()
for (size in sizes) {
d <- train[1:size,]
bnf <- bn.fit(x=bn, data=d, method = "bayes")
gener <- stats::logLik(bnf, test)/(n*dim(test)[1])
fit <- stats::logLik(bnf,d)/(n*dim(d)[1])
results = c(results, gener, fit)
}
return(matrix(results, ncol = 2, byrow = T))
}
set.seed(937) #937
# `sizes` must exist before the first eval_struct() call; use a grid up to the 2006 training-set size (300)
sizes <- round(exp(seq(1, log(300), (log(300) - 1) / 20)))
plotmodel1 <-eval_struct(model2006.hc.bic,crashes2.2006.train,crashes2.2006.test)
plot(log10(sizes), plotmodel1[,1], main="Generalisation 2006",
ylab="LL/nN", xlab="log(Size)", type="l", col="blue", ylim=c(min(plotmodel1), max(plotmodel1)))
lines(log10(sizes), plotmodel1[, 2], col="red")
# Smaller grid of training-set sizes for the 2015 data (150 training rows)
sizes <- round(exp(seq(1, log(150), (log(150) - 1) / 20)))
plotmodel2 <-eval_struct(model2015.hc.bic,crashes2.2015.train,crashes2.2015.test)
plot(log10(sizes), plotmodel2[,1], main="Generalisation 2015",
ylab="LL/nN", xlab="log(Size)", type="l", col="blue", ylim=c(min(plotmodel2), max(plotmodel2)))
lines(log10(sizes), plotmodel2[, 2], col="red")
#### Inference
library(gRain)
# grain object
model2006.hc.bde.grain <- grain(as(amat(model2006.hc.bde), "graphNEL"), data=crashes2.2006.train, smooth=1/dim(crashes2.2006.train)[1])
plot(model2006.hc.bde.grain)
# Compilation
model2006.hc.bde.compiled <- compile(model2006.hc.bde.grain)
summary(model2006.hc.bde.compiled)
# Propagate
model2006.hc.bde.propagated <- propagate(model2006.hc.bde.compiled)
summary(model2006.hc.bde.propagated)
# Same process to 2015 data
model2015.hc.bde.grain <- grain(as(amat(model2015.hc.bde), "graphNEL"), data=crashes2.2015.train,smooth=1/dim(crashes2.2015.train)[1])
model2015.hc.bde.compiled <- compile(model2015.hc.bde.grain)
model2015.hc.bde.propagated <- propagate(model2015.hc.bde.compiled)
### Probability estimation
# 1) Marginal prob.: Conc_alcohol_durgs
querygrain(model2006.hc.bde.propagated , nodes="Conc_alcohol_durgs", type="marginal")
# N Y
# 0.95997981 0.04002019
querygrain(model2015.hc.bde.propagated , nodes="Conc_alcohol_durgs", type="marginal")
# N Y
# 0.97319288 0.02680712
# 2) Marginal prob.: Limited visibilty. Evidence: Reflectors
querygrain(model2006.hc.bde.propagated , nodes="Limited_visibility", type="marginal")
# N Y
# 0.2333501 0.7666499
querygrain(model2015.hc.bde.propagated , nodes="Limited_visibility", type="marginal")
# N Y
# 0.5924582 0.4075418
# With reflectors? Evidence
model2006.hc.bde.propagated.ev2 <- setEvidence(model2006.hc.bde.propagated,nodes="Reflectors",states="Y", propagate=TRUE)
querygrain(model2006.hc.bde.propagated.ev2 , nodes="Limited_visibility", type="marginal")
# N Y
# 0.3046502 0.6953498
# --> True
model2015.hc.bde.propagated.ev2 <- setEvidence(model2015.hc.bde.propagated,nodes="Reflectors",states="Y", propagate=TRUE)
querygrain(model2015.hc.bde.propagated.ev2 , nodes="Limited_visibility", type="marginal")
# N Y
# 0.5924582 0.4075418 The same (nodes no connected)
# 3) Marginal prob.: Num_of_cars. Evidences: Distraction and Limited visibility
# 2006 data
model2006.hc.bde.propagated.ev3 <- setEvidence(model2006.hc.bde.propagated,nodes=c("Limited_visibility","Conc_distraction"),states=c("Y","Y"), propagate=TRUE)
querygrain(model2006.hc.bde.propagated.ev3 , nodes="Num_vehicles", type="marginal")
# 1 2 more_than_2
# 0.1792610 0.6980846 0.1226544
model2006.hc.bde.propagated.ev4 <- setEvidence(model2006.hc.bde.propagated,nodes=c("Limited_visibility","Conc_distraction"),states=c("N","N"), propagate=TRUE)
querygrain(model2006.hc.bde.propagated.ev4 , nodes="Num_vehicles", type="marginal")
# 1 2 more_than_2
# 0.5566936 0.3195912 0.1237152
# Single vehicle accidents increase
# 2015 data
model2015.hc.bde.propagated.ev3 <- setEvidence(model2015.hc.bde.propagated,nodes=c("Limited_visibility","Conc_distraction"),states=c("Y","Y"), propagate=TRUE)
querygrain(model2015.hc.bde.propagated.ev3 , nodes="Num_vehicles", type="marginal")
# 1 2 more_than_2
# 0.4561207 0.4265556 0.1173237
model2015.hc.bde.propagated.ev4 <- setEvidence(model2015.hc.bde.propagated,nodes=c("Limited_visibility","Conc_distraction"),states=c("N","N"), propagate=TRUE)
querygrain(model2015.hc.bde.propagated.ev4 , nodes="Num_vehicles", type="marginal")
# 1 2 more_than_2
# 0.5393078 0.3587505 0.1019417
# Single vehicle accidents increase
# 4) Marginal prob.: Surface
querygrain(model2006.hc.bde.propagated , nodes="Surface", type="marginal")
# Dry Extreme Wet
# 0.58332044 0.04335095 0.37332861
querygrain(model2015.hc.bde.propagated , nodes="Surface", type="marginal")
# Dry Extreme Wet
# 0.52657241 0.06011447 0.41331312
# 5) Marginal prob.: Month
querygrain(model2006.hc.bde.propagated , nodes="Month", type="marginal")
# Apr Aug Dec Feb Jan Jul
# 0.05000444 0.06333600 0.07666756 0.09333200 0.07333467 0.08000044
# Jun Mar May Nov Oct Sep
# 0.11666222 0.08999911 0.06666889 0.10999644 0.09999778 0.08000044
querygrain(model2015.hc.bde.propagated , nodes="Month", type="marginal")
# Apr Aug Dec Feb Jan Jul
# 0.07333866 0.11331734 0.08666489 0.05334932 0.06667555 0.13996979
# Jun Mar May Nov Oct Sep
# 0.07333866 0.11331734 0.06667555 0.06667555 0.08666489 0.06001244
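## One more illustrative query (not part of the original analysis): probability that
## speed was a contributing factor given a wet surface, for each year's model.
querygrain(setEvidence(model2006.hc.bde.propagated, nodes="Surface", states="Wet"),
           nodes="Conc_speed", type="marginal")
querygrain(setEvidence(model2015.hc.bde.propagated, nodes="Surface", states="Wet"),
           nodes="Conc_speed", type="marginal")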
|
76823b0f87c1804347ea8025cf8bc9fe9f60283d | 3d78414d840fb586325c73f0b116e908e3179163 | /R/parse_quiz_metadata.R | faf839f29362a3ceaff8087755ef3808044feda1 | [] | no_license | kamclean/cowboy | cf537c956912c1b7b431ac5fd794e1222bf017d6 | c04a19ce231db60a70ece3ee16d48db85d8aa56a | refs/heads/main | 2023-04-02T11:28:22.789164 | 2021-04-05T13:45:06 | 2021-04-05T13:45:06 | 354,847,284 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,623 | r | parse_quiz_metadata.R | # parse_quiz_metadata--------------------------------
# Documentation
#' Creates a Moodle quiz data dictionary
#' @description Wrangles files directly extracted from Moodle to create a formatted data dictionary for the Moodle quiz
#' @param question_xml Output from xml2::read_xml() applied to Moodle Quiz XML file. From the quiz settings, select "Question Bank" then "Export" then download "Moodle XML" file.
#' @param question_stats Output from readr::read_csv() applied to Moodle Quiz structure analysis CSV file. From the quiz settings, select "Results" then "Statistics" then download "Quiz structure analysis" table as CSV file.
#' @return Returns a formatted data dictionary for the moodle quiz
#' @import dplyr
#' @import xml2
#' @import tibble
#' @import stringr
#' @import tidyr
#' @importFrom purrr map_chr is_empty map_dfr
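#' @examples
#' \dontrun{
#' # Illustrative usage only -- the file names below are assumptions, not files
#' # shipped with the package:
#' question_xml <- xml2::read_xml("moodle_quiz_questions.xml")
#' question_stats <- readr::read_csv("quiz_structure_analysis.csv")
#' quiz_dictionary <- parse_quiz_metadata(question_xml, question_stats)
#' }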
#' @export
parse_quiz_metadata <- function(question_xml, question_stats){
parse_quiz_question <- function(question_stats){
out <- question_stats %>%
dplyr::filter(suppressWarnings(is.na(as.numeric(`Quiz name`))==F)) %>%
dplyr::select(response = `Quiz name`, class = `Course name`, name_short = `Number of complete graded first attempts`) %>%
dplyr::mutate(class = case_when(class=="Embedded answers (Cloze)" ~ "cloze",
class=="Matching" ~ "matching",
class=="Matrix/Kprime" ~ "matrix",
class=="Multiple choice" ~ "multichoice",
class=="Short answer" ~ "shortanswer",
TRUE ~ class)) %>%
dplyr::mutate(response = paste0("Response ", response))
return(out)}
question <- parse_quiz_question(question_stats = question_stats)
list <- question_xml %>% xml2::xml_children()
table <- list %>%
purrr::map_chr(function(x){xml2::xml_attr(x,"type")}) %>%
tibble::enframe(name = "n", value = "class") %>%
dplyr::mutate(name_short = list %>%
purrr::map_chr(function(x){y = xml2::xml_find_all(x, "name") %>%
xml2::xml_find_all("text") %>%
xml2::xml_text()
if(purrr::is_empty(y)==T){y <- NA_character_}
return(y)}),
name_long = list %>%
purrr::map_chr(function(x){y = xml2::xml_find_all(x, "questiontext") %>%
xml2::xml_find_all("text")%>%
xml2::xml_text()
if(purrr::is_empty(y)==T){y <- NA_character_}
return(y)})) %>%
dplyr::mutate(across(c(name_short, name_long),
function(x){x %>% stringr::str_remove_all("<.*?>") %>%
stringr::str_replace_all(" ", " ") %>% stringr::str_trim()})) %>%
dplyr::mutate(level_matching = list %>%
purrr::map_chr(function(x){y = x %>%
xml2::xml_find_all("subquestion") %>% xml2::xml_find_all("text") %>%
xml2::xml_text()
if(purrr::is_empty(y)==T){y <- NA_character_}
return(paste0(y, collapse = "; "))}),
level_multichoice = list %>%
purrr::map_chr(function(x){y = x %>%
xml2::xml_find_all("answer") %>%
xml2::xml_find_all("text") %>%
xml2::xml_text()
if(purrr::is_empty(y)==T){y <- NA_character_}
return(paste0(y, collapse = "; "))})) %>%
dplyr::mutate(across(c(level_matching, level_multichoice),
function(x){ x %>%
stringr::str_remove_all("<.*?>|\n|\\]\\]>") %>%
stringr::str_replace_all(" ", " ") %>%
stringr::str_replace_all( "NA|\\*", NA_character_)})) %>%
dplyr::mutate(n = as.character(n)) %>%
dplyr::filter(! class %in% c("category", "description"))
table <- table %>%
dplyr::left_join(question, by = c("class", "name_short")) %>%
dplyr::filter(is.na(response)==F)
matrix <- list %>%
purrr::map_dfr(.id = "n", function(x){y = x %>%
xml2::xml_find_all("row") %>%
xml2::xml_find_all("shorttext")%>%
xml2::xml_text() %>%
tibble::enframe(name = NULL, value = "name_long_matrix") %>%
dplyr::mutate(name_long_matrix = stringr::str_trim(name_long_matrix),
level_matrix = x %>%
xml2::xml_find_all("col") %>%
xml2::xml_find_all("shorttext")%>%
xml2::xml_text() %>% stringr::str_trim() %>%
paste0(collapse = "; "))})
cloze <- table %>%
dplyr::select(-level_multichoice, -level_matching) %>%
dplyr::filter(class=="cloze") %>%
# remove periods (.) not at end of sentence
dplyr::mutate(name_long = stringr::str_replace_all(name_long, paste0(c("e\\.g\\.", "E\\.g\\."), collapse = "|"), "E_g_"),
name_long = stringr::str_replace_all(name_long, "\\}Please", "\\}. Please"),
name_long = stringr::str_replace_all(name_long,
paste0(paste0("\\}", c("a", "b", "c", "d", "e", "f"), "\\)\\."), collapse = "|"),
"\\}\\."),
name_long = stringr::str_replace_all(name_long,
paste0(paste0(":", c("a", "b", "c", "d", "e", "f"), "\\)\\."), collapse = "|"),
"\\:\\.")) %>%
tidyr::separate_rows(name_long, sep = "\\.") %>%
dplyr::filter(stringr::str_detect(name_long, "\\}")) %>%
dplyr::mutate(name_long = stringr::str_trim(name_long)) %>%
dplyr::mutate(value = stringr::str_extract_all(name_long, "\\{(.*?)\\}"),
name_long = stringr::str_replace_all(name_long, "\\{(.*?)\\}", "___"),
value = stringr::str_remove_all(value, "\\{1:MULTICHOICE:=|\\}|~"),
name_long = stringr::str_trim(name_long)) %>%
tidyr::separate_rows(value, sep = "=") %>%
dplyr::filter(value!="")
cloze_sum <- cloze %>%
dplyr::distinct(name_short,name_long) %>%
dplyr::group_by(name_short) %>%
dplyr::summarise(name_long = unique(name_long),
nq = 1:n(),
.groups= "drop")
cloze <- cloze %>%
dplyr::left_join(cloze_sum, by = c("name_short", "name_long")) %>%
dplyr::group_by(name_short,name_long, nq) %>%
dplyr::summarise(level_cloze = paste0(value, collapse = "; "), .groups = "drop") %>%
dplyr::rename("name_long_cloze" = name_long) %>%
dplyr::arrange(name_short,nq) %>%
dplyr::select(-nq)
final <- table %>%
dplyr::left_join(matrix, by = "n") %>%
dplyr::left_join(cloze, by = "name_short") %>%
dplyr::mutate(name_long = case_when(class=="matrix" ~ name_long_matrix,
class=="cloze" ~ name_long_cloze,
TRUE ~ name_long)) %>%
dplyr::mutate(name_long_3char = stringr::str_sub(name_long, nchar(name_long)-2, nchar(name_long)),
name_long = ifelse(name_long_3char=="___",#
stringr::str_sub(name_long, 1, nchar(name_long)-3),
name_long)) %>%
dplyr::select(-name_long_matrix, -name_long_cloze, - name_long_3char,-n) %>%
tidyr::unite(col = "level", starts_with("level_"), sep = "&&&", na.rm = T) %>%
dplyr::mutate(level = ifelse(level == "", NA, level),
level = stringr::str_to_title(level) %>% iconv(to="ASCII//TRANSLIT") %>%
stringr::str_squish() %>% stringr::str_trim(),
level = stringr::str_replace_all(level, "E_g_","E.g."),
level = stringr::str_split(level, pattern = "; ")) %>%
dplyr::group_by(name_short) %>%
dplyr::mutate(n = 1:n(),
total = n(),
n = ifelse(n==1&total==1, "", as.character(n)),
name_short =ifelse(n=="", name_short, paste0(name_short, "___", n)),
response = ifelse(n=="", response, paste0(response, "-", n)),
response_n = stringr::str_extract(response, "([0-9]+)") %>% as.numeric()) %>%
dplyr::ungroup() %>%
dplyr::arrange(response_n,n) %>%
dplyr::mutate(response = factor(response, levels = unique(response))) %>%
dplyr::select(-n, -total, -response_n)
return(final)}
|
bdf2d1a16d9247a3a11920610294a4337c8fae48 | 567e72b5e2be621384ef613a914ed6f41ee1cc3a | /R/make_groups.R | d5dabf24daaf14070ae961e0ea2e4a625660ab0f | [
"MIT"
] | permissive | tidymodels/rsample | 2dce83f71b341938b00ecd39bde74c654fcb78c1 | be593b9e5502998832a0a939197bfc3a4b46738b | refs/heads/main | 2023-08-30T23:11:24.971458 | 2023-08-23T15:12:18 | 2023-08-23T15:12:18 | 89,093,570 | 251 | 63 | NOASSERTION | 2023-08-23T08:13:42 | 2017-04-22T19:19:58 | R | UTF-8 | R | false | false | 11,454 | r | make_groups.R | #' Make groupings for grouped rsplits
#'
#' This function powers grouped resampling by splitting the data based upon
#' a grouping variable and returning the assessment set indices for each
#' split.
#'
#' @inheritParams vfold_cv
#' @param group A variable in `data` (single character or name) used for
#' grouping observations with the same value to either the analysis or
#' assessment set within a fold.
#' @param balance If `v` is less than the number of unique groups, how should
#' groups be combined into folds? Should be one of
#' `"groups"`, `"observations"`, `"prop"`.
#' @param ... Arguments passed to balance functions.
#'
#' @details
#' Not all `balance` options are accepted -- or make sense -- for all resampling
#' functions. For instance, `balance = "prop"` assigns groups to folds at
#' random, meaning that any given observation is not guaranteed to be in one
#' (and only one) assessment set. That means `balance = "prop"` can't
#' be used with [group_vfold_cv()], and so isn't an option available for that
#' function.
#'
#' Similarly, [group_mc_cv()] and its derivatives don't assign data to one (and
#' only one) assessment set, but rather allow each observation to be in an
#' assessment set zero-or-more times. As a result, those functions don't have
#' a `balance` argument, and under the hood always specify `balance = "prop"`
#' when they call [make_groups()].
#'
#' @keywords internal
make_groups <- function(data,
group,
v,
balance = c("groups", "observations", "prop"),
strata = NULL,
...) {
rlang::check_dots_used(call = rlang::caller_env())
balance <- rlang::arg_match(balance, error_call = rlang::caller_env())
data_ind <- tibble(
..index = seq_len(nrow(data)),
..group = group
)
data_ind$..group <- as.character(data_ind$..group)
res <- switch(
balance,
"groups" = balance_groups(
data_ind = data_ind,
v = v,
strata = strata,
...
),
"observations" = balance_observations(
data_ind = data_ind,
v = v,
strata = strata,
...
),
"prop" = balance_prop(
data_ind = data_ind,
v = v,
strata = strata,
...
)
)
data_ind <- res$data_ind
keys <- res$keys
data_ind$..group <- as.character(data_ind$..group)
keys$..group <- as.character(keys$..group)
data_ind <- data_ind %>%
full_join(keys, by = "..group") %>%
arrange(..index)
split_unnamed(data_ind$..index, data_ind$..folds)
}
balance_groups <- function(data_ind, v, strata = NULL, ...) {
if (is.null(strata)) {
balance_groups_normal(data_ind, v, ...)
} else {
balance_groups_strata(data_ind, v, strata, ...)
}
}
balance_groups_normal <- function(data_ind, v, ...) {
rlang::check_dots_empty()
unique_groups <- unique(data_ind$..group)
keys <- data.frame(
..group = unique_groups,
..folds = sample(
rep(seq_len(v), length.out = length(unique_groups))
)
)
list(
data_ind = data_ind,
keys = keys
)
}
balance_groups_strata <- function(data_ind, v, strata, ...) {
rlang::check_dots_empty()
data_ind$..strata <- strata
# Create a table that's all the unique group x strata combinations:
keys <- vctrs::vec_unique(data_ind[c("..group", "..strata")])
# Create as many fold IDs as there are group x strata,
# in repeating order (1, 2, ..., n, 1, 2, ..., n)
folds <- rep(1:v, length.out = nrow(keys))
# Split the folds based on how many groups are within each strata
# So if the first strata in sort is 3, and v is 2, that strata gets a
# c(1, 2, 1) for fold IDs
#
# This means that, if nrow(keys) %% v == 0, each fold should have
# the same number of groups from each strata
#
# We randomize "keys" here so that the function is stochastic even for
# strata with only one group:
unique_strata <- unique(keys$..strata)
keys_order <- sample.int(length(unique_strata))
# Re-order the keys data.frame based on the reshuffled strata variable:
keys <- keys[
order(match(keys$..strata, unique_strata[keys_order])),
]
# And split both folds and keys with the reordered strata vector:
folds <- split_unnamed(folds, keys$..strata)
keys <- split_unnamed(keys, keys$..strata)
# Randomly assign fold IDs to each group within each strata
keys <- purrr::map2(
keys,
folds,
function(x, y) {
x$..folds <- sample(y)
x
}
)
keys <- dplyr::bind_rows(keys)
keys <- keys[c("..group", "..folds")]
list(
data_ind = data_ind,
keys = keys
)
}
balance_observations <- function(data_ind, v, strata = NULL, ...) {
rlang::check_dots_empty()
n_obs <- nrow(data_ind)
target_per_fold <- 1 / v
# This is the core difference between stratification and not:
#
# Without stratification, data_ind is broken into v groups,
# which are roughly balanced based on the number of observations
#
# With strata, data_ind is split up by strata, and then each _split_
# is broken into v groups (which are then combined with the other strata);
# the balancing for each fold is done separately inside each strata "split"
data_splits <- if (is.null(strata)) {
list(data_ind)
} else {
split_unnamed(data_ind, strata)
}
freq_table <- purrr::map(
data_splits,
balance_observations_helper,
v = v,
target_per_fold = target_per_fold
) %>%
list_rbind()
collapse_groups(freq_table, data_ind, v)
}
balance_observations_helper <- function(data_split, v, target_per_fold) {
n_obs <- nrow(data_split)
# Create a frequency table counting how many of each group are in the data:
freq_table <- vec_count(data_split$..group, sort = "location")
# Randomly shuffle that table, then assign the first few rows to folds
# (to ensure that each fold gets at least one group assigned):
freq_table <- freq_table[sample.int(nrow(freq_table)), ]
freq_table$assignment <- NA
# Assign the first `v` rows to folds, so that each fold has _some_ data:
freq_table$assignment[seq_len(v)] <- seq_len(v)
# Each run of this loop assigns one "NA" assignment to a fold,
# so we won't get caught in an endless loop here
while (any(is.na(freq_table$assignment))) {
# Get the index of the next row to be assigned, and its count:
next_row <- which(is.na(freq_table$assignment))[[1]]
next_size <- freq_table[next_row, ]$count
# Calculate which fold to assign this new row into:
group_breakdown <- freq_table %>%
# The only NA column in freq_table should be assignment
# So this should only drop un-assigned groups:
stats::na.omit() %>%
# Group by fold assignments and count data in each fold:
dplyr::group_by(.data$assignment) %>%
dplyr::summarise(count = sum(.data$count), .groups = "drop") %>%
# Calculate...:
dplyr::mutate(
# The proportion of data in each fold so far,
prop = .data$count / n_obs,
# The amount off from the target proportion so far,
pre_error = abs(.data$prop - target_per_fold),
# The amount off from the target proportion if we add this new group,
if_added_count = .data$count + next_size,
if_added_prop = .data$if_added_count / n_obs,
post_error = abs(.data$if_added_prop - target_per_fold),
# And how much better or worse adding this new group would make things
improvement = .data$post_error - .data$pre_error
)
# Assign the group in question to the best fold and move on to the next one:
most_improved <- which.min(group_breakdown$improvement)
freq_table[next_row, ]$assignment <-
group_breakdown[most_improved, ]$assignment
}
freq_table
}
balance_prop <- function(prop, data_ind, v, replace = FALSE, strata = NULL, ...) {
rlang::check_dots_empty()
check_prop(prop, replace)
# This is the core difference between stratification and not:
#
# Without stratification, `prop`% of `data_ind` is sampled `v` times;
# the resampling is done with the entire set of groups
#
# With strata, data_ind is split up by strata, and then each _split_
# has `prop`% of `data_ind` is sampled `v` times;
# the resampling for each iteration is done inside each strata "split"
data_splits <- if (is.null(strata)) {
list(data_ind)
} else {
split_unnamed(data_ind, strata)
}
freq_table <- purrr::map(
data_splits,
balance_prop_helper,
prop = prop,
v = v,
replace = replace
) %>%
list_rbind()
collapse_groups(freq_table, data_ind, v)
}
balance_prop_helper <- function(prop, data_ind, v, replace) {
freq_table <- vec_count(data_ind$..group, sort = "location")
# Calculate how many groups to sample each iteration
# If sampling with replacement,
# set `n` to the number of resamples we'd need
# if we somehow got the smallest group every time.
# If sampling without replacement, just reshuffle all the groups.
n <- nrow(freq_table)
if (replace) n <- n * prop * sum(freq_table$count) / min(freq_table$count)
n <- ceiling(n)
purrr::map(
seq_len(v),
function(x) {
row_idx <- sample.int(nrow(freq_table), n, replace = replace)
work_table <- freq_table[row_idx, ]
cumulative_proportion <- cumsum(work_table$count) / sum(freq_table$count)
crosses_target <- which(cumulative_proportion > prop)[[1]]
is_closest <- cumulative_proportion[c(crosses_target, crosses_target - 1)]
is_closest <- which.min(abs(is_closest - prop)) - 1
crosses_target <- crosses_target - is_closest
out <- work_table[seq_len(crosses_target), ]
out$assignment <- x
out
}
) %>%
list_rbind()
}
check_prop <- function(prop, replace) {
acceptable_prop <- is.numeric(prop)
acceptable_prop <- acceptable_prop &&
((prop <= 1 && replace) || (prop < 1 && !replace))
acceptable_prop <- acceptable_prop && prop > 0
if (!acceptable_prop) {
rlang::abort(
"`prop` must be a number between 0 and 1.",
call = rlang::caller_env()
)
}
}
collapse_groups <- function(freq_table, data_ind, v) {
data_ind <- dplyr::left_join(
data_ind,
freq_table,
by = c("..group" = "key"),
multiple = "all",
relationship = "many-to-many"
)
data_ind$..group <- data_ind$assignment
data_ind <- data_ind[c("..index", "..group")]
# If a group was never assigned a fold, then its `..group` is NA
#
# If we leave that alone, it winds up messing up our fold assignments,
# because it will be assigned some value in `seq_len(v)`
#
# So instead, we drop those groups here:
data_ind <- stats::na.omit(data_ind)
unique_groups <- unique(data_ind$..group)
keys <- data.frame(
..group = unique_groups,
..folds = sample(rep(seq_len(v), length.out = length(unique_groups)))
)
list(
data_ind = data_ind,
keys = keys
)
}
validate_group <- function(group, data, call = rlang::caller_env()) {
if (!missing(group)) {
group <- tidyselect::vars_select(names(data), !!enquo(group))
if (length(group) == 0) {
group <- NULL
}
}
if (is.null(group) || !is.character(group) || length(group) != 1) {
rlang::abort(
"`group` should be a single character value for the column that will be used for splitting.",
call = call
)
}
if (!any(names(data) == group)) {
rlang::abort("`group` should be a column in `data`.", call = call)
}
group
}
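# ---------------------------------------------------------------------------
# Minimal illustrative sketch (not part of the original file) of how the
# helpers above might be exercised. `data_ind` mimics the internal layout
# used here: one row per observation, with its row index in `..index` and its
# group id in `..group`. The group sizes and fold count are made up purely
# for illustration.
# data_ind <- data.frame(
#   ..index = 1:12,
#   ..group = rep(c("a", "b", "c", "d", "e", "f"), times = c(4, 3, 2, 1, 1, 1))
# )
# res <- balance_observations(data_ind, v = 3)
# table(res$data_ind$..group)   # observations per balanced assignment
# res$keys                      # assignment -> fold lookup table
# ---------------------------------------------------------------------------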
|
c4ca28474db07a49a2db2544d1775f27e0800b4f | 9132689eb7595fccb1811f9fe08cf1981bedeed9 | /man/imputeYn.extra.Rd | fdea4886ab9b738d3a0a29acc64faa1a1a184d90 | [] | no_license | cran/imputeYn | e2423f180bb0d74519291bd7943edb555340f625 | 579a5de507c184f87bad9a87e5844217ee693003 | refs/heads/master | 2021-01-24T06:13:24.603196 | 2015-10-23T22:08:14 | 2015-10-23T22:08:14 | 17,696,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,329 | rd | imputeYn.extra.Rd | \name{imputeYn.extra}
\alias{imputeYn.extra}
\title{
Imputing the Last Largest tied Observations with a new method}
\description{
The method provides one step ahead imputed values for tied censored observations.
}
\usage{
imputeYn.extra(Y, delta, hc.Yn, method = "km.TPQ", trans.sprob=NULL,
stime2=NULL, sprob2=NULL, trace=F)
}
\arguments{
\item{Y}{
response. Typically the logarithm of the survival time.
}
\item{delta}{
status; it includes value 1 for uncensored and value 0 for censored subject.
}
\item{hc.Yn}{
set of the lifetimes for the last largest censored observations.
}
\item{method}{
one of "it.PDQ" (repeated predicted difference quantity or called iterative (Khan and Shaw, 2013a)), "km.TPQ" (Kaplan-Meier trend predicted quantity or called "extrapolation" (Khan and Shaw, 2012a)). Default is "km.TPQ".
}
\item{trans.sprob}{
use only for "km.TPQ". Transformation for the survival probabilities. This transformation is needed if survival probability versus survival time plot is not linear. It takes "exp" for exponential transformation, and any value for respective power transformation. Default is NULL.
}
\item{stime2}{
use only for "km.TPQ". Survival times after necessary transformation if needed in order to obtain a linear relationship between the survival probability and survival time. Default is NULL.
}
\item{sprob2}{
use only for "km.TPQ". Survival probability after necessary transformation if needed in order to obtain a linear relationship between the survival probability and survival time. Default is NULL.
}
\item{trace}{
If TRUE then Kaplan-Meier survival plots are printed for both data sets: the original data and the data with the imputed values. Default is FALSE.
}
}
\details{
The method is developed for imputing the largest censored observations (a common situation for heavily censored data) when Kaplan-Meier weights are involved in modelling. For example, if weighted least squares is used, the extra imputation methods go one step beyond Efron's (1967) tail correction approach. These methods satisfy Efron's tail correction, achieve less biased estimates, and impute the largest censored observations even when they are tied. Furthermore, both methods satisfy the right-censoring assumption. When right-censored data are heavily censored toward the right, which is typical in areas such as industry and clinical trials, this function provides sensible imputations for those censored observations that are both the largest and tied. Details are discussed in Khan and Shaw (2013a).
}
\value{
It includes sorted lifetimes, censoring indicators, sorted lifetimes after imputation, censoring indicators after imputation, censored lifetimes for the Y(n)+ observations, imputed lifetimes for the Y(n)+ observations, the survival times, the survival probabilities, the survival times after transformation, the survival probabilities after transformation, the transformation used, and trace.
}
\references{
Efron, B. (1967). The two sample problem with censored data. In Proceedings of the fifth Berkeley symposium on mathematical statistics and probability, Vol. 4, p. 831-853.
Khan and Shaw. (2013a). On Dealing with Censored Largest Observations under Weighted Least Squares. CRiSM working paper, Department of Statistics, University of Warwick, UK, No. 13-07. Also available in \url{http://arxiv.org/abs/1312.2533}.
Khan and Shaw (2013b). Variable Selection with The Modified Buckley-James Method and The Dantzig Selector for High-dimensional Survival Data. Proceedings 59th ISI World Statistics Congress, 25-30 August 2013, Hong Kong, p. 4239-4244.
}
\author{
Hasinur Rahaman Khan and Ewart Shaw}
\examples{
## For Channing House data (heavy censored data)##
\dontrun{require(package="boot")}
\dontrun{time.ch<-channing[1:97,]$time} #for male
\dontrun{delta.ch<-channing[1:97,]$cens} # for male
\dontrun{hc.Yn.m<-rep(137,19)} # there are 19 last largest censored male each has 137 lifetime
\dontrun{imp.PDQ<-imputeYn.extra(time.ch, delta.ch, hc.Yn=hc.Yn.m,
method="it.PDQ", trace=T)}
\dontrun{imp.PDQ}
\dontrun{imp.TPQ<-imputeYn.extra(time.ch, delta.ch, hc.Yn=hc.Yn.m,
method = "km.TPQ", trace=T)}
\dontrun{imp.TPQ}
}
\keyword{imputation} |
ba2efa60bb70dca95462e8c149e022b15150e495 | 0a447c5e9562832c3456a893c1d1bc66a123289a | /exercise08_question1.R | 7ab407dd5c6246d2c20a4bde39135979a97e3b57 | [] | no_license | mcwang25/ICB2019_Exercise08 | 58f04b1f541dd3235701baa98f696733a591fcdc | 81e8a5483520a7c8347be9f52a21957528a4076b | refs/heads/master | 2020-09-11T05:14:59.822703 | 2019-11-22T01:53:30 | 2019-11-22T01:53:30 | 221,950,456 | 0 | 0 | null | 2019-11-15T15:24:26 | 2019-11-15T15:24:25 | null | UTF-8 | R | false | false | 615 | r | exercise08_question1.R | # Makes plot of UW v MSU game score vs time from 1-22-13
# Note: the assignment says not to worry about how pleasing the graph looks, merely to focus on the necessary control structures
bball <- read.table(file="UWvMSU_1-22-13.txt", header=TRUE)
UWscores <- bball[bball$team=="UW",]
MSUscores <- bball[bball$team=="MSU",]
MSUscores$team <- NULL
UWscores$team <- NULL
MSUscores$cumscore <- ave(MSUscores$score, FUN=cumsum)
UWscores$cumscore <- ave(UWscores$score, FUN=cumsum)
MSUscores$score <- NULL
UWscores$score <- NULL
plot(x=MSUscores$time, y=MSUscores$cumscore, type='l')+lines(x=UWscores$time,y=UWscores$cumscore)
|
49fae1509d576bc7dbd9361bdb905a6edfd77198 | 6dec0f6aec45dff66fcc2f480b03a5ffe3b9408c | /man/gx.eb.Rd | 4c21ba313a116e0c785efdd92695d13b886fe26b | [] | no_license | cran/rgr | 9645638952aa019902d3308e7e6cf04c1112bab7 | 87383fabc4cb3529c4c97493c596f7fd347cf302 | refs/heads/master | 2021-01-21T21:55:06.570408 | 2018-03-05T22:42:52 | 2018-03-05T22:42:52 | 17,699,219 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,860 | rd | gx.eb.Rd | \name{gx.eb}
\alias{gx.eb}
\title{ Computation of Empirical Balances }
\description{
Computes empirical balances (ratios) for the stated columns of a \code{n} by \code{p} matrix of compositional data.
}
\usage{
gx.eb(r, s, xx, ...)
}
\arguments{
\item{r}{ number of parts in the numerator. }
\item{s}{ number of parts in the denominator. }
\item{xx}{ matrix for which the balances for the stated columns will be computed. }
\item{...}{ the column indices of the parts in the numerator, followed by the column indices for the parts in the denominator. The total number of indices must equal the sum of \code{r} and \code{s}. }
}
\value{
\item{z}{ the vector of \code{n} balances. }
}
\note{
Multi-element ratios have a long history in exploration geochemistry, the parts in the numerator and denominator being selected on the basis of prior knowledge of the mineralogy and geochemistry of the feature being sought, commonly a mineral occurrence. As the features are rare events, ratios can be used to accentuate their numerical expression and increase their \sQuote{contrast} from the main mass of background data. The use of balances, ratios of geometric means of the parts in the numerator and denominator, accommodates the compositional nature of geochemical data.
}
\references{
Egozcue, J.J. & Pawlowsky-Glahn, V., 2005. Groups of Parts and Their Balances in Compositional Data Analysis. Mathematical Geology, 37(7):795-828.
}
\author{ Robert G. Garrett }
\seealso{ \code{\link{ltdl.fix.df}} }
\examples{
## Make test data available
data(sind.mat2open)
## Compute and display empirical balances for columns 1, 5 & 4
## of the data vs. columns 2 & 3
temp <- gx.eb(3, 2, sind.mat2open, 1, 5, 4, 2, 3)
shape(temp, "Zn.Cu.Cd/Fe.Mn balance")
## Clean-up
rm(sind.mat2open)
rm(temp)
}
\keyword{ multivariate }
|
b8490fbaf83ffd87940f557c645dfc2d99679ccd | 44a2d03cd3f012721b41223c4bb3f4dc5de483a9 | /man/fbind.Rd | 0c3dd4f96d45e27b0bd742ec89320528a282b586 | [
"MIT"
] | permissive | sumalibajaj/modelbuilding_try | fd26b46d846ab897d3dee7fcb15c4dc7ed1339b8 | a79b3151a6c5bc9676b46acda2d521a1aecd51bd | refs/heads/main | 2023-03-30T19:10:43.135402 | 2021-03-29T14:50:58 | 2021-03-29T14:50:58 | 352,672,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 207 | rd | fbind.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fbind.R
\name{fbind}
\alias{fbind}
\title{Title}
\usage{
fbind(a, b)
}
\arguments{
\item{b}{}
}
\value{
}
\description{
Title
}
|
9da97534c0a22240293def3f74e75c1900a77c3a | eb1f7d5c5009758ed188a4ef3d32e67741b4a552 | /R/Expectation.R | cb0a338a571335d3933971f6d636fd48a74be671 | [] | no_license | l-modolo/fdrDEP | d80874419e8173ebfe045c3200dc0d8c0f888465 | e03c54bd02f2d050a5f3b6d556d25582b325c117 | refs/heads/master | 2020-12-24T13:18:11.323501 | 2015-03-23T11:09:39 | 2015-03-23T11:09:39 | 8,160,842 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,892 | r | Expectation.R | Expectation = function(parameters, Mvar)
{
res = list()
f0x = c()
f1x = c()
gammA = matrix(rep(0, parameters[['NUM']]*2), parameters[['NUM']], 2, byrow=TRUE)
omega = c()
f0x = 2*dnorm(parameters[['zvalues']], Mvar$f0[1], Mvar$f0[2]) * (1 - parameters[['kappa']]/Mvar$ptheta[1])
f0x[parameters[['zvalues']]==0] = parameters[['kappa']]/Mvar$ptheta[1]
# f1x
if(parameters[['f1']] == "kernel" | parameters[['f1']] == "kernel.symetric")
{
f1x = Mvar$f1
}
else
{
f1x = rep(0, parameters[['NUM']])
if(parameters[['f1_compartiments']] == 1)
{
f1x = dnorm(parameters[['zvalues']], Mvar$f1[1], Mvar$f1[2])
}
else
{
for (c in 1:parameters[['f1_compartiments']])
{
f1x = f1x+Mvar$pc[c]*dnorm(parameters[['zvalues']], Mvar$f1[c, 1], Mvar$f1[c, 2])
}
}
}
if(parameters[['dependency']] == "none")
{
gammA[,1] = Mvar$ptheta[1]*f0x / ( Mvar$ptheta[1]*f0x + Mvar$ptheta[2]*f1x )
gammA[,2] = 1 - gammA[,1]
if(parameters[['f1']] != "kernel" & parameters[['f1']] != "kernel.symetric" & parameters[['f1_compartiments']] > 1)
{
# omega
omega = matrix(rep(0, parameters[['NUM']]*parameters[['f1_compartiments']]), parameters[['NUM']], parameters[['f1_compartiments']], byrow=TRUE)
for (c in 1:parameters[['f1_compartiments']])
{
f1c = dnorm(parameters[['zvalues']], Mvar$f1[c, 1], Mvar$f1[c, 2])
omega[, c] = gammA[, 2] * Mvar$pc[c]*f1c/f1x
if(length(Mvar$f0) >= 3)
{
omega[parameters[['zvalues']]==0, c] = 0
}
}
}
c0 = f0x*Mvar$ptheta[1] + f1x*Mvar$ptheta[2]
return(list(gammA = gammA, omega = omega, c0 = c0))
}
# forward
alpha = matrix(rep(0, parameters[['NUM']]*2), parameters[['NUM']], 2, byrow=TRUE)
c0 = rep(0, parameters[['NUM']])
alpha[1, 1] = Mvar$pii[1]*f0x[1]
alpha[1, 2] = Mvar$pii[2]*f1x[1]
c0[1] = 1/sum(alpha[1, ])
alpha[1, ] = c0[1]*alpha[1, ]
alpha.tmp = tryCatch({
.C('calAlpha',alpha=as.numeric(alpha),c0=as.numeric(c0),as.numeric(Mvar$A),as.numeric(f0x),as.numeric(f1x),as.integer(parameters[['NUM']]))
}, warning = function(e) {return(-1)}, simpleError = function(e) {return(-1)}, error = function(e) {return(-1)})
if(length(alpha.tmp) == 1)
{
if(parameters[['v']]) cat("Error in calAlpha\n")
return(-1)
}
alpha = alpha.tmp$alpha
dim(alpha) = c(parameters[['NUM']],2)
c0 = alpha.tmp$c0
# backward
beta = matrix(rep(0, parameters[['NUM']]*2), parameters[['NUM']], 2, byrow=TRUE)
beta[parameters[['NUM']], 1] = c0[parameters[['NUM']]]
beta[parameters[['NUM']], 2] = c0[parameters[['NUM']]]
beta.tmp = tryCatch({
.C('calBeta',beta=as.numeric(beta),as.numeric(c0),as.numeric(Mvar$A),as.numeric(f0x),as.numeric(f1x),as.integer(parameters[['NUM']]))
}, warning = function(e) {return(-1)}, simpleError = function(e) {return(-1)}, error = function(e) {return(-1)})
if(length(beta.tmp) == 1)
{
if(parameters[['v']]) cat("Error in calBeta\n")
return(-1)
}
beta = beta.tmp$beta
dim(beta) = c(parameters[['NUM']],2)
# lfdr
lfdr = rep(0, parameters[['NUM']])
lfdr.tmp = tryCatch({
.C('calLfdr',as.numeric(alpha),as.numeric(beta),lfdr=as.numeric(lfdr),as.integer(parameters[['NUM']]))
}, warning = function(e) {return(-1)}, simpleError = function(e) {return(-1)}, error = function(e) {return(-1)})
if(length(lfdr.tmp) == 1)
{
if(parameters[['v']]) cat("Error in calLfdr\n")
return(-1)
}
lfdr = lfdr.tmp$lfdr
# gammA & dgammA
gammA = matrix(rep(0,(parameters[['NUM']]*2)), parameters[['NUM']], 2, byrow=TRUE)
gammA[parameters[['NUM']], ] = c(lfdr[parameters[['NUM']]], 1-lfdr[parameters[['NUM']]])
dgammA = array(rep(0, (parameters[['NUM']]-1)*4), c(2, 2, (parameters[['NUM']]-1)))
gammA.tmp = tryCatch({
.C('calGamma',as.numeric(alpha),as.numeric(beta),as.numeric(Mvar$A),as.numeric(f0x),as.numeric(f1x),gamma=as.numeric(gammA),dgamma=as.numeric(dgammA),as.integer(parameters[['NUM']]))
}, warning = function(e) {return(-1)}, simpleError = function(e) {return(-1)}, error = function(e) {return(-1)})
if(length(gammA.tmp) == 1)
{
if(parameters[['v']]) cat("Error in calGamma\n")
return(-1)
}
gammA = gammA.tmp$gamma
dgammA = gammA.tmp$dgamma
dim(gammA) = c(parameters[['NUM']],2)
dim(dgammA) = c(2, 2, (parameters[['NUM']]-1))
if(parameters[['f1']] != "kernel" & parameters[['f1']] != "kernel.symetric" & parameters[['f1_compartiments']] > 1)
{
# omega
omega = matrix(rep(0, parameters[['NUM']]*parameters[['f1_compartiments']]), parameters[['NUM']], parameters[['f1_compartiments']], byrow=TRUE)
for (c in 1:parameters[['f1_compartiments']])
{
f1c = dnorm(parameters[['zvalues']], Mvar$f1[c, 1], Mvar$f1[c, 2])
omega[, c] = gammA[, 2] * Mvar$pc[c]*f1c/f1x
if(length(Mvar$f0) >= 3)
{
omega[parameters[['zvalues']]==0, c] = 0
}
}
}
return(list(gammA = gammA, dgammA = dgammA, omega = omega, c0 = c0, trans.par = Mvar$trans.par))
}
|
b8dbf79fa2e8d8e3cdcc34f2e49d97756e11d8d2 | 40cf04ee6cf3ffa246ecc069bcb629c0fffc4691 | /R/data.R | 7f5402cdc615bca8ae929953ea5fa0ff69108842 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | akleinhesselink/sedgwickenv | a5c1abf183c54634df2658450d9da875525800b7 | 66ae557277ebacb25b3aacd5dbb7ff11a5cacf38 | refs/heads/master | 2020-03-29T20:36:36.473096 | 2019-12-19T22:23:36 | 2019-12-19T22:23:36 | 150,320,819 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,531 | r | data.R | #' Geolocation and environmental characteristics of 24 study sites at the UCSB Sedgwick Reserve.
#'
#' A dataset describing the position, soil characteristics, light interception,
#' and surface temperature at 24 vegetation study sites at the UCSB Sedgwick Reserve.
#' Sites originally established by Nathan Kraft and Gaurav Kandlikar in 2016.
#' Soil characteristics are based on standard soil analysis conducted by A&L. Western
#' Agricultural labs on samples collected from common gardens at each site by
#' Gaurav Kandlikar. Soil depth and light interception data were collected by
#' Andrew Kleinhesselink.
#'
#' @format A data frame with 24 rows and 30 variables:
#' \describe{
#' \item{site}{Site code based on original site number used by G.K. and N.K.}
#' \item{name}{site nickname}
#' \item{lon}{decimal longitude}
#' \item{lat}{decimal latitude}
#' \item{ele}{elevation (m), determined from USGS DEM}
#' \item{type}{sites either upper or lower, upper sites on serpentine soil}
#' \item{microsite}{for upper sites, sites on hummocks are identified}
#' \item{Tmax}{Average daily maximum temperature (\ifelse{html}{\out{°}}{\°}C) for March. Recorded directly with an iButton placed in PVC housing on the soil surface at each site}
#' \item{Tmin}{Average daily minimum temperature (\ifelse{html}{\out{°}}{\°}C) for March. Recorded directly with an iButton placed in PVC housing on the soil surface at each site}
#' \item{soil_organic}{soil organic matter (\%)}
#' \item{pH}{soil pH}
#' \item{CEC}{soil cation exchange capacity (meq per 100g)}
#' \item{NH4_N}{soil ammonium concentration (ppm)}
#' \item{Nitrate}{soil Nitrate (ppm)}
#' \item{sand}{Soil sand content (\%)}
#' \item{clay}{Soil clay content (\%)}
#' \item{Ca}{soil Calcium (ppm)}
#' \item{Mg}{soil Magnesium (ppm)}
#' \item{K}{soil Potassium (ppm)}
#' \item{light_above}{Ambient light (mmol per m\ifelse{html}{\out{<sup>3</sup>}}{\eqn{^3}}) measured at 1.5 m above the site at two locations at midday}
#' \item{light_below}{Ambient light (mmol per m\ifelse{html}{\out{<sup>3</sup>}}{\eqn{^3}}) measured at the soil surface, below any vegetation at the site}
#' \item{light_use}{vegetation light interception. Calculated as (light_above - light_below)/light_above}
#' \item{soil_depth}{Average soil depth (cm). Determined by driving metal rod into two locations in five plots at each site, n = 10}
#' \item{soil_depth_sd}{Standard deviation (cm) of soil depth measurements at each site. n = 10 per site}
#' \item{Apr_2016_GWC}{Soil water content from soils collected April 2016 (g/g)}
#' \item{Apr_2017_GWC}{Soil water content from soils collected April 2017 (g/g)}
#' \item{Jan_2018_GWC}{Soil water content from soils collected January 2018 (g/g)}
#' \item{May_2016_GWC}{Soil water content from soils collected May 2016 (g/g)}
#' \item{avg_soil_moisture}{Average of April and May soil moisture measurements}
#' }
"sedgwick_env"
#' Sedgwick reserve boundary.
#'
#' Spatial boundary of Sedgwick Reserve for making maps. WGS84 geographical projection.
#'
#' @format A SpatialPolygons object.
#' \describe{
#' }
"sedgwick_boundary"
#' Sedgwick Elevation Layer
#'
#' Digital Elevation Model for Sedgwick Reserve downloaded from USGS.
#' 1/3 Arc-second Resolution (~10m). Geographical projection on GRS80 ellipsoid.
#'
#' @format A RasterLayer object with 12 slots
#' \describe{
#' }
"sedgwick_DEM"
#' Sedgwick Surface Temperature Data
#'
#' Spring surface temperatures for each of 24 study sites. Temperatures recorded with
#' iButton temperature loggers housed in short section of white PVC pipe placed on soil
#' surface at each site. Temperatures were logged every 4 hours and maximum and minimum
#' daily temperatures were calculated. Temperatures were recorded from February to June
#' 2016 and then again in 2018.
#'
#' @format A dataframe with 6636 rows and 7 variables
#' \describe{
#' \item{site}{Site code based on original site number used by G.K. and N.K.}
#' \item{date}{Date of sample in "Y-m-d" format}
#' \item{doy}{Day of year}
#' \item{Tmin}{Minimum temperature \ifelse{html}{\out{°}}{\°}C recorded during that day}
#' \item{Tmax}{Maximum temperature \ifelse{html}{\out{°}}{\°}C recorded during that day}
#' \item{Tavg}{Average temperature \ifelse{html}{\out{°}}{\°}C. Tavg = (Tmin + Tmax)/2 }
#' \item{daily_range}{Range between maximum and minimum temperature \ifelse{html}{\out{°}}{\°}C during day}
#' }
"sedgwick_ibutton"
#' Soil depth at each of the 24 sites.
#'
#' Soil depth recorded at two locations within five plots (n=10) at each of the 24 site.
#' Soil depth was measured by driving a thin metal rod into the soil and recorded the
#' depth at which rock was hit and the rod stopped penetrating. Depth measured in the
#' spring of 2017 by A. Kleinhesselink.
#'
#' @format A dataframe with 240 rows and 4 variables
#' \describe{
#' \item{site}{Site code based on original site number used by G.K. and N.K.}
#' \item{plot}{Plot number at each site (1-5)}
#' \item{rep}{Two sampling locations located at opposite corners of each plot}
#' \item{depth}{maximum depth (cm)}
#' }
"sedgwick_soil_depth"
#' Spring moisture at each of the 24 sites.
#'
#' Soil moisture was determined gravimetrically in the spring of 2016, 2017 and 2018
#' at each of the 24 sites. Shallow (<10 cm) soil was collected from multiple locations
#' at each site. These were homogenized and then weighed to get a wet weight. They were
#' then dried at 60 \ifelse{html}{\out{°}}{\°}C for 72 hours and then re-weighed
#' to get dry weight. Soil moisture by mass was recorded as (wet-dry)/dry weight.
#'
#' @format A dataframe with 96 rows and 3 variables
#' \describe{
#' \item{site}{Site code based on original site number used by G.K. and N.K.}
#' \item{date}{Date of sample in "Y-m-d" format}
#' \item{GWC}{Soil moisture expressed as fraction of dry weight}
#' }
"sedgwick_soil_moisture"
#' Vegetation height at each of the 24 sites.
#'
#' Vegetation height was recorded in two places in each plot at each site. Height was
#' determined by dropping a ruler in two opposite corners of the plot and recording
#' the height of the vegetation nearest to the ruler.
#'
#' @format A dataframe with 240 rows and 4 variables
#' \describe{
#' \item{site}{Site code based on original site number used by G.K. and N.K.}
#' \item{plot}{Plot number at each site (1-5)}
#' \item{veg_height}{maximum vegetation height (cm)}
#' }
"sedgwick_veg_height"
|
9261d5fe272014c6b2035a9a8c189ef3d4a3a4cf | fb0eda2c5c5364c907b3836b3c849ca2598a09df | /Script_data_table_03_2.R | fbc1dbe77a316f9350a617e2bf51536a0582f924 | [] | no_license | CursoRUCE/R-Intermedio | d3fbec6ec66ab66c134f566e9d2cb1e8e7a17f70 | 67320956724f01dcad1b487e3b3ebd61b63b04c0 | refs/heads/master | 2021-01-10T17:38:43.034711 | 2015-11-29T02:42:16 | 2015-11-29T02:42:16 | 45,076,997 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,604 | r | Script_data_table_03_2.R |
# Creating data.table objects
# data frame
DF <- data.frame(x=c("b","b","a","a"),y=4:1, z=rnorm(4))
DF
# data.table
#install.packages("data.table", dependencies = TRUE)
library(data.table)
DT <- data.table(x=c("b","b","a","a"),y=4:1, z=rnorm(4))
DT
# data frame vs data.table
head(mtcars, n = 4)
mtcarsDT <- data.table(mtcars)
head(mtcarsDT, n = 4)
# data frame vs data.table
class(mtcars)
mtcarsDT <- data.table(mtcars)
class(mtcarsDT)
# data.table objects in the workspace
tables()
carsDT <- data.table(cars)
tables()
# data.table to data frame
DT
sapply(DT,class)
# Subsetting rows
DT <- data.table(x=rep(c("a","b","c"),each=2), y=c(0,1), v=1:6)
DT
# Row 2
DT[2]
# Rows 2, 4 and 6
DT[c(2, 4, 6)]
## Subsetting rows
DT
# Rows via TRUE and FALSE
DT[c(TRUE, TRUE, FALSE, FALSE, TRUE, TRUE)]
## Subsetting rows
DT
# Rows via TRUE and FALSE
DT[c(FALSE,TRUE)] # recycled
# Omitting rows
DT
# Omit rows 2, 3 and 4
DT[!2:4]
## Subsetting columns
DT
# The column named v
DT[,v] # column v as a vector
## Subsetting columns
DT
# The column named v
DT[,list(v)] # column v as a data.table
## Subsetting columns
DT
# The column named v
DT[,.(v)] # column v as a data.table
# In data.table, list() is equivalent to .()
## Subsetting columns
DT
# Column 3
DT[,3]
# To extract column 3 by position, add with=FALSE
DT[,3, with=FALSE]
# Subsetting columns
DT
# Columns 1 and 3
DT[,c(1,3), with=FALSE]
# Subsetting and computing on variables
DT
# Sum of variable v over rows 2, 3 and 4
DT[2:4]
DT[2:4,sum(v)]
# Subsetting and computing on variables
DT
# Mean of variable v over rows 1, 2 and 3
DT[1:3]
DT[1:3,mean(v)]
# Setkeys
DT
setkey(DT,x)
tables()
# Subsetting with keys
DT
# Select the rows whose key equals "a"
DT["a"] # binary search (fast)
# Select the rows whose key equals "b" or "c"
DT
# Equivalent to:
DT[c("b","c")]
# Omitting rows with keys
# In data.table the ! symbol plays the same role as the - sign does in a data frame.
DT
# Omit the rows whose key equals "a" or "b"
DT[!c("a","b")] # equivalent
# Keyed by
# To apply a function to the groups defined by a key (as with tapply()), use:
DT
DT[,sum(v),by=key(DT)] # group by the key
# Group by
# To apply a function to the groups defined by a variable, use:
DT
DT[,sum(v),by=y] # group by the variable y
# Group by
# To apply a function to particular groups defined by the key, use:
DT["a"]
DT["a",sum(v)] # sum(v) within group a
# Group by
# To apply a function to particular groups defined by the key, use:
DT[c("a","b")]
# For more than one group, by=.EACHI must be added
DT[c("a","b"),sum(v),by=.EACHI] # sum(v) within groups a and b
## Joins
# Consider the data.tables DT and DT2:
DT
DT2 <- data.table(c("b","c"),z=c(4,2))
DT2
# To join on the key of DT and the first column of DT2, use:
DT[DT2]
# To join using only the first row of each group, use:
DT[DT2,mult="first"]
# To join using only the last row of each group, use:
DT[DT2,mult="last"]
# Modifying columns
DT <- data.table(x=rep(c("a","b","c"),each=2), y=c(0,1), v=1:6)
DT
# To add a column newcol to DT, use:
DT[,newcol:=8L] # add a column by reference
# Removing columns
DT
# To remove the column newcol from DT, use:
DT[,newcol:=NULL] # remove a column by reference
# Modifying existing columns
setkey(DT,x)
DT
# To reassign values of an existing variable, use:
print(DT["a",v:=10L]) # sub-assign to the existing column v by reference
## Modifying columns
setkey(DT,x)
DT
# To assign values to a new variable, use:
print(DT["b",v2:=9L]) # sub-assign to the new column v2 by reference (unmatched rows get NA)
# Modifying columns
setkey(DT,x)
DT
# To assign to every element of a group the result of applying a function to that group, use:
DT[,m:=mean(v),by=x] # add a new column by group
## Importing data into R
# read.table(file, header = FALSE, sep = "", dec=".", stringsAsFactors = TRUE, ...)
# fread( )
# fast file reader function
str(fread)
# read.table( ) vs fread( )
# We use the flights.csv file (25 MB).
system.time(
read_base <- read.csv("flights.csv", header = TRUE, sep = ",")
)
dim(read_base)
# install.packages("data.table")
library(data.table)
system.time(
read_DT <- fread("flights.csv", sep = ",", header = F)
)
dim(read_DT)
# fread() is roughly 10 times faster than read.csv()
|
f2260b66026b66a7550d4fe05d7739a0d4686660 | 3d29b0de9404f87361979a216214614efce9419c | /man/OC2c.Rd | bdc39cb1a70969abb81edc94ca6d00c9a5b53ecc | [] | no_license | andreaskiermeier/AcceptanceSampling | 2717f283cf840c28fc51a2918e2c2016805227ec | 250d891adda26e152d3404ea1c8aff40dc7b883a | refs/heads/master | 2023-04-30T14:33:51.511179 | 2023-04-19T06:38:16 | 2023-04-19T06:38:16 | 46,367,877 | 2 | 3 | null | 2015-11-23T23:51:11 | 2015-11-17T18:47:01 | R | UTF-8 | R | false | false | 3,516 | rd | OC2c.Rd | \name{OC2c}
\alias{OC2c}
\alias{plot,OC2c-method}
\alias{plot,OCbinomial,missing-method}
\alias{plot,numeric,OCbinomial-method}
\alias{plot,OChypergeom,missing-method}
\alias{plot,numeric,OChypergeom-method}
\alias{plot,OCpoisson,missing-method}
\alias{plot,numeric,OCpoisson-method}
\alias{show,OC2c-method}
\alias{show,OChypergeom-method}
\alias{summary,OC2c-method}
\alias{summary,OChypergeom-method}
\title{Operating Characteristics of an Acceptance Sampling Plan}
\description{ The preferred way of creating new objects from the family
of \code{"OC2c"} classes.}
\usage{OC2c(n,c,r=if (length(c)<=2) rep(1+c[length(c)], length(c)) else NULL,
type=c("binomial","hypergeom", "poisson"), ...) }
\arguments{
\item{n}{A vector of length k giving the sample size at each
of the k stages of sampling, e.g. for double sampling k=2.}
\item{c}{A vector of length k giving the \bold{cumulative}
acceptance numbers at each of the k stages of sampling.}
\item{r}{A vector of length k giving the \bold{cumulative}
rejection numbers at each of the k stages of sampling.}
\item{type}{The possible types relate to the distribution on
which the plans are based on, namely, \code{binomial},
\code{hypergeom}, and \code{poisson}.}
\item{...}{Additional parameters passed to the class
generating function for each type. See Details for options.} }
\details{
Typical usages are:
\preformatted{
OC2c(n, c)
OC2c(n, c, r, pd)
OC2c(n, c, r, type="hypergeom", N, pd)
OC2c(n, c, r, type="poisson", pd)
}
The first and second forms use a default \code{type} of
"binomial". The first form can calculate \code{r} \emph{only} when
\code{n} and \code{c} are of length 1 or 2.
The second form provides a the proportion of defectives, \code{pd}, for
which the OC function should be calculated (default is \code{pd=seq(0,
1, 0.01)}.
The third form states that the OC function based on a Hypergeometric
distribution is desired. In this case the population size \code{N}
also needs to be specified. In this case, \code{pd} indicates the
proportion of population defectives, such that \code{pd*N} gives the
actual number of defectives in the population. If \code{N} or
\code{pd} are not specified they take defaults of \code{N=100} and
\code{pd=seq(0, 1, 0.01)}. A warning is issued if N and D=N*pd are
not integers by checking the value, not the object type.
}
\value{
An object from the family of \code{OC2c-class}, namely of class
\code{OCbinomial}, \code{OChypergeom}, or \code{OCpoisson}.
}
\seealso{
\code{\link{OC2c-class}}
}
\examples{
## A standard binomial sampling plan
x <- OC2c(10,1)
x ## print out a brief summary
plot(x) ## plot the OC curve
plot(x, xlim=c(0,0.5)) ## plot the useful part of the OC curve
## A double sampling plan
x <- OC2c(c(125,125), c(1,4), c(4,5), pd=seq(0,0.1,0.001))
x
plot(x) ## Plot the plan
## Assess whether the plan can meet desired risk points
assess(x, PRP=c(0.01, 0.95), CRP=c(0.05, 0.04))
## A plan based on the Hypergeometric distribution
x <- OC2c(10,1, type="hypergeom", N=5000, pd=seq(0,0.5, 0.025))
plot(x)
## The summary
x <- OC2c(10,1, type="hypergeom", N=5000, pd=seq(0,0.5, 0.1))
summary(x, full=TRUE)
## Plotting against a function which generates P(defective)
xm <- seq(-3, 3, 0.05) ## The mean of the underlying characteristic
x <- OC2c(10, 1, pd=1-pnorm(0, mean=xm, sd=1))
plot(xm, x) ## Plot P(accept) against mean
}
\keyword{classes}
|
9b42226ed8d5f2caf7ba261e93bfbc5ea311fccb | 08ac50745353290c77f57652ff0001623a425f4d | /analysis/analyze_data.R | dd193e9ca83f29d243ebe3de5984975df726708b | [] | no_license | cwru-robotics/turtlebot-estimation | e2f43d4b9c8018c7416359a127425783feb91c69 | 0b9013ff69be2a4ef88cbfebfcc2d47401185b54 | refs/heads/master | 2020-07-09T04:42:43.213587 | 2016-09-18T23:51:54 | 2016-09-18T23:51:54 | 66,859,804 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,158 | r | analyze_data.R | #!/usr/bin/env Rscript
# R Script to analyze data from experiments
# Usage is analyze_data.R experiment1 experiment2...
# If experiment names to render are not specified then all experiments are rendered
# Must change the data_dir to your user directory!
# TODO make this use environment variable to detect home directory
data_dir <- "/home/USER/thesis/experiment_data"
dirs <- list.dirs(path=data_dir, recursive=FALSE) # Find all experiments in the data_dir
report_dir <- paste0(data_dir, "/reports") # Create a subdirectory in the data_dir to store reports
args <- commandArgs(trailingOnly = TRUE)
# Function to generate reports for all data in the data_dir if no argument was passed into function call
renderAll <- function() {
message("No args received, rendering all experiments")
for (directory in 1:length(dirs)){
experiment_name = substr(dirs[directory], nchar(data_dir)+2, nchar(dirs[directory]))
files <- list.files(path=dirs[directory], pattern="turtlebot([0-9])+_gazebo_odom.csv")
if (experiment_name != "reports" && experiment_name != "old") {
message(paste0("Rendering experiment ", experiment_name))
for (file in 1:length(files)) { # Generate a report for each robot in the experiment
if (file < 10) {
robot_number <- substr(files[file], 10, 10)
}
else if (file < 100) {
robot_number <- substr(files[file], 10, 11)
}
rmarkdown::render("robot.Rmd",
params=list(experiment=experiment_name, robot=robot_number),
output_file=paste0("turtlebot_", robot_number, ".pdf"),
output_dir=paste(report_dir, experiment_name, sep="/"),
quiet=TRUE)
}
# Generate a report for the experiment as a whole
rmarkdown::render('experiment.Rmd',
params=list(experiment=experiment_name, robots=length(files)),
output_file=paste0(experiment_name, ".pdf"),
output_dir=paste(report_dir, experiment_name, sep="/"),
quiet=TRUE)
}
}
}
# Renders specific experiments that were passed in as args to script call
renderSome <- function() {
message("Some args received, only rendering specified experiments")
for (directory in 1:length(dirs)){
experiment_name = substr(dirs[directory], nchar(data_dir)+2, nchar(dirs[directory]))
files <- list.files(path=dirs[directory], pattern="turtlebot([0-9])+_gazebo_odom.csv")
if (is.element(experiment_name, args)) {
message(paste0("Rendering experiment ", experiment_name))
for (file in 1:length(files)) {
if (file < 10) {
robot_number <- substr(files[file], 10, 10)
}
else if (file < 100) {
robot_number <- substr(files[file], 10, 11)
}
rmarkdown::render("robot.Rmd",
params=list(experiment=experiment_name, robot=robot_number),
output_file=paste0("turtlebot_", robot_number, ".pdf"),
output_dir=paste(report_dir, experiment_name, sep="/"),
quiet=TRUE)
}
rmarkdown::render('experiment.Rmd', params=list(experiment=experiment_name, robots=length(files)),
output_file=paste0(experiment_name, ".pdf"),
output_dir=paste(report_dir, experiment_name, sep="/"),
quiet=TRUE)
}
}
}
message('Ready to analyze experiment data!')
# Accept command line args to control if we render all reports, or just for some experiments
if (length(args) == 0) {
# If no args, render everything that we can
renderAll()
} else {
# If some args, render only experiments with names matching the passed args
renderSome()
}
|
11065455bceabd80f6cc593b4f4ed0862849be15 | 5833b6528abf04acc1c595942a37d2cd336d59dd | /inst/shinyapp/capionis/dashboard/plot.survival.R | f518d5fcc9165338fbfed7645320fb31fd39855f | [] | no_license | sakho3600/WAHEco | f108a705e9372b07c098dcba9599128abde2e5e6 | 8dc13ed6df08bbee56860725d13f59df80845414 | refs/heads/master | 2020-05-20T12:49:52.297351 | 2017-08-21T22:06:14 | 2017-08-21T22:06:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,382 | r | plot.survival.R | survival.ROUTER <- "/survival"
survival.NAMESPACE_ID <- "survival-trans"
survival.view <- function(ns){
tagList(
fluidRow(
box(title = "Courbe de survie", status = "primary", solidHeader = TRUE,
plotOutput(ns("plot2"))),
box(title = "Évolution de la population", status = "primary", solidHeader = TRUE,
plotOutput(ns("plot1")))
)
)
}
survival.itemMenu <- menuSubItem("Courbes Survie",
href=survival.ROUTER, newtab = FALSE,
icon = shiny::icon("line-chart"))
survival.server <- function(input, output, session, ...) {
output$plot1 <- renderPlot({
print(plot(eco_mod, type = "counts", panel="by_state"))
})
output$plot2 <- renderPlot({
interval_cycles <- eco_mod$eval_strategy_list$chvp$values$markov_cycle
plotting <- plot(interval_cycles, eco_mod$eval_strategy_list$chvp$parameters$p_fct_param_chvp,
type='l', col='blue', xlab="Time (cycles)", ylab="Probabilité état")
lines(interval_cycles, eco_mod$eval_strategy_list$rchvp$parameters$p_fct_param_rchvp, col="red")
lines(interval_cycles, eco_mod$eval_strategy_list$chvp$parameters$p_fct_parm_exp_chvp,
lty=3, lwd=3, col="green")
lines(interval_cycles, eco_mod$eval_strategy_list$rchvp$parameters$p_fct_parm_exp_rchvp,
lty=3, lwd=3, col="violet")
})
}
|
a855323e4218a9c49d7be93a1f75e6b679747181 | 4bb1aca720b47e74d86202b6e4417c63a64d91d8 | /Ventdata_EEZcheck.R | fa2dfad3b28a52fb9e9d40073f011bb471da1720 | [] | no_license | sbeaulieu/vents-Drupal | 4cd7b04be8d79ff288f5b3ca2d5f574053064191 | cc41a073696db9de90638285fe019914f1e2b628 | refs/heads/master | 2021-07-09T23:18:48.904636 | 2021-04-17T12:27:54 | 2021-04-17T12:27:54 | 36,871,545 | 1 | 1 | null | 2021-04-17T12:27:54 | 2015-06-04T13:22:20 | PHP | UTF-8 | R | false | false | 921 | r | Ventdata_EEZcheck.R | # R script point-in-polygon to check older version EEZ categories used in Vents Database against categories in VLIZ World EEZ Ver 11 from Nov 2019
# I couldn't figure out how to do reverse geocode using R package mregions, nor could I figure out how to login to LifeWatch web services
library(rgdal)
setwd("C:/Users/sbeaulieu/Desktop")
vents <- read.csv("VentdataSortedclean_20200321.csv")
coordinates(vents) <- c("Longitude", "Latitude")
EEZs <- readOGR(".", "eez_v11") # takes a minute
proj4string(vents) <- proj4string(EEZs)
inside.EEZ <- !is.na(over(vents, as(EEZs, "SpatialPolygons")))
# use 'over' again, this time with EEZs as a SpatialPolygonsDataFrame
# object, to determine which EEZ (if any) contains each sighting, and
# store the EEZ name as an attribute of the vents data
vents$EEZ <- over(vents, EEZs)$GEONAME
# write the augmented vents dataset to CSV
write.csv(vents, "vents-by-EEZ.csv", row.names=FALSE) |
ed7778e5867321f43a0e29773025159b0868996f | 62b3f0aaf7a0532c8f752052bf5ab9cb7f8b024e | /man/ggplot.profr.Rd | 7c582107d77044d7892e63917f94bde581a46ca3 | [] | no_license | Barbleiss/profr | c6ebedb922ab7edf0dc9af924e2c60f2a3c2fc18 | ab520c615d80bafefbde15eef85263b533d717b2 | refs/heads/master | 2020-06-04T08:37:13.450908 | 2018-12-06T13:41:55 | 2018-12-06T13:41:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 813 | rd | ggplot.profr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.r
\name{ggplot.profr}
\alias{ggplot.profr}
\title{Visualise profiling data with ggplot2.
Visualise profiling data stored in a \code{profr} data.frame.}
\usage{
ggplot.profr(data, ..., minlabel = 0.1, angle = 0)
}
\arguments{
\item{data}{profile output to plot}
\item{...}{other arguments passed on to \code{\link[ggplot2]{ggplot}}}
\item{minlabel}{minimum percent of time for function to get a label}
\item{angle}{function label angle}
}
\description{
This will plot the call tree of the specified stop watch object.
If you only want a small part, you will need to subset the object
}
\examples{
if (require("ggplot2")) {
ggplot(nesting_prof)
ggplot(reshape_prof)
}
}
\seealso{
\code{\link{plot.profr}}
}
\keyword{hplot}
|
bedc03abc1f8a193d7c63c3c939debeef767a74a | 0b1b36fe7d540a2f1aeb024b8fc1d0805bb54dc5 | /R code/180918.R | 0565198704e90f33fb970d76eea3e03724f7b29c | [] | no_license | Kim-Ayeong/R_Hadoop_class | b67c8548396fd56b8d53828a3b0b159fff71dfb5 | 421fb2b0960fa06d5d642a2b48307bdb62b5b258 | refs/heads/master | 2022-04-07T14:51:26.434800 | 2020-03-13T11:18:57 | 2020-03-13T11:18:57 | 235,706,419 | 0 | 0 | null | null | null | null | UHC | R | false | false | 3,005 | r | 180918.R | #9월18일
#lec3_updated2
#factor
x <- factor(c("a", "b", "b", "a"))
str(x) #1,2는 임의로 부여한 factor 수준 값
x[2] <- "a" #가능
x[2] <- "c" #불가능, 미싱값 입력
x <- rep( x = c(0,1), times = c(3,17) )
(Gender <- factor(x)) #convert the numeric vector as a factor
(Gender <- factor(x, levels=c(0, 1), labels=c("M", "F")))
par(mfrow = c(1,2))
plot(Gender, col = c(1,2))
# Ordered factor
x <- c(1, 1, 1, 2, 2, 2, 3, 3, 3)
(Income <- ordered(x, levels=c(1, 2, 3), labels=c("L", "M", "H")))
#lec5
{1+2; 3+4; 6+8} #14, 마지막 식만 결과값만 출력
#if문
str(mtcars)
x <- mtcars$mpg
if(is.numeric(x)) boxplot(x, border="blue", col="lightblue", pars=list(boxwex=0.3))
#border:선의 색, col:채우기 색, boxplot의 상대적인 길이가 0.3
x <- factor(mtcars$cyl)
if( is.numeric(x) ) boxplot(x) else {
if(is.factor(x)) {
tmp <- table(x)
nlev <- length(levels(x))
barplot(tmp, col = (1:nlev) + 1)
}
}
#for문
x <- mtcars$mpg
(nn <- length(x))
y <- rep("NA", nn)
for(i in 1:nn) {
tmp <- x[i]
if(tmp < 12) y[i] <- "low"
if(tmp >= 12 & tmp < 17) y[i] <- "moderate"
if(tmp >= 17) y[i] <- "high"
}
y
#repeat, while문보다 for문 사용을 권장
#function
simfun1 <- function(x){
res <- x^2
return(res)
}
class(simfun1)
simfun1(1:3)
simfun2 <- function(x){
res <- x^2
invisible(res)
}
simfun2(1:3) #결과값이 보이지 않음
(tmp <- simfun2(1:3)) #할당 후에는 결과값이 보임
simfun3 <- function(x){
res <- simfun1(x)
ress <- res^2
return(ress)
}
simfun3(1:3)
mvfun <- function(x){
nn <- length(x)
res.mean <- sum(x)/nn
res.var <- sum((x-res.mean)^2)/(nn-1)
res <- list(mean=res.mean, var=res.var)
return(res)
}
mvfun(1:3)
#The . . . argument : 이전에 쓰던 argument를 다음 함수에도 전달해줄 수 있음.
simplot1 <- function(x, y){
fit <- lm(y~x)
plot(x, y, col=4, pch=16 )
abline(fit, col=2, lwd=2)
}
simplot1(x=mtcars$mpg, y=mtcars$cyl)
simplot2 <- function(x, y, lcol=2, lwd=2, ... ){
fit <- lm(y~x)
plot(x, y, ...) #plot 속 ...을 함수 인자로도 쓸 수 있음.
abline(fit, col=lcol, lwd=lwd) #plot의 ...인자와 충돌할 수 있으므로, lcol, lwd로 따로 지정
}
simplot2(x=mtcars$mpg, y=mtcars$cyl) #디폴트값 그대로 받아오기
simplot2(x=mtcars$mpg, y=mtcars$cyl, col=mtcars$cyl, pch=23) #plot 함수의 ...인자로 전달
simplot2(x=mtcars$mpg, y=mtcars$cyl, lcol=4, lwd=1, col=mtcars$cyl, pch=23)
plot(1:25, col=1:25, pch=1:25, cex=2) #이 중에서 골라 사용하기
#Exercise 2
simfun4 <- function(x){
if(is.numeric(x)) x.num <- x
else stop("x must be a numeric vector!")
res <- x.num^2
return(res)
}
simfun4(1:3)
simfun4(rep("m",3))
#Exercise 3
guessfun <- function(x){
if(!is.character(x) || length(x)>1) stop("x must be a character H or T!")
res <- sample( x = c("H","T"), size = 1 )
if( is.character(x) && x == res) print("You win!")
else print("You lose!")
}
guessfun(1)
guessfun(c("H","T"))
guessfun(H)
guessfun("H")
|
725d7300f000c9ccc365bf089ddd1e92e0dedc82 | a2aee752d7fd804ded63cafb587a25d6911f0db8 | /tests/testthat/test-KISdata.R | 71cd4e9f6421f7c1115c6a9e4d427cc35c9f1ac5 | [] | no_license | jeroenbrons/knmiR | e8ca0d3cf5130d464f9239847059a93c7b46ce32 | d1d9c9f3cdad6053455916a8fd1312130a8d42d5 | refs/heads/master | 2021-07-13T06:30:18.545480 | 2017-07-04T13:36:18 | 2017-07-04T13:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,081 | r | test-KISdata.R | library(futile.logger)
flog.threshold(DEBUG)
flog.appender(appender.file("knmiR.log"))
context("KIS data extraction")
node <- Sys.info()["nodename"]
test_that("Obtain temperature", {
skip_on_travis()
skip_if_not(grepl("knmi.nl", node))
expect_match(WriteKISRecipe("TG", "260_H", "2016"), "KIStable.txt")
expect_error(KIS("rr", "260_H", "2016"),
"Must be element of set {'TG', 'MOR_10'}.", fixed = TRUE)
result <- KIS("TG", "260_H", "2016-08/2016-09")
expect_equal_to_reference(result, file = "testOutput/temperatureDeBilt.rds")
result <- KIS("TG", "310_H", "2016-08/2016-09")
expect_equal_to_reference(result,
file = "testOutput/temperatureVlissingen.rds")
})
test_that("Obtain MOR_10", {
skip_on_travis()
skip_if_not(grepl("knmi.nl", node))
result <- KIS("MOR_10", "260_A_a", "2016-11-10")
expect_equal_to_reference(result, file = "testOutput/MOR_10_DeBilt.rds")
})
test_that("Get error outside", {
skip_if_not(!grepl("knmi.nl", node))
expect_error(KIS("TG", "260_H", "2016"),
"function works only inside KNMI", fixed = TRUE)
})
|
c1f8abf41514495fc6f8fac61a3b16d092dec596 | d30ae83d6357263f704e9d4ff5e18b0b997a9e7d | /Finding eigenvalues.R | d688d262c51799bd6a6cf5e9069b16383d639cbb | [] | no_license | gng-ucdavis/Chapter1-code | 5ee11538c5ec11ef8d41db471b0f887564eead80 | 65ed350177b4d469889c3675c0c1b1e30b02ba4c | refs/heads/master | 2023-07-02T06:45:37.781810 | 2021-08-07T23:28:00 | 2021-08-07T23:28:00 | 209,189,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,849 | r | Finding eigenvalues.R | ####9/17/19 This script is to look for eigenvalues in your Jacobian matrix. Solving for this will let me know if my solutions are stable or not (doesn't say if it is the only stable one)
###This is the first part of the stability analysis. The script 'solving for hyseresis' comes later
##The following are the partial derivatives of my jacobian matrix (corrected for the flipped m).
###And that's mostly it. This has the up to date parital derivative (9/17/19) that will be used for stability analyses
aa=-a*H*(1+s)/(1+s+f*m*P)-2*B+1 ###This is the partial derivative of the basal resource equation wrt the basal resource. The reason why this looks weird is because I flipped the (1+s) to the numerator to keep the equations neater. *shrug
bb=-a*B*(1+s)/(1+s+f*m*P) #This is the partial derivative of the basal resource wrt the prey population
cc=a*f*m*B*H*(1+s)/(1+s+f*m*P)^2 #This is the partial derivative of the basal resource wrt the predator population
dd=a*H*(1+s)/(1+s+f*m*P) #This is the partial derivative of the prey wrt the basal resource
ee=a*B/(1+((f*m*P)/(1+s)))-b*P/((1+f*P/(1+s))*(1+n*s))-dh #This is the partial derivative of the prey wrt the prey
ff=-a*f*m*B*H/((1+s)*(1+f*m*P/(1+s))^2)-b*H/((1+n*s)*(1+(f*P)/(1+s)))+b*f*H*P/((1+s)*(1+n*s)*(1+(f*P)/(1+s))^2) #This is the partial derivative of the prey wrt the predator
gg=0 #This is the partial derivative of the predator equation wrt the basal resource
hh=b*P*(1+s)/((1+s+f*P)*(1+n*s)) #This is the partial derivative of the predator equation wrt the prey
ii=b*H/((1+f*P/(1+s))*(1+n*s))-b*f*H*P/((1+s)*(1+n*s)*(1+f*P/(1+s))^2)-dp #This is the partial derivative of the predator equation wrt the predator
##So what do I do with this? Input values for these 9 equations and find the eigenvalues
jmat=(matrix(c(aa,bb,cc,dd,ee,ff,gg,hh,ii), ncol=3, byrow=T))
eigen(jmat)$values
|
bf1e66a0741efb76fa452b0ac25ae41545cefda2 | ea3318080191c56d0d83c189325534919cc86794 | /plot1.R | 1f813999c50d4e388b7fd02df1e5b2724bba25d8 | [] | no_license | agcode/Exdata_NEI | 150a8c8d29f66708bd6cc156430560267e2ece6e | 078ec2698db04378f513c8f6f07854225e8cc63f | refs/heads/master | 2021-01-10T14:54:24.667032 | 2015-11-02T08:42:16 | 2015-11-02T08:42:16 | 44,660,446 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,215 | r | plot1.R | ## Exploratory Data Analysis Assignment 2
## Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
## Using the base plotting system, make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Take the aggregate of emissions over years
xx <- aggregate(NEI$Emissions ~ NEI$year, FUN=sum)
# Using base plotting system
png("plot1.png",width= 480,height= 480,units= "px")
#barplot(xx[[2]],xx[[1]],xlab="Year",ylab="Emissions")
bp<-barplot(xx[[2]],xx[[1]],xlab="Year",ylab="Emissions",names.arg = c(1999, 2002, 2005, 2008),main="Total Emissions from PM2.5",col = c("red","sienna","palevioletred1","royalblue2"))
text(bp, 0, round(xx[[2]], 1),cex=1,pos=3)
#boxplot(NEI$Emissions ~ NEI$year,data=NEI,outline=FALSE,xlab="Year",ylab="Emissions",main = "Total emissions from PM2.5")
#ggplot()+geom_point(data=xx,aes(x=xx[["NEI$year"]],y=xx[["NEI$Emissions"]]),colour = 'red', size = 3)
#ggplot(xx,aes(x=xx[["NEI$year"]],y=xx[["NEI$Emissions"]]))+geom_bar(stat="identity")+xlab("Year")+ylab("Emissions")+ggtitle("Aggregate Emissions per Year")
dev.off() |
8975ac018930eed8129bf82b80c23459220aaa7e | 560bac32951f722ddb6636bac4ca3ba02ca418f2 | /dmpabook/c07.R | 1284ce004fed9d0dbe2943a04e3738643991cadb | [] | no_license | anhnguyendepocen/rBooks | 26ddf16647e4435263ba5dc67a8ccf3f76f0798e | 274bf5178e1b4eb14427f74f7f825d4b25ece75b | refs/heads/master | 2021-09-20T01:12:50.148676 | 2018-08-02T05:41:55 | 2018-08-02T05:41:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 38 | r | c07.R | # Chap- 7: Preparing to Model the Data |
6643e7ae39d9fac217734b65c675d41d8ff79ef0 | 2196661c2667ec48fe05185b79ff4787e7fd9bb0 | /man/QQPlot-comma-AnnualAggLossDevModelOutput-dash-method.Rd | a52e7228fca87d18a3ce55d4a98be48776f92495 | [] | no_license | cran/BALD | 36549ff72ae813d1dc2bb25fd77dbd7db663cbbb | c528ec69adcab7ad3d278f88905782e8d5ec0c12 | refs/heads/master | 2021-07-10T02:35:40.347434 | 2018-10-22T11:00:07 | 2018-10-22T11:00:07 | 154,136,226 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 648 | rd | QQPlot-comma-AnnualAggLossDevModelOutput-dash-method.Rd | \name{QQPlot,AnnualAggLossDevModelOutput-method}
\alias{QQPlot,AnnualAggLossDevModelOutput-method}
\title{A method to plot a Q-Q plot for models in the BALD package.}
\description{A method to plot a Q-Q plot for models in the \pkg{BALD} package.}
\details{This function plots sorted observed log incremental payments vs sorted predicted log incremental payments.
Credible intervals are also plotted.}
\value{NULL. Called for the side effect of plotting.}
\docType{methods}
\seealso{\code{\link{QQPlot}}
\code{\link{triResi}}}
\arguments{\item{object}{The object of type \code{AnnualAggLossDevModelOuput} from which to plot the values.}}
|
b4e3bb0e21bd20339f92d8011bbe56103220b399 | ddc72e526751d804a51d76dc1a69d066aa037810 | /global.R | 674f1e99fcf9624e7e9cbe23da2907ec10e46eea | [] | no_license | soulj/SkeletalVis-Shiny | 6c5a2dba0ed4913e776b8a090336e14a6b5bcfbb | 0517f551448d011f690454514016c8f56efaf73a | refs/heads/master | 2020-03-18T06:41:44.499049 | 2018-09-20T13:14:28 | 2018-09-20T13:14:28 | 134,410,379 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 819 | r | global.R | library(feather)
library(readr)
library(enrichR)
foldChangeTable <- read_feather("foldChangeTable.feather")
pvalTable <- read_feather("pvalTable.feather")
#load the accessions
accessions <- read.delim("accessions.txt",stringsAsFactors = F)
accessions$combined <- paste0(accessions$accession,"_",accessions$comparison)
#Load the experiment table
expTable<-read_csv(file = "data/expTable.csv")
#load the signature lists for the response comparisons
upSigs <- readRDS(file="foldChangeListUp.pval.RDS")
downSigs <- readRDS(file="foldChangeListDown.pval.RDS")
#load the chrDir lists
chrDirsList <- readRDS("chrDirs.RDS")
#load the human2otherspecies homology table
human2otherspecies <- as.data.frame(read_feather("data/human2otherspecies.feather"))
#get the enrichR databases
databases <- sort(listEnrichrDbs()[,1])
|
6ea87d4bb1fdf0cf2cb0452883239724ef003466 | fdd8b4769615fbd1c7dd9cb3fa98a144384046be | /Problem 4/4_2.R | ab4f483435f4e89ce3cb4d7c3c5acf8cc34ba3c3 | [] | no_license | Shruti0490/R | 2752cf88e2413a96cffdf438e1bbdaf586f2c87e | 3e77814b120fc70ac95cedb4221fa7c96c9dea61 | refs/heads/master | 2022-12-30T11:41:19.854820 | 2020-10-22T14:27:09 | 2020-10-22T14:27:09 | 291,740,398 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,784 | r | 4_2.R | ## Q 4.2
library(MASS)
data(package='MASS')
attach(UScereal)
summary(UScereal)
df <- UScereal
# 1. The relationship between manufacturer and shelf
# both are CATEGORIES
barplot(table(df$shelf, df$mfr),
main="4.2 - Manufacturer & Shelf",
xlab="Manufacturer",
ylab="Shelf",
col=rainbow(3),
legend=T)
dev.off()
# 2. The relationship between fat and vitamins
# fat is NUMERICAL, vitamins are CATEGORICAL
boxplot(fat ~ vitamins,
main="4.2 - Vitamins & Fat",
xlab="Vitamins",
ylab="Fat")
dev.off()
# 3. the relationship between fat and shelf
# fat is NUMERICAL, shelf if CATEGORICAL
boxplot(fat ~ shelf,
main="4.2 - Shelf & Fat",
xlab="Shelf",
ylab="Fat")
dev.off()
# 4. the relationship between carbohydrates and sugars
# both carbo and sugars are NUMERICAL, and possibly
# with correlation?
# plotting points and linear fit attempt to see
# if there is a relation...
plot(carbo, sugars,
main="4.2 - Carbo & Sugars",
xlab="Carbo",
ylab="Sugars")
lm <- lm(sugars ~ carbo)
abline(lm, col="red")
dev.off()
# 5. the relationship between fibre and manufacturer
# mfr is CATEGORICAL, fibre is NUMERICAL
boxplot(fibre ~ mfr,
main="4.2 - Mfr & Fibre",
xlab="Mfr",
ylab="Fibre")
dev.off()
# 6. the relationship between sodium and sugars
# sodium and sugars are both NUMERICAL
# plotting points and linear fit to see if there
# is some relation
plot(sugars, sodium,
main="4.2 - Sugars & Sodium",
xlab="Sugars",
ylab="Sodium")
lm <- lm(sodium ~ sugars)
abline(lm, col="red")
dev.off()
detach(UScereal)
|
171eba846156a1583082422b5742ce3b8047b552 | 42d44c41040f17ae75b6ec035fb4958f8a9302cf | /SCW.R | 27af1c678b72f91d149477aefbc013d6d4ca8e36 | [] | no_license | WenjuanW/Online-WAPA | 7a7f3145183dd21a28e40eb1abcf8899e6ab08b9 | eb5fc7d113af63c1f66ec7c714c6365928f6537d | refs/heads/master | 2020-03-20T21:57:14.320408 | 2018-06-20T08:55:10 | 2018-06-20T08:55:10 | 137,770,450 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,953 | r | SCW.R |
SCWI <- function(streamData1,label1,c,eta){
streamData1 <- as.matrix(streamData1)
label1 <- as.matrix(label1)
k <- 1
# k <- 3
streamData <- apply(streamData1, 2, rep, k)
y <- apply(label1, 2, rep, k)
Ncols <- ncol(streamData)
Nrows <- nrow(streamData)
u <- matrix(0, nrow = Ncols, ncol = 1)
Sigma <- diag(1,Ncols,Ncols)
prediction <- matrix(0, nrow = Nrows, ncol = 1)
loss <- matrix(0, nrow = Nrows, ncol = 1)
# AE <- matrix(0, nrow = Nrows, ncol = 1)
# count <- 0
for(i in 1:Nrows){
pred1 <- sign(t(u)%*%streamData[i,])
prediction[i] <- pred1
vt <- t(streamData[i,])%*%Sigma%*%streamData[i,]
fai <- 1/pnorm(eta)
## print(Norm(streamData[i,]))
## print(Sigma)
## print(streamData[i,])
## print(vt)
lt <- max(0,fai*sqrt(vt)-y[i]*(streamData[i,] %*% u))
l1 <- max(0,1-y[i]*(streamData[i,] %*% u))
loss[i] <- l1
if (lt>0){
mt <- y[i]*t(u)%*%streamData[i,]
pa <- 1+fai^2/2
xi <- 1+fai^2
stepw <- min(c,max(0,1/(vt*xi)*(-mt*pa+sqrt(mt^2*fai^4/4+vt*fai^2*xi))))
ut <- 0.25*(-stepw*vt*fai+sqrt(stepw^2*vt^2*fai^2+4*vt))^2
Betat <- stepw*fai/(sqrt(ut)+vt*stepw*fai)
u <- u + stepw*y[i]*Sigma%*%streamData[i,]
##print(is.positive.semi.definite(Sigma, tol=1e-250))
##print(is.positive.semi.definite(Sigma%*%((streamData[i,])%*%t(streamData[i,]))%*%Sigma, tol=1e-250))
Sigma <- Sigma - Betat[1,1]*Sigma%*%(streamData[i,])%*%t(streamData[i,])%*%Sigma
}
# if (pred1!=y[i]){
# count <- count +1
# AE[i] <- count/i
# }else{
# AE[i] <- count/i
# }
}
a <- length(which(prediction==y[1:nrow(prediction)]))/nrow(prediction)
#newlist <- list("pred" = a, "loss" = loss,"AverageE" = AE)
newlist <- list("pred" = a, "loss" = loss)
return(newlist)
}
#sc <- SCWI(x_train,y_train,25,0.8)$pred
|
6ce8be541f74d14c098bff86dffc6fd2b22d311a | 57d258a66ce8a56af95db6f37b1472f88e639b4f | /treelet_prepare_cv.R | 2c9c84fb92eced24b62cad447b879c0f6697cf79 | [] | no_license | dravesb/TreeletSmoothers | 97fc99ae91a95788aeb240cb82b817d534c3d6a7 | 991f775b2f770a935e39eb0a02f6ed40669fe030 | refs/heads/master | 2021-01-01T04:27:45.605741 | 2017-08-13T20:23:39 | 2017-08-13T20:23:39 | 97,177,475 | 0 | 0 | null | 2017-08-13T20:17:27 | 2017-07-14T00:55:08 | R | UTF-8 | R | false | false | 2,737 | r | treelet_prepare_cv.R | treelet_prepare_cv = function(grm_name, num_test = 50, snp_set_size = NA){
#--------------------------------
#Check for errors in the grm file
#--------------------------------
if(is.na(grm_name)){
stop("please specify grm file name \n this should be in the current directory")
}
#-------------------------------------------
# read in SNP file
#-------------------------------------------
snp_name = paste(grm_name, ".bim", sep = "")
snps = as.matrix(read.table(snp_name))[,2]
if(!exists("snps")){
stop("couldn't find the file")
}
if(is.na(snp_set_size)){
#include entire snp set
snp_set_size = length(snps)
}
if(length(snps)<snp_set_size){
message("snp_set_size is greater than the number of snps \n setting it to the max value")
snp_set_size = length(snps)
}
snps = sample(snps, snp_set_size)
#--------------------------------
#Make training and testing sets
#--------------------------------
  message("writing out testing/training sets")
#get train/test indices
indices = 1:length(snps)
train_index = sample(indices, round(length(snps)/2))
test_index = indices[-train_index]
#create directory for snp sets
dir.create("./snp_sets", showWarnings = FALSE)
setwd("./snp_sets")
#write out training snps
write.table(snps[train_index], "train", col.names = F, row.names = F, quote = F)
#split the testing sets once more
  #size of each held-out test SNP set; use num_test rather than a hard-coded 50
  size = floor(length(test_index)/num_test)
for(i in 1:num_test){
#get the smaller snp_set
small_snp_set = sample(test_index, size)
#remove small_snp_set from test_index
test_index = test_index[!test_index %in% small_snp_set]
#write out small_snp_set
file = paste("test",i, sep = "")
write.table(snps[small_snp_set], file, col.names = F, row.names = F, quote = F)
}
#go back a directory
setwd("..")
  message("writing out grms - this may take a very long time...")
#--------------------------------
#Make training grms
#--------------------------------
#create directory for the new grms
dir.create("./cv_grms", showWarnings = FALSE)
#format snp file names
snp_file = paste(getwd(), "/snp_sets/train", sep = "")
out = paste(getwd(), "/cv_grms/train", sep = "")
#make the call to gcta
system(paste("./gcta_mac --bfile ",grm_name," --extract ",snp_file," --make-grm --out ", out, sep = ""))
#--------------------------------
#Make testing grms
#--------------------------------
for(i in 1:num_test){
#format snp file names
snp_file = paste(getwd(), "/snp_sets/test",i, sep = "")
out = paste(getwd(), "/cv_grms/test",i, sep = "")
#make the call to gcta
system(paste("./gcta_mac --bfile ",grm_name," --extract ",snp_file," --make-grm --out ",out,sep = ""))
}
}
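# Example call (illustrative only): assumes the PLINK-format files <grm_name>.bim/.bed
# and the GCTA binary "gcta_mac" sit in the working directory, as the system() calls above require.
# treelet_prepare_cv("mydata", num_test = 50, snp_set_size = 10000)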
7925715014632680dcb0bedbb22fa3f78459f79d | 1920a6ec39111d11bca8a24089b701a55eb73896 | /man/SNP.Pattern.Rd | fcf53f4311bbc2dbd8bce8a4000890564e9d1922 | [] | no_license | benliemory/BinStrain | 1014195bd161cefaeb278858a297e3528c372ca5 | 76465748595bd394cb37c9cae811b977b2eafde8 | refs/heads/master | 2020-04-27T07:38:24.557622 | 2013-11-24T02:39:59 | 2013-11-24T02:39:59 | 14,651,642 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 350 | rd | SNP.Pattern.Rd | \name{SNP.Pattern}
\alias{SNP.Pattern}
\docType{data}
\title{
Data for SNP Pattern
}
\description{
The data describes the SNP Pattern.
}
\usage{data(SNP.Pattern)}
\format{
  A data frame with 15387 observations. Numbers from 1 to 14 represent the order of the reference genomes used to generate the SNP pattern file.
}
\examples{
data(SNP.Pattern)
}
6808e6b664e7f75a893b4c26da59232f000341c8 | 99f74103b2d72babd81ef255d99d8a7873779119 | /Ch02/2_6_String.R | 30d36d5cb6397172d0e5a1b08b43fd9872e7d161 | [] | no_license | leesiri1004/R | 6c652df0244de91ef1185adf6aa44c45382fa42f | 9b197e3258439098695f37270217bfbc167ae116 | refs/heads/master | 2023-03-08T18:08:11.641332 | 2021-02-24T07:15:31 | 2021-02-24T07:15:31 | 330,568,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,004 | r | 2_6_String.R | #날짜 : 2021/01/19
# Name : 이슬이
# Topic : Ch02. Data types and structures - string handling (textbook p. 84)
# Install the R package
install.packages('stringr')
# Load the R package
library(stringr)
# Regular expressions on strings
str <- 'hong25이순신31정약용27'
rs1 <- str_extract(str, '[1-9]{2}')
rs1
rs2 <- str_extract_all(str, '[1-9]{2}')
rs2
rs3 <- str_extract_all(str, '[a-z]{3}')
rs3
rs4 <- str_extract_all(str, '[가-힣]{3}')
rs4
rs5 <- str_extract_all(str, '[^0-9]{3}')
rs5
# String length
str_length(str)
# Extracting a substring
str_sub(str, 1, 5)
# Textbook p. 84 practice - extracting substrings
str_extract("홍길동35이순신45유관순25", "[1-9]{2}")
str_extract_all("홍길동35이순신45유관순25", "[1-9]{2}")
# Textbook p. 85 practice - extracting letters with a specified repetition count
string <- "hongkd105leess1002you25강감찬2005"
str_extract_all(string, "[a-z]{3}") #영문 소문자가 3자 연속하는 경우 추출
str_extract_all(string, "[a-z]{3,}") #영문 소문자가 3자 이상 연속하는 경우 추출
str_extract_all(string, "[a-z]{3,5}") #영문 소문자가 3~5자 연속하는 경우 추출
# Textbook p. 85 practice - extracting Hangul, letters and digits from a string
str_extract_all(string, "hong") #해당 문자열 추출
str_extract_all(string, "25") #해당 숫자 추출
str_extract_all(string, "[가-힣]{3}") #연속된 3개의 한글 문자열 추출
str_extract_all(string, "[a-z]{3}") #연속된 3개의 영문 소문자 추출
str_extract_all(string, "[0-9]{4}") #연속된 4개의 숫자 추출
# Textbook p. 86 practice - extracting everything except Hangul, letters and digits
str_extract_all(string, "[^a-z]") #영문자를 제외한 나머지 추출
str_extract_all(string, "[^a-z]{4}") #영문자를 제외한 연속된 4글자 추출
str_extract_all(string, "[^가-힣]{5}")#한글을 제외한 나머지 연속된 5글자 추출
str_extract_all(string, "[^0-9]{3}") #숫자를 제외한 나머지 연속된 3글자 추출
# Textbook p. 86 practice - checking a resident registration number
jumin <- "123456-1234567"
str_extract(jumin, "[0-9]{6}-[1234][0-9]{6}")
str_extract(jumin, "\\d{6}-[1234]\\d{6}") #d{6}: 숫자 6개
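# str_extract() matches anywhere inside the string; for a strict whole-string check
# the pattern can be anchored (an extra illustrative step, not part of the textbook exercise)
str_detect(jumin, "^\\d{6}-[1234]\\d{6}$")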
# Textbook p. 87 practice - extracting words of a specified length
name <- "홍길동1234,이순신5678,강감찬1012"
str_extract_all(name, "\\w{7,}")   # extract only words (digits included) of 7 or more characters
# Textbook p. 87 practice - getting the length of a string
string <- "hongkd105leess1002you25강감찬2005"
len <- str_length(string)
len
# Textbook p. 87 practice - finding the position (index) of a substring
string <- "hongkd105leess1002you25강감찬2005"
str_locate(string, "강감찬")
# Textbook p. 88 practice - making a substring
string_sub <- str_sub(string, 1, len - 7)   # uses the len variable from the previous example
string_sub
string_sub <- str_sub(string, 1, 23)   # using character positions directly
string_sub
# Textbook p. 88 practice - replacing parts of a string
string_sub #문자열 교체 전 변수의 값
string_rep <- str_replace(string_sub, "hongkd105", "홍길동35,")
string_rep <- str_replace(string_rep, "leess1002", "이순신45,")
string_rep <- str_replace(string_rep, "you25", "유관순25,")
string_rep #문자열 교체 후 변수의 값
# Textbook p. 89 practice - combining strings
string_rep #문자열 결합 전 변수의 값
string_c <- str_c(string_rep, "강감찬55")
string_c #문자열 결합 후 변수의 값
# Textbook p. 89 practice - splitting a string
string_c #문자열을 분리하기 전 변수의 값
string_sp <- str_split(string_c, ",")   # split the string on commas
string_sp #문자열 분리 결과
# Textbook p. 89 practice - joining strings
# Step 1: create a character vector
string_vec <- c("홍길동35", "이순신45", "유관순25", "강감찬55")
string_vec
# Step 2: join the character vector with commas
string_join <- paste(string_vec, collapse = ",")
string_join
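# The same join written with stringr itself (equivalent to paste(collapse = ",")):
str_c(string_vec, collapse = ",")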
97a8d1919b7fbc3f9ab5de676e34d8c30f842ea0 | b45944145af4b2d86ff4f02393b984370006eb1d | /Hackathon Refugees.R | d1f22a762ceddc8f78089745cbda72dbef507535 | [] | no_license | madeleinenic/PODS-Hackathon | 1f035bcc3058b149d0e4f0ac5b1e795b31118780 | 90d831cec3b1c5cd3a73eaad35ca7d665a31a1b3 | refs/heads/master | 2022-02-13T23:32:59.227478 | 2019-06-25T13:16:11 | 2019-06-25T13:16:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,051 | r | Hackathon Refugees.R | service_data$`Blended Sponsorship Refugee` <- as.numeric(service_data$`Blended Sponsorship Refugee`)
ggplot(data= service_data, aes(x= Province , y= `Blended Sponsorship Refugee`,
fill= `Service Type`)) + geom_bar(stat="identity")
service_data$`Government-Assisted Refugee` <- as.numeric(service_data$`Government-Assisted Refugee`)
ggplot(data= service_data, aes(x= Province , y= `Government-Assisted Refugee`,
fill= `Service Type`)) + geom_bar(stat="identity")
service_data$`Privately Sponsored Refugee` <- as.numeric(service_data$`Privately Sponsored Refugee`)
ggplot(data= service_data, aes(x= Province , y= `Privately Sponsored Refugee`,
fill= `Service Type`)) + geom_bar(stat="identity")
service_data$`Total number of requests` <- as.numeric(service_data$`Total number of requests`)
ggplot(data= service_data, aes(x= Province , y= `Total number of requests`,
fill= `Service Type`)) + geom_bar(stat="identity")
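# The four blocks above repeat one pattern; a sketch of the same plots in long format,
# assuming the dplyr and tidyr packages are available alongside ggplot2:
library(dplyr)
library(tidyr)
service_long <- service_data %>%
  mutate(across(c(`Blended Sponsorship Refugee`, `Government-Assisted Refugee`,
                  `Privately Sponsored Refugee`, `Total number of requests`), as.numeric)) %>%
  pivot_longer(c(`Blended Sponsorship Refugee`, `Government-Assisted Refugee`,
                 `Privately Sponsored Refugee`, `Total number of requests`),
               names_to = "Measure", values_to = "Count")
ggplot(data = service_long, aes(x = Province, y = Count, fill = `Service Type`)) +
  geom_bar(stat = "identity") +
  facet_wrap(~ Measure, scales = "free_y")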
c0948155a954a9214afe20b747401e09ce0eb5fa | 0b551347a29f4e01e9273615ce0c5242f9bdb63a | /pkg/R/get_power_deriv.R | b0502c402f1f5005608057d9665b77fa91f40971 | [] | no_license | timemod/dynmdl | 8088fecc6c2b84d50ecb7d7b762bddb2b1fcf629 | 8dc49923e2dcc60b15af2ae1611cb3a86f87b887 | refs/heads/master | 2023-04-07T21:30:53.271703 | 2023-03-03T13:02:30 | 2023-03-03T13:02:30 | 148,925,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 454 | r | get_power_deriv.R | get_power_deriv <- function(x, p, k) {
# The k-th derivative of x^p (Used in f_dynamic)
# INPUTS
# x: base
# p: power
# k: derivative order
#
  # OUTPUTS
  #    dxp: the k-th derivative of x^p evaluated at x
if ((abs(x) < 1e-12) && (p > 0) && (k > p) && (abs(p - round(p)) < 1e-12)) {
return (0)
} else {
dxp <- x^(p - k);
for (i in 0:(k - 1)) {
dxp <- dxp * p
p <- p - 1
}
return (dxp)
}
}
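# Illustrative check (hand-computed): the 2nd derivative of x^3 is 6*x, so at x = 2 it
# equals 12, and get_power_deriv(2, 3, 2) returns 3 * 2 * 2^(3 - 2) = 12 as expected.
# get_power_deriv(2, 3, 2)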
7e48bb17aa4f268f9608e60116d0f65df6225bf8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/StrainRanking/examples/powderymildew.Rd.R | 60444ba52ba56949999522815269b2bb794eb2a1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 470 | r | powderymildew.Rd.R | library(StrainRanking)
### Name: powderymildew
### Title: Demographic and genetic real data
### Aliases: powderymildew
### Keywords: datasets
### ** Examples
## load the powderymildew data set
data(powderymildew)
## names of items of powderymildew
names(powderymildew)
## print powderymildew
print(powderymildew)
## alternatives to print one of the items of powderymildew, e.g. the 4th item:
print(powderymildew$genetic.frequencies)
print(powderymildew[[4]])
a2487ff34e34a16105078dea6f61cc432809b803 | 4834724ced99f854279c2745790f3eba11110346 | /man/add_dup_markers.Rd | 4be1572417edcd093dbb6f4aae3d0719d15a7eae | [] | no_license | mdavy86/polymapR | 1c6ac5016f65447ac40d9001388e1e9b4494cc55 | 6a308769f3ad97fc7cb54fb50e2b898c6921ddf9 | refs/heads/master | 2021-01-25T12:37:09.046608 | 2018-02-13T17:07:35 | 2018-02-13T17:07:35 | 123,487,219 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 520 | rd | add_dup_markers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exported_functions.R
\name{add_dup_markers}
\alias{add_dup_markers}
\title{Add duplicate markers to a map}
\usage{
add_dup_markers(maplist, bin_list)
}
\arguments{
\item{maplist}{A list of maps. Output of MDSMap_from_list.}
\item{bin_list}{A list of marker bins containing marker duplicates. Output of screen_for_duplicate_markers.}
}
\value{
A maplist with added markers.
}
\description{
Add duplicate markers to a map
}
|
5bbe26521ac1c94c54925dab8608b9b23c44baaa | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /flying/man/birds.Rd | c383e810c74b9c9cb46738a2833506ca5a2b6548 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 923 | rd | birds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/birds_documentation.R
\docType{data}
\name{birds}
\alias{birds}
\title{Sample 28 birds}
\format{A data frame with 28 observations and 6 variables not counting the
name.
\describe{
\item{Scientific.name}{Name of bird species}
\item{Empty.mass}{Body mass in Kg. Includes fuel. All-up mass with crop
empty. Not to be confused with lean mass.}
\item{Wing.span}{Length of wings spread out in metres}
\item{Fat.mass}{Mass of fat that is consumable as fuel in Kg}
  \item{Order}{Order of the species (passerine vs non-passerine)}
\item{Wing.area}{Area of both wing projected on a flat surface in metres
squared}
\item{Muscle.mass}{Mass in Kg. of flight muscles}
}}
\usage{
birds
}
\description{
Preset birds data, extracted from Flight program. Fat mass percentage
generated randomly where zero.
}
\keyword{datasets}
|
ce5b4b7a4e71698c67fcf80302e55c7c5bb8ff3e | 32e00dbb5c0b06a8aadb16a753479ce269310488 | /COVIDModel/server.R | c8212a2d4feb98d6a86d8e36ebd9d42096934dd1 | [] | no_license | lnsongxf/CovidShinyModel | d69ec6f372bf5db09fac0b3b3f0d6e0630119acc | e92a6dab43fcd4415cc2cddb23ffc7417348b8c0 | refs/heads/master | 2021-05-20T04:05:42.985126 | 2020-03-30T19:33:28 | 2020-03-30T19:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 43,339 | r | server.R | source('helper.R')
library(shiny)
library(ggplot2)
library(shinyWidgets)
library(data.table)
library(DT)
library(dplyr)
library(shinyjs)
# start simulation from this number of infections
# TODO: should do a test that this works...if we start with a different start.inf
# are the results different?
start.inf <- 1
r0.default <- 2.8
est.days <- 365
shinyServer(function(input, output, session) {
## ............................................................................
## Helper Modal
## ............................................................................
# modal pop-up helper screen
observeEvent(input$howtouse,{
showModal(modalDialog(
fluidPage(
HTML("<h4><b>What does this tool do?</b></h4>
The app projects numbers of <i> active cases, hospitalizations, ICU-use and
deaths</i> for COVID-19 based on local epidemiologic data under serial
public health interventions.
It can be used for regional planning for predicting hospital needs as the COVID-19
epidemic progresses.
<br><br>
<h4><b>What inputs do I need to make a prediction?</b></h4>
After setting a date for Day 0, you need to input the <i>population of the region</i> that
you are projecting for, as well as the estimated number of <i>current COVID-19 inpatients</i>.
<br><br>
You also will need some measure of the <i>reproductive number Re prior to Day 0</i>. This value
is defaulted to 2.8, which is the approximate reproductive number currently reported in literature.
<br><br>
Other parameters include rates, durations, and lags for hospitalizations, ICU, and ventilator
needs. These are defaulted to values reported in literature, but can be changed by clicking on
'Customize Other Parameters.'
<br><br>
<h4><b>How does the app predict the number of Day 0 cases and infections?</b></h4>
We use the percent of infections that result in hospitalization to make a prediction
into the actual number of infections. When doing this, we account for the lag time between infection
and hospitalization, during which more infections will have occurred.
<br> <br>
By using hospitalization numbers, we try avoid assumptions on detection rates and variable testing
patterns.
<br> <br>
<h4><b>What exactly does the app project?</b></h4>
There are three tabs of graphs:
<ul>
<li><b>Cases</b>: Shows the projected number of active, recovered, and total cases. </li>
<li><b>Hospitalization</b>: Shows the predicted hospital bed, ICU bed, and ventilator needs
over time.</li>
<li><b>Hospital Resources</b>: Allows you to enter in current regional capacity for
hospital resources, and will predict resource availability over time.</li>
</ul>
<br>
<h4><b>How can I take into account for public health interventions which may
change the dynamics of the disease?</b></h4>
Under the 'Add Interventions' section, you can add and save changes in Re over
different timepoints, which will change the projections in the graphs.
")
),
size = 'l'
)
)
})
## ............................................................................
## Prediction Field (Currently Only Hospitalizations)
## ............................................................................
output$prediction_fld <- renderUI({
numericInput(inputId = 'num_hospitalized',
label = 'Estimate of current inpatients with COVID-19 (diagnosed or not) on Day 0',
value = 50)
})
## ............................................................................
## Selection of R0 or Doubling Time
## ............................................................................
output$prior_val <- renderUI({
if (input$usedouble == TRUE){
sliderInput(inputId = 'doubling_time',
label = 'Doubling Time (days) Before Day 0',
min = 1,
max = 12,
step = 1,
value = 6)
}
else{
fluidPage(
fluidRow(
sliderInput(inputId = 'r0_prior',
label = 'Re Before Day 0',
min = 0.1,
max = 7,
step = 0.1,
value = 2.8),
actionLink('predict_re', 'Estimate Re prior to Day 0 based on data.')
)
)
}
})
output$int_val <- renderUI({
if (input$usedouble == TRUE){
sliderInput(inputId = 'new_double',
label = 'New Doubling Time (days) After Interventions',
min = 0,
max = 50,
step = 1,
value = 6)
}
else{
sliderInput(inputId = 'r0_new',
label = 'New Re After Intervention',
min = 0.1,
max = 6,
step = 0.1,
value = 2.8)
}
})
## ............................................................................
## Estimation of Re
## ............................................................................
re.estimates <- reactiveValues(
graph = NULL,
best.estimate = NULL
)
observeEvent(input$predict_re, {
showModal(
modalDialog(
useShinyjs(),
HTML('<h4> Estimate Re based on historical hospitalizations</h4>
Provide data from dates prior to Day 0 to estimate the Re value.<br><br>'),
splitLayout(
dateInput(inputId = 'date.hist',
label = 'Date',
value = input$curr_date,
max = input$curr_date,
min = input$curr_date - 14),
numericInput(inputId = 'num.hospitalized.hist',
label = 'Number Hospitalized',
value = NA)
),
actionButton('add.hist', 'Add Data'),
dataTableOutput(
outputId = 'input_hosp_dt'
),
tags$script("$(document).on('click', '#input_hosp_dt button', function () {
Shiny.onInputChange('lastClickId',this.id);
Shiny.onInputChange('lastClick', Math.random())
});"),
HTML('<br>'),
actionButton(
inputId = 'run.fit',
label = 'Estimate Re Prior to Day 0'),
div(id = "predict.ui.toggle",
fluidPage(
uiOutput('best.re'),
plotOutput('fit.plot')
)
) %>% hidden()
)
)
})
hist.data <- reactiveVal(
data.frame('Date' = character(0),
'Hospitalizations' = numeric(0),
'Day' = numeric(0))
)
observeEvent(input$add.hist,{
if (!as.character(input$date.hist) %in% as.character(hist.data()$Date) &
!is.na(input$num.hospitalized.hist)){
new.hist <- rbind(hist.data(),
list('Date' = as.character(input$date.hist),
'Hospitalizations' = input$num.hospitalized.hist,
'Day' = input$date.hist - input$curr_date
))
new.hist <- arrange(new.hist, Day)
new.hist$Date <- as.Date(as.character(new.hist$Date))
hist.data(new.hist)
updateDateInput(session,
inputId = 'date.hist',
value = input$date.hist - 1)
}
else if (as.character(input$date.hist) %in% as.character(hist.data()$Date))(
showNotification("This date has already been added.",
type = "error")
)
else{
showNotification("Please enter the hospitalization number.",
type = "error")
}
})
output$input_hosp_dt <- renderDataTable({
hist.dt <- hist.data()
if (nrow(hist.dt) > 0){
hist.dt[["Delete"]] <-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=delhist', '_', hist.dt$Day, '>Delete</button>
</div>
')
}
hist.dt$Day <- NULL
datatable(hist.dt,
escape=F, selection = 'none',
options = list(pageLength = 10, language = list(
zeroRecords = "No historical data added.",
search = 'Find in table:'), dom = 't'), rownames = FALSE)
})
observeEvent(input$lastClick, {
if (grepl('delhist', input$lastClickId)){
delete_day <- as.numeric(strsplit(input$lastClickId, '_')[[1]][2])
hist.data(hist.data()[hist.data()$Day != delete_day,])
}
})
observeEvent(input$run.fit, {
hist.temp <- hist.data()
hist.temp <- arrange(hist.temp, desc(Date))
if (nrow(hist.temp) >= 2){
best.fit <- findBestRe(S0 = input$num_people,
gamma = params$gamma,
num.days = est.days,
day.vec = hist.temp$Day,
num_actual.vec = hist.temp$Hospitalizations,
start.inf = start.inf,
hosp.delay.time = params$hosp.delay.time,
hosp.rate = params$hosp.rate,
hosp.los = params$hosp.los,
icu.delay.time = params$icu.delay.time,
icu.rate = params$icu.rate,
icu.los = params$icu.los,
vent.delay.time = params$vent.delay.time,
vent.rate = params$vent.rate,
vent.los = params$vent.los)
best.vals <- best.fit$best.vals
df.graph <- data.frame(Date = hist.temp$Date,
Predicted = best.vals,
Actual = hist.temp$Hospitalizations)
df.melt <- melt(df.graph, id.vars = 'Date')
re.estimates$graph <- ggplot(df.melt, aes(x = Date, y = value, col = variable)
) + geom_point() + geom_line() + theme(text = element_text(size=20)) +
theme(legend.title=element_blank())
re.estimates$best.estimate <- sprintf('<br><br><h4>The best estimate for Re prior to Day 0 is <b>%s</b>.',
best.fit$best.re)
show("predict.ui.toggle")
}
else{
showNotification("You need at least two timepoints of data to make a prediction.",
type = "error")
}
})
output$best.re <- renderUI({
HTML(re.estimates$best.estimate)
})
output$fit.plot <- renderPlot({
re.estimates$graph
})
observeEvent(input$curr_date, {
hist.data(data.frame('Date' = character(0),
'Hospitalizations' = numeric(0),
'Day' = numeric(0)))
})
## ............................................................................
## Parameter selection
## ............................................................................
# initializing a set of parameters
params <- reactiveValues(
illness.length = 14,
gamma = 1/14,
hosp.delay.time = 10,
hosp.rate = 0.06,
hosp.los = 7,
icu.delay.time = 5,
icu.rate = 0.3,
icu.los = 1,
vent.delay.time = 1,
vent.rate = 0.64,
vent.los = 10,
int.new.r0 = 2.8,
int.new.double = 6,
int.new.num.days = 0,
hosp.avail = 1000,
icu.avail = 200,
vent.avail = 100
)
# modal pop-up to update parameters
observeEvent(input$parameters_modal,{
showModal(modalDialog(
fluidPage(
sliderInput('illness.length', 'Average Length of Illness (Assumed Same as Time of Infectiousness)', min = 0, max = 20, step = 1,
value = params$illness.length, width = '100%'),
sliderInput('hosp.rate', 'Percent Hospitalized Among Infections', min = 0, max = 1, step = 0.01,
value = params$hosp.rate, width = '100%'),
sliderInput('icu.rate', 'Percent ICU Admitted Among Hospitalized', min = 0, max = 1, step = 0.01,
value = params$icu.rate, width = '100%'),
sliderInput('vent.rate', 'Percent Ventilated Among ICU Admissions', min = 0, max = 1, step = 0.01,
value = params$vent.rate, width = '100%'),
sliderInput('hosp.after.inf', 'Infection to hospitalization (days)', min = 0, max = 30, step = 1,
value = params$hosp.delay.time, width = '100%'),
sliderInput('icu.after.hosp', 'Hospitalization to ICU Admission (days)', min = 0, max = 30, step = 1,
value = params$icu.delay.time, width = '100%'),
sliderInput('vent.after.icu', 'ICU Admission to Ventilation (days)', min = 0, max = 30, step = 1,
value = params$vent.delay.time, width = '100%'),
sliderInput('hosp.los', 'Hospital Length of Stay for Non-ICU Patients (days)', min = 1, max = 15, step = 1,
value = params$hosp.los, width = '100%'),
sliderInput('icu.los', 'ICU Length of Stay for Non-Ventilated Patients (days)', min = 1, max = 15, step = 1,
value = params$icu.los, width = '100%'),
sliderInput('vent.los', 'Average Ventilation Course (days)', min = 1, max = 15, step = 1,
value = params$vent.los, width = '100%')),
footer = tagList(
actionButton("save", "Save and Close")
)
)
)
})
observeEvent(input$save, {
params$illness.length = input$illness.length
params$gamma = 1/input$illness.length
params$hosp.delay.time = input$hosp.after.inf
params$hosp.rate = input$hosp.rate
params$hosp.los = input$hosp.los
params$icu.delay.time = input$icu.after.hosp
params$icu.rate = input$icu.rate
params$icu.los = input$icu.los
params$vent.delay.time = input$vent.after.icu
params$vent.rate = input$vent.rate
params$vent.los = input$vent.los
removeModal()
})
## ............................................................................
## Initialization
## ............................................................................
initial_beta_vector <- reactive({
if (input$usedouble == FALSE){
beta <- getBetaFromRe(input$r0_prior, params$gamma)
}
else{
beta <- getBetaFromDoubling(input$doubling_time, params$gamma)
}
initial.beta.vector <- rep(beta, est.days)
initial.beta.vector
})
curr.day.list <- reactive({
predict.metric <- 'Hospitalization'
num.actual <- input$num_hospitalized
find.curr.estimates(S0 = input$num_people,
beta.vector = initial_beta_vector(),
gamma = params$gamma,
num.days = est.days,
num_actual = num.actual,
metric = predict.metric,
start.inf = start.inf,
hosp.delay.time = params$hosp.delay.time,
hosp.rate = params$hosp.rate,
hosp.los = params$hosp.los,
icu.delay.time = params$icu.delay.time,
icu.rate = params$icu.rate,
icu.los = params$icu.los,
vent.delay.time = params$vent.delay.time,
vent.rate = params$vent.rate,
vent.los = params$vent.los)
})
## ............................................................................
## Interventions
## ............................................................................
output$intervention_ui <- renderUI({
if (input$showint){
fluidPage(
fluidRow(uiOutput(outputId = 'int_val'),
sliderInput(inputId = 'int_day',
label = 'Day after Day 0 Intervention is Implemented',
min = 0,
max = 365,
step = 1,
value = 0),
actionButton(inputId = 'add_intervention',
label = 'Save Intervention'))
)
}
})
observeEvent(input$showint, {
params$int.new.double <- input$doubling_time
params$int.new.r0 <- input$r0_prior
params$int.new.num.days <- 0
})
observeEvent(input$doubling_time,{
if(input$showint == FALSE){
params$int.new.double <- input$doubling_time
}
})
observeEvent(input$r0_prior,{
if(input$showint == FALSE){
params$int.new.r0 <- input$r0_prior
}
})
observeEvent(input$new_double, {
params$int.new.double <- input$new_double
})
observeEvent(input$r0_new, {
params$int.new.r0 <- input$r0_new
})
observeEvent(input$int_day, {
params$int.new.num.days <- input$int_day
})
intervention.table <- reactiveVal(
data.frame('Day' = numeric(0),
'New R0' = numeric(0))
)
observeEvent(input$usedouble, {
if (input$usedouble == TRUE){
intervention.table(
data.frame('Day' = numeric(0),
'New Double Time' = numeric(0))
)
}
else{
intervention.table(
data.frame('Day' = numeric(0),
'New Re' = numeric(0))
)
}
})
observeEvent(input$add_intervention,{
if (!params$int.new.num.days %in% intervention.table()$Day){
if (input$usedouble == TRUE){
intervention.table(rbind(intervention.table(),
list('Day' = params$int.new.num.days ,
'New.Double.Time' = params$int.new.double
)))
}
else{
intervention.table(rbind(intervention.table(),
list('Day' = params$int.new.num.days,
'New.Re' = params$int.new.r0
)))
}
intervention.table(arrange(intervention.table(), Day))
}
else{
showNotification('You have already added an intervention on this date', type = 'error')
}
})
output$int_table <- renderDataTable({
int.df <- intervention.table()
if (nrow(int.df) > 0){
int.df[["Delete"]] <-
paste0('
<div class="btn-group" role="group" aria-label="Basic example">
<button type="button" class="btn btn-secondary delete" id=delete', '_', int.df$Day, '>Delete</button>
</div>
')
}
datatable(int.df,
escape=F, selection = 'none',
options = list(pageLength = 10, language = list(
zeroRecords = "No interventions added.",
search = 'Find in table:'), dom = 't'), rownames = FALSE)
})
observeEvent(input$lastClick, {
if (grepl('delete', input$lastClickId)){
delete_day <- as.numeric(strsplit(input$lastClickId, '_')[[1]][2])
intervention.table(intervention.table()[intervention.table()$Day != delete_day,])
}
})
## ............................................................................
## Projection
## ............................................................................
beta.vector <- reactive({
int.table.temp <- intervention.table()
# determines what 'day' we are on using the initialization
curr.day <- as.numeric(curr.day.list()['curr.day'])
if (is.na(curr.day)){
# TODO: replace hacky fix to bug with non-hacky fix
curr.day = 365
new.num.days = 1000
}
# setting doubling time
if (input$usedouble == FALSE){
if (!is.null(input$r0_prior) & !is.null(params$int.new.r0)){
int.table.temp <- rbind(int.table.temp,
list(Day = c(-curr.day, input$proj_num_days, params$int.new.num.days ),
New.Re = c(input$r0_prior, NA, params$int.new.r0 )))
}
else{
int.table.temp <- rbind(int.table.temp,
list(Day = c(-curr.day, input$proj_num_days),
New.Re = c(r0.default, NA)))
}
}
else{
int.table.temp <- rbind(int.table.temp,
list(Day = c(-curr.day, input$proj_num_days, params$int.new.num.days),
New.Double.Time = c(input$doubling_time, NA, params$int.new.double )))
}
applygetBeta <- function(x){
if (input$usedouble == FALSE){
return(getBetaFromRe(as.numeric(x['New.Re']),
params$gamma))
}
else{
return(getBetaFromDoubling(as.numeric(x['New.Double.Time']),
params$gamma))
}
}
int.table.temp$beta <- apply(int.table.temp, 1, applygetBeta)
int.table.temp <- arrange(int.table.temp, Day)
int.table.temp <- int.table.temp[!duplicated(int.table.temp$Day),]
day.vec <- int.table.temp$Day
rep.vec <- day.vec[2:length(day.vec)] - day.vec[1:length(day.vec) - 1]
betas <- int.table.temp$beta[1:length(day.vec) - 1]
beta.vec <- c()
for (i in 1:length(rep.vec)){
beta <- betas[i]
reps <- rep.vec[i]
beta.vec <- c(beta.vec, rep(beta, reps))
}
beta.vec
})
sir.output.df <- reactive({
# run the same model as initialization model but run extra days
curr.day <- as.numeric(curr.day.list()['curr.day'])
new.num.days <- input$proj_num_days + curr.day
new.num.days <- ifelse(is.na(new.num.days), 365, new.num.days)
# starting conditions
start.susc <- input$num_people - start.inf
start.res <- 0
SIR.df = SIR(S0 = start.susc,
I0 = start.inf,
R0 = start.res,
beta.vector = beta.vector(),
gamma = params$gamma,
num.days = new.num.days,
hosp.delay.time = params$hosp.delay.time,
hosp.rate = params$hosp.rate,
hosp.los = params$hosp.los,
icu.delay.time = params$icu.delay.time,
icu.rate = params$icu.rate,
icu.los = params$icu.los,
vent.delay.time = params$vent.delay.time,
vent.rate = params$vent.rate,
vent.los = params$vent.los)
# shift the number of days to account for day 0 in the model
SIR.df$days.shift <- SIR.df$day - curr.day
SIR.df[SIR.df$days.shift == 0,]$hosp <- input$num_hospitalized
SIR.df$date <- SIR.df$days.shift + as.Date(input$curr_date)
SIR.df
})
## ............................................................................
## Plot Outputs
## ............................................................................
# UI depends on what graph is selected
output$plot_output <- renderUI({
if (!is.null(input$num_hospitalized)){
if (!is.na(input$num_hospitalized)){
if (input$selected_graph == 'Cases'){
fluidPage(
checkboxGroupInput(inputId = 'selected_cases',
label = 'Selected',
choices = c('Active', 'Resolved', 'Cases'),
selected = c('Active', 'Resolved', 'Cases'),
inline = TRUE),
plotOutput(outputId = 'cases.plot',
click = "plot_click")
)
}
else if (input$selected_graph == 'Hospitalization'){
fluidPage(
checkboxGroupInput(inputId = 'selected_hosp',
label = 'Selected',
choices = c('Hospital', 'ICU', 'Ventilator'),
selected = c('Hospital', 'ICU', 'Ventilator'),
inline = TRUE),
plotOutput(outputId = 'hospitalization.plot',
click = "plot_click")
)
}
else{
fluidPage(
column(4,
numericInput(inputId = 'hosp_cap',
label = 'Hospital Bed Availability',
value = params$hosp.avail)),
column(4,
numericInput(inputId = 'icu_cap',
label = 'ICU Space Availability',
value = params$icu.avail)),
column(4,
numericInput(inputId = 'vent_cap',
label = 'Ventilator Availability',
value = params$vent.avail)),
fluidPage(checkboxGroupInput(inputId = 'selected_res',
label = 'Selected',
choices = c('Hospital', 'ICU', 'Ventilator'),
selected = c('Hospital', 'ICU', 'Ventilator'),
inline = TRUE)),
plotOutput(outputId = 'resource.plot',
click = "plot_click")
)
}
}
}
})
observeEvent(input$hosp_cap, {
params$hosp.avail <- input$hosp_cap
})
observeEvent(input$icu_cap, {
params$icu.avail <- input$icu_cap
})
observeEvent(input$vent_cap, {
params$vent.avail <- input$vent_cap
})
## ............................................................................
## Dataframes for Visualization and Downloading
## ............................................................................
roundNonDateCols <- function(df){
df.new <- data.frame(df)
for (col in colnames(df.new)){
if (col != 'date'){
df.new[,col] <- round(df.new[,col])
}
}
return(df.new)
}
cases.df <- reactive({
df_temp <- sir.output.df()
df_temp <- df_temp[df_temp$days.shift >= 0,]
df_temp$Cases <- df_temp$I + df_temp$R
df_temp$Active <- df_temp$I
df_temp$Resolved <- df_temp$R
df_temp <- df_temp[,c('date', 'days.shift', 'Cases', 'Active', 'Resolved')]
colnames(df_temp) <- c('date', 'day', 'Cases', 'Active', 'Resolved')
df_temp <- roundNonDateCols(df_temp)
df_temp
})
hospitalization.df <- reactive({
df_temp <- sir.output.df()
df_temp <- df_temp[df_temp$days.shift >= 0,]
df_temp <- df_temp[,c('date', 'days.shift', 'hosp', 'icu', 'vent')]
colnames(df_temp) <- c('date', 'day', 'Hospital', 'ICU', 'Ventilator')
df_temp <- roundNonDateCols(df_temp)
df_temp
})
resource.df <- reactive({
df_temp <- sir.output.df()
df_temp <- df_temp[df_temp$days.shift >= 0,]
if (!is.null(input$hosp_cap)){
df_temp$hosp <- input$hosp_cap - df_temp$hosp
df_temp$icu <- input$icu_cap - df_temp$icu
df_temp$vent <- input$vent_cap - df_temp$vent
}
df_temp <- df_temp[,c('date', 'days.shift', 'hosp', 'icu', 'vent')]
colnames(df_temp) <- c('date', 'day', 'Hospital', 'ICU', 'Ventilator')
df_temp <- roundNonDateCols(df_temp)
df_temp
})
## ............................................................................
## Table output
## ............................................................................
output$rendered.table <- renderDataTable({
if (input$selected_graph == 'Cases'){
df.render <- cases.df()
}
else if (input$selected_graph == 'Hospitalization'){
df.render <- hospitalization.df()
}
else{
df.render <- resource.df()
}
df.render$date <- format(df.render$date, format="%B %d, %Y")
datatable(data=df.render,
escape=F, selection = 'single',
options = list(pageLength = 10,
lengthChange = FALSE,
searching = FALSE), rownames = FALSE)
})
observeEvent(input$rendered.table_row_last_clicked,{
row.id <- input$rendered.table_row_last_clicked
if (input$selected_graph == 'Cases'){
df.table = cases.df()
}
else if (input$selected_graph == 'Hospitalization'){
df.table = hospitalization.df()
}
else{
df.table = resource.df()
}
select.date <- df.table[row.id,'date']
plot_day(select.date)
})
## ............................................................................
## Graphs
## ............................................................................
plot_day <- reactiveVal(NULL)
observeEvent(input$curr_date, {
plot_day(input$curr_date)
})
observeEvent(input$plot_click, {
plot_day(as.Date(round(input$plot_click$x), origin = "1970-01-01"))
proxy <- dataTableProxy(
'rendered.table',
session = shiny::getDefaultReactiveDomain(),
deferUntilFlush = TRUE
)
selectRows(proxy, plot_day() - input$curr_date + 1)
selectPage(proxy, ceiling((plot_day() - input$curr_date + 1) / 10))
})
observeEvent(input$goright, {
if (plot_day() != input$curr_date + input$proj_num_days){
plot_day(plot_day() + 1)
proxy <- dataTableProxy(
'rendered.table',
session = shiny::getDefaultReactiveDomain(),
deferUntilFlush = TRUE
)
selectRows(proxy, plot_day() - input$curr_date + 1)
selectPage(proxy, ceiling((plot_day() - input$curr_date + 1) / 10))
}
})
observeEvent(input$goleft, {
if (plot_day() != input$curr_date){
plot_day(plot_day() - 1)
proxy <- dataTableProxy(
'rendered.table',
session = shiny::getDefaultReactiveDomain(),
deferUntilFlush = TRUE
)
selectRows(proxy, plot_day() - input$curr_date + 1)
selectPage(proxy, ceiling((plot_day() - input$curr_date + 1) / 10))
}
})
output$hospitalization.plot <- renderPlot({
df.to.plot <- hospitalization.df()
df.to.plot$day <- NULL
if (length(input$selected_hosp) != 0){
cols <- c('date', input$selected_hosp)
df.to.plot <- df.to.plot[,cols]
df_melt <- melt(df.to.plot, 'date')
ggplot(df_melt, aes(x = date, y = value, col = variable)) + geom_point() + geom_line(
) + geom_vline(xintercept=input$curr_date) + theme(text = element_text(size=20)
) + geom_vline(xintercept=plot_day(), color = 'red') + ylab('')
}
})
output$resource.plot <- renderPlot({
df.to.plot <- resource.df()
df.to.plot$day <- NULL
if (length(input$selected_res) != 0){
cols <- c('date', input$selected_res)
df.to.plot <- df.to.plot[,cols]
df_melt <- melt(df.to.plot, 'date')
ggplot(df_melt, aes(x = date, y = value, col = variable)) + geom_point() + geom_line(
) + geom_vline(xintercept=input$curr_date) + theme(text = element_text(size=20)
) + geom_vline(xintercept=plot_day(), color = 'red') + geom_hline(yintercept = 0) + ylab('')
}
})
output$cases.plot <- renderPlot({
df.to.plot <- cases.df()
df.to.plot$day <- NULL
if (length(input$selected_cases) != 0){
cols <- c('date', input$selected_cases)
df.to.plot <- df.to.plot[,cols]
df_melt <- melt(df.to.plot, 'date')
ggplot(df_melt, aes(x = date, y = value, col = variable)) + geom_point(
) + geom_line() + geom_vline(xintercept=input$curr_date) + theme(text = element_text(size=20)
) + geom_vline(xintercept=plot_day(), color = 'red') + ylab('')
}
})
## ............................................................................
## Natural Language Outputs
## ............................................................................
# Estimated number of infections
output$infected_ct <- renderUI({
infected <- curr.day.list()['infection.estimate']
cases <- as.numeric(curr.day.list()['infection.estimate']) +
as.numeric(curr.day.list()['recovered.estimate'])
curr_date <- format(input$curr_date, format="%B %d, %Y")
HTML(sprintf('<h4>On %s (Day 0), we estimate there have been <u>%s total cases</u> of COVID-19 in the region, with
<u>%s people actively infected</u>.</h4>', curr_date, cases, infected))
})
# Word description
output$description <- renderUI({
df_temp <- sir.output.df()
select.row <- df_temp[df_temp$date == plot_day(),]
select.date <- format(select.row$date, format="%B %d, %Y")
select.day <- select.row$days.shift
if (input$selected_graph == 'Cases'){
cases <- round(select.row$I + select.row$R)
active <- floor(select.row$I)
if (length(select.day) != 0){
if (select.day == 0){
HTML(sprintf('<h4>On %s (Day <b>%s</b>), there are <b>%s COVID-19 cases</b> in the region,
with <b>%s actively infected</b>.</h4>',
select.date, select.day, cases, active))
}
else{
HTML(sprintf('<h4>On %s (Day <b>%s</b>), there will be <b>%s COVID-19 cases</b> in the region,
with <b>%s actively infected</b>.</h4>',
select.date, select.day, cases, active))
}
}
}
else if (input$selected_graph == 'Hospitalization'){
hosp <- round(select.row$hosp)
icu <- round(select.row$icu)
vent <- round(select.row$vent)
if (select.day == 0){
HTML(sprintf('<h4>On %s (Day <b>%s</b>), there are <b>%s hospitalized from COVID-19</b> in the region,
with <b>%s in ICU care</b> and <b>%s on ventilators</b>.</h4>',
select.date, select.day, hosp, icu, vent))
}
else{
HTML(sprintf('<h4>On %s (Day <b>%s</b>), there will be <b>%s hospitalized from COVID-19</b> in the region,
with <b>%s in ICU care</b> and <b>%s on ventilators</b>.</h4>',
select.date, select.day, hosp, icu, vent))
}
}
else{
hosp_res <- input$hosp_cap - round(select.row$hosp)
icu_res <- input$icu_cap - round(select.row$icu)
vent_res <- input$vent_cap - round(select.row$vent)
if (select.day == 0){
HTML(sprintf('<h4>On %s (Day <b>%s</b>), there are <b>%s hospital beds available</b> in the region,
with <b>%s available ICU beds</b> and <b>%s available ventilators</b>.</h4>',
select.date, select.day, hosp_res, icu_res, vent_res))
}
else{
HTML(sprintf('<h4>On %s (Day <b>%s</b>), there will be <b>%s hospital beds available</b> in the region,
with <b>%s available ICU beds</b> and <b>%s available ventilators</b>.</h4>',
select.date, select.day, hosp_res, icu_res, vent_res))
}
}
})
## ............................................................................
## Download Data
## ............................................................................
output$downloadData <- downloadHandler(
filename <- function() {
paste(input$selected_graph, '-', Sys.Date(), '.csv', sep='')
},
content <- function(file) {
if (input$selected_graph == 'Cases'){
data <- cases.df()
}
else if (input$selected_graph == 'Hospitalization'){
data <- hospitalization.df()
}
else{
data <- resource.df()
}
write.csv(data.frame(data), file, row.names = FALSE)
}
)
}
) |
6c1a96aabe5417b2926e87929a842fee397a2749 | 80a2e57ee2b6e1465fcb88f677a7075140acd0c9 | /MovieLens_Final project.R | 688d5aae56b4454bc426ba31ece90d66d5ffb168 | [] | no_license | SUBRAMANIANKN/movielens | 5016e70eb89e6cf0b632a9e9bb48d10d53fbe96f | 3fc53988e391b6d2621ec5cf841f3411122366fb | refs/heads/master | 2020-04-22T22:09:24.719239 | 2019-10-07T11:56:49 | 2019-10-07T11:56:49 | 170,698,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,856 | r | MovieLens_Final project.R | ## Data Loading and Preparation
#############################################################
# Create edx set, validation set, and submission file
#############################################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
# Add the required libraries
library(caret)
library(tidyverse)
library(lubridate)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- read.table(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# Extract the year of release information from the title column and add that info to a new column year_release
edx <- edx %>% mutate(year_release = as.numeric(str_sub(title,-5,-2)))
validation <- validation %>% mutate(year_release = as.numeric(str_sub(title,-5,-2)))
# Add a new column called year_eval and update it with year of Evaluation information from the timestamp column
edx <- edx %>% mutate(year_eval = year(as_datetime(timestamp)))
validation <- validation %>% mutate(year_eval = year(as_datetime(timestamp)))
# To save memory remove the timestamp & title columns from both the training and validation data set
edx <- edx %>% select(-c(timestamp,title))
validation <- validation %>% select(-c(timestamp, title))
# Summary details of the data set
summary(edx)
summary(validation)
# Unique list of values across different fields
edx %>% summarize(UnqGenre = n_distinct(genres), UnqMid = n_distinct(movieId) ,
UnqUid = n_distinct(userId), UnqYRls = n_distinct(year_release),
UnqYEva = n_distinct(year_eval))
# Analysing how many times different Movies have been rated
edx %>% count(movieId) %>% ggplot(aes(n)) +
geom_histogram(fill = "darkblue") +
scale_x_log10() +
labs(title = "Movie Data Distribution",
subtitle = "Movie distribution by count",
x = "Movies ratingn count",
y = "Frequency")
#Top 5 movies which has more review rating than others
top5Movies <- edx %>% group_by(movieId) %>% summarize(count = n()) %>%
arrange(desc(count)) %>% head(5)
top5Movies
# Analysing how many times different user have rated movies
edx %>% count(userId) %>% ggplot(aes(n)) +
geom_histogram(fill = "darkblue" , bins=30) +
scale_x_log10() +
labs(title = "Movie Data Distribution",
subtitle = " User distribution by count",
x = "Users",
y = "Frequency")
# Top 5 users who rated more movies than others
top5Users <- edx %>% group_by(userId) %>% summarize(count = n()) %>%
arrange(desc(count)) %>% head(5)
top5Users
# Analysing how many times different movies have scored similar ratings
edx %>% ggplot(aes(rating)) +
geom_histogram(fill = "darkblue") +
labs(title = "Movie Data Distribution",
subtitle = "Rating distribution by count",
x = "Rating",
y = "Frequency")
# Ratings which has more count during movie review
top5Rating <- edx %>% group_by(rating) %>% summarize(count = n()) %>%
arrange(desc(count)) %>% head(5)
top5Rating
# Analysing how old the different movies are that were rated.
edx %>% ggplot(aes(year_release)) +
geom_histogram(fill = "darkblue") +
labs(title = "Movie Data Distribution",
subtitle = "Movie release year distribution by count",
x = "Release YEAR",
y = "Frequency")
# The top 5 year where there are more movie releases
top5YrRls <- edx %>% group_by(year_release) %>%
summarize(count = n()) %>% arrange(desc(count)) %>% head(5)
top5YrRls
# Analysing how many ovies were rated during different years
edx %>% ggplot(aes(year_eval)) +
geom_histogram(fill = "darkblue") +
labs(title = "Movie Data Distribution",
subtitle = "Movie evaluation distribution by count",
x = "Evaluation Year",
y = "Frequency")
#Top 5 years when more movies were evaluated when compared to rest of the period
top5YrEval <- edx %>% group_by(year_eval) %>%
summarize(count = n()) %>% arrange(desc(count)) %>% head(5)
top5YrEval
# We shall also look at the top 5 movie Genres which were reviewed more than others
top5Genres <- edx %>% group_by(genres) %>%
summarize(count = n()) %>% arrange(desc(count)) %>% head(5)
top5Genres
# Model deveopment
# Creating Baseline model
mu_edx_rating <- mean(edx$rating)
model_baseline <- RMSE(validation$rating, mu_edx_rating)
rmse_report <- tibble(method = "Base Line average Model", RMSE = model_baseline)
rmse_report%>%knitr::kable()
# Creating model based on impact of Movie genre
movieGenres_avgs_norm <- edx %>% group_by(genres) %>%
summarize(b_movieGenres = mean(rating - mu_edx_rating))
predicted_movieGenres_norm <- validation %>%
left_join(movieGenres_avgs_norm, by='genres') %>%
mutate(rating = mu_edx_rating + b_movieGenres )
model_movieGenres_rmse <- RMSE(predicted_movieGenres_norm$rating , validation$rating)
rmse_report <- rbind(rmse_report, tibble(method = "Movie Genre Model",
RMSE = model_movieGenres_rmse))
rmse_report%>%knitr::kable()
# Creating model based on impact of Movie genre & User impact
movieGenres_userId_avgs_norm <- edx %>%
left_join(movieGenres_avgs_norm, by='genres') %>%
group_by(userId) %>%
summarize(b_movieGenres_userId =
mean(rating - mu_edx_rating - b_movieGenres))
predicted_movieGenres_userId_norm <- validation %>%
left_join(movieGenres_avgs_norm, by='genres') %>%
left_join(movieGenres_userId_avgs_norm, by='userId') %>%
mutate(rating = mu_edx_rating + b_movieGenres + b_movieGenres_userId )
model_movieGenres_userId_rmse <-
RMSE(predicted_movieGenres_userId_norm$rating , validation$rating)
rmse_report <- rbind(rmse_report,
tibble(method = "Movie Genre & User effect Model",
RMSE = model_movieGenres_userId_rmse))
rmse_report%>%knitr::kable()
# Creating model based on impact of Movie genre, User & Year Release impact model
movieGenres_userId_yearRelease_avgs_norm <- edx %>%
left_join(movieGenres_avgs_norm, by='genres') %>%
left_join(movieGenres_userId_avgs_norm, by='userId') %>%
group_by(year_release) %>%
summarize(b_movieGenres_userId_yearRelease =
mean(rating - mu_edx_rating -
b_movieGenres - b_movieGenres_userId))
predicted_movieGenres_userId_yearRelease_norm <- validation %>%
left_join(movieGenres_avgs_norm, by='genres') %>%
left_join(movieGenres_userId_avgs_norm, by='userId') %>%
left_join(movieGenres_userId_yearRelease_avgs_norm, by='year_release') %>%
mutate(rating = mu_edx_rating + b_movieGenres + b_movieGenres_userId +
b_movieGenres_userId_yearRelease )
model_movieGenres_userId_yearRelease_rmse <-
RMSE(predicted_movieGenres_userId_yearRelease_norm$rating ,
validation$rating)
rmse_report <-
rbind(rmse_report,
tibble(method = "Movie Genre, User & Release Year effect Model",
RMSE = model_movieGenres_userId_yearRelease_rmse))
rmse_report%>%knitr::kable()
# Creating model based on impact of Movie genre, User, Release Year & Evaluation year impact model
movieGenres_userId_yearRelease_yearEval_avgs_norm <- edx %>% left_join(movieGenres_avgs_norm, by='genres') %>%
left_join(movieGenres_userId_avgs_norm, by='userId') %>%
left_join(movieGenres_userId_yearRelease_avgs_norm, by='year_release') %>%
group_by(year_eval) %>%
summarize(b_movieGenres_userId_yearRelease_yearEval =
mean(rating - mu_edx_rating - b_movieGenres -
b_movieGenres_userId - b_movieGenres_userId_yearRelease))
predicted_movieGenres_userId_yearRelease_yearEval_norm <- validation %>%
left_join(movieGenres_avgs_norm, by='genres') %>%
left_join(movieGenres_userId_avgs_norm, by='userId') %>%
left_join(movieGenres_userId_yearRelease_avgs_norm, by='year_release') %>%
left_join(movieGenres_userId_yearRelease_yearEval_avgs_norm, by='year_eval') %>%
mutate(rating = mu_edx_rating + b_movieGenres + b_movieGenres_userId +
b_movieGenres_userId_yearRelease +
b_movieGenres_userId_yearRelease_yearEval )
model_movieGenres_userId_yearRelease_yearEval_rmse <-
RMSE(predicted_movieGenres_userId_yearRelease_yearEval_norm$rating ,
validation$rating)
rmse_report <-
rbind(rmse_report,
tibble(method = "Movie Genre, User, Release Year & Evaluation Year effect Model", RMSE = model_movieGenres_userId_yearRelease_yearEval_rmse))
rmse_report%>%knitr::kable()
# There is no significant impact by including the year of evaluation in the model
# So far we tried model based on Movie Genre as it will help if in future we get any new movie (not in training set but belongs to genre available in training set) to be predicted for rating .
# Now we shall slightly change track and try model based on the Movie Id to see whether we can improve the RMSE further though this model will not be able to help if a new movie which is not in the existing data set has to be predicted for rating even thought it belongs to existing genre
# Creating model based on Movie Id impact
movieId_avgs_norm <- edx %>%
group_by(movieId) %>%
summarize(b_movieId = mean(rating - mu_edx_rating))
predicted_movieId_norm <- validation %>%
left_join(movieId_avgs_norm, by='movieId') %>%
mutate(rating = mu_edx_rating + b_movieId )
model_movieId_rmse <- RMSE(predicted_movieId_norm$rating , validation$rating)
rmse_report <-
rbind(rmse_report, tibble(method = "Movie effect Model",
RMSE = model_movieId_rmse))
rmse_report%>%knitr::kable()
# Creating model based on Movie Id & User impact
movieId_userId_avgs_norm <- edx %>%
left_join(movieId_avgs_norm, by='movieId') %>%
group_by(userId) %>%
summarize(b_movieId_userId = mean(rating - mu_edx_rating - b_movieId))
predicted_movieId_userId_norm <- validation %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
mutate(rating = mu_edx_rating + b_movieId + b_movieId_userId )
model_movieId_userId_rmse <-
RMSE(predicted_movieId_userId_norm$rating , validation$rating)
rmse_report <- rbind(rmse_report, tibble(method = "Movie & User effect Model",
RMSE = model_movieId_userId_rmse))
rmse_report%>%knitr::kable()
# Creating model based on Movie Id, User & Year Release impact
movieId_userId_yearRelease_avgs_norm <- edx %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
group_by(year_release) %>%
summarize(b_movieId_userId_yearRelease =
mean(rating - mu_edx_rating - b_movieId - b_movieId_userId))
predicted_movieId_userId_yearRelease_norm <- validation %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
left_join(movieId_userId_yearRelease_avgs_norm, by='year_release') %>%
mutate(rating = mu_edx_rating + b_movieId + b_movieId_userId +
b_movieId_userId_yearRelease )
model_movieId_userId_yearRelease_rmse <-
RMSE(predicted_movieId_userId_yearRelease_norm$rating , validation$rating)
rmse_report <-
rbind(rmse_report, tibble(method = "Movie, User & Release Year effect Model ",
RMSE = model_movieId_userId_yearRelease_rmse))
rmse_report%>%knitr::kable()
# Creating model based on Movie Id, User & Year Release & Evaluation Year impact
movieId_userId_yearRelease_yearEval_avgs_norm <- edx %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
left_join(movieId_userId_yearRelease_avgs_norm, by='year_release') %>%
group_by(year_eval) %>%
summarize(b_movieId_userId_yearRelease_yearEval =
mean(rating - mu_edx_rating - b_movieId - b_movieId_userId -
b_movieId_userId_yearRelease))
predicted_movieId_userId_yearRelease_yearEval_norm <- validation %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
left_join(movieId_userId_yearRelease_avgs_norm, by='year_release') %>%
left_join(movieId_userId_yearRelease_yearEval_avgs_norm, by='year_eval') %>%
mutate(rating = mu_edx_rating + b_movieId + b_movieId_userId +
b_movieId_userId_yearRelease + b_movieId_userId_yearRelease_yearEval)
model_movieId_userId_yearRelease_yearEval_rmse <-
RMSE(predicted_movieId_userId_yearRelease_yearEval_norm$rating ,
validation$rating)
rmse_report <-
rbind(rmse_report,
tibble(method = "Movie, User, Release Year & Evaluation Year effect Model",
RMSE = model_movieId_userId_yearRelease_yearEval_rmse))
rmse_report%>%knitr::kable()
# Creating model based on Regularziation of Movie Id, User & Year Release & Evaluation Year & Evaluation Year
lambdas <- seq(0, 10, 0.25)
rmses <- sapply(lambdas, function(l){
movieId_avgs_norm <- edx %>% group_by(movieId) %>%
summarize(b_movieId = sum(rating - mu_edx_rating)/(n()+l))
movieId_userId_avgs_norm <- edx %>%
left_join(movieId_avgs_norm, by='movieId') %>%
group_by(userId) %>%
summarize(b_movieId_userId = sum(rating - mu_edx_rating - b_movieId)/(n()+l))
movieId_userId_yearRelease_avgs_norm <- edx %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
group_by(year_release) %>%
summarize(b_movieId_userId_yearRelease =
sum(rating - mu_edx_rating - b_movieId - b_movieId_userId)/(n()+l))
movieId_userId_yearRelease_yearEval_avgs_norm <- edx %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
left_join(movieId_userId_yearRelease_avgs_norm, by='year_release') %>%
group_by(year_eval) %>%
summarize(b_movieId_userId_yearRelease_yearEval =
sum(rating - mu_edx_rating - b_movieId - b_movieId_userId -
b_movieId_userId_yearRelease)/(n()+l))
predicted_movieId_userId_yearRelease_yearEval_norm <- validation %>%
left_join(movieId_avgs_norm, by='movieId') %>%
left_join(movieId_userId_avgs_norm, by='userId') %>%
left_join(movieId_userId_yearRelease_avgs_norm, by='year_release') %>%
left_join(movieId_userId_yearRelease_yearEval_avgs_norm, by='year_eval') %>%
mutate(rating = mu_edx_rating + b_movieId + b_movieId_userId +
b_movieId_userId_yearRelease + b_movieId_userId_yearRelease_yearEval)
return(RMSE(predicted_movieId_userId_yearRelease_yearEval_norm$rating ,
validation$rating))
})
qplot(lambdas, rmses)
lambda <- lambdas[which.min(rmses)]
lambda
rmse_report <-
rbind(rmse_report,
tibble(method = " Regularized Movie, User, Release Year & Evaluation Year effect Model",
RMSE = min(rmses)))
rmse_report%>%knitr::kable()
|
75dbed5dcd0c32da98beae424376effb617cf13f | 9083fbc538fc22e5deef1c38238cbb98eec0eca9 | /GENESIS_R/GENESIS/nullModelTestPrep.R | 7db785fe44b993cc178b479b01f57de9c614b4e0 | [] | no_license | drjingma/REHE | 8e75c988d10171cd37a49bc7630f0e62d4cfcfeb | 13490695d3035e2af80011066272a672cb79e531 | refs/heads/master | 2023-03-17T09:33:15.836142 | 2020-12-15T05:57:11 | 2020-12-15T05:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,915 | r | nullModelTestPrep.R |
## takes a null model and prepares specific arguments to streamline the testing
nullModelTestPrep <- function(nullmod){
Y <- nullmod$workingY
X <- nullmod$model.matrix
C <- nullmod$cholSigmaInv
if (length(C) > 1) { ## n by n cholSigmaInv (may be Diagonal)
if (is(C, "Matrix")) X <- Matrix(X)
CX <- crossprod(C, X)
CXCXI <- tcrossprod(CX, chol2inv(chol(crossprod(CX))))
# qrmod <- base::qr(CX)
# Ytilde <- base::qr.resid(qrmod, as.matrix(crossprod(C, Y)))
CY <- crossprod(C, Y)
Ytilde <- CY - tcrossprod(CXCXI, crossprod(CY, CX))
resid <- C %*% Ytilde
# resid <- tcrossprod(C, crossprod(nullmod$resid.marginal, C))
} else { ## cholSigmaInv is a scalar
CX <- C*X
CXCXI <- tcrossprod(CX, chol2inv(chol(crossprod(CX))))
# qrmod <- base::qr(CX)
# Ytilde <- base::qr.resid(qrmod, as.matrix(C*Y))
CY <- C*Y
Ytilde <- CY - tcrossprod(CXCXI, crossprod(CY, CX))
resid <- C*Ytilde
# resid <- nullmod$resid.marginal*C^2
}
# compute residual sum of squares under the null model
RSS0 <- as.numeric(crossprod(Ytilde))
return(list(Ytilde = Ytilde, resid = resid, CX = CX, CXCXI = CXCXI, RSS0 = RSS0))
# return(list(Ytilde = Ytilde, resid = resid, CX = CX, CXCXI = CXCXI, qr = qrmod))
}
## adjust genotypes for correlation structure and fixed effects
## this replaces calcXtilde; changed the name to be less confusing; X is covariates and G is genotypes
calcGtilde <- function(nullmod, G){
C <- nullmod$cholSigmaInv
if(length(C) > 1){ # n by n cholSigmaInv (may be Diagonal)
CG <- crossprod(C, G)
}else{ # cholSigmaInv is a scalar
CG <- C*G
}
# calculate Gtilde
nrowG <- as.numeric(nrow(CG))
ncolG <- as.numeric(ncol(CG))
if(length(C) == 1 || nrowG*ncolG <= 2^31){
Gtilde <- CG - tcrossprod(nullmod$CXCXI, crossprod(CG, nullmod$CX))
# base::qr.resid(nullmod$qr, CG) # QR seems to be slower unexpectedly
}else{
# too large when G sparse; break into multiple blocks
nblock <- ceiling(nrowG*ncolG/2^31)
blocks <- unname(split(1:ncolG, cut(1:ncolG, nblock)))
Gtilde <- list()
for(i in 1:length(blocks)){
Gtilde[[i]] <- as.matrix(CG[,blocks[[i]]] - tcrossprod(nullmod$CXCXI, crossprod(CG[,blocks[[i]]], nullmod$CX)))
}
Gtilde <- do.call(cbind, Gtilde)
}
return(Gtilde)
}
## adjust genotypes for correlation structure and fixed effects
# calcXtilde <- function(nullmod, G){
# C <- nullmod$cholSigmaInv
# if (length(C) > 1) { ## n by n cholSigmaInv (may be Diagonal)
# M1 <- crossprod(C, G)
# } else { ## cholSigmaInv is a scalar
# M1 <- G * C
# }
# rm(G)
# Xtilde <- M1 - tcrossprod(nullmod$CXCXI, crossprod(M1, nullmod$CX))
# return(Xtilde)
# }
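# --- Illustrative usage (added for clarity; not part of the GENESIS source). A toy null
# --- model exercising the scalar cholSigmaInv branch of the two functions above; every
# --- object name below is made up for the example.
# set.seed(1)
# n <- 100
# toy_null <- list(workingY = rnorm(n),
#                  model.matrix = cbind(1, rnorm(n)),
#                  cholSigmaInv = 1)            # scalar => independent, unit-variance errors
# toy_null <- c(toy_null, nullModelTestPrep(toy_null))
# G <- matrix(rbinom(n * 5, 2, 0.3), nrow = n)  # 5 dummy genotype columns
# Gtilde <- calcGtilde(toy_null, G)             # genotypes adjusted for the fixed effects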
|
bc6755b3a0ec8205d3aadf259caad1c8f7a0013e | 3cc46de0679d5d63bed8eacd7db453ba122aa4c3 | /R/t1_analysis.R | 2a8916edce906e88f8e7243449f83be7b3802d21 | [] | no_license | fontikar/egerniasl | af8aaeb0f5eca77e741edaa7d5e5fc0efabfc31c | 367ee5ebf668e5008e90843fc0fe111190e6f66f | refs/heads/master | 2020-12-12T17:06:43.931493 | 2016-12-09T03:30:08 | 2016-12-09T03:30:08 | 52,262,907 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,416 | r | t1_analysis.R | #Setting working directory
getwd()
#setwd("C:/Users/xufeng/Dropbox/social learning/output/data/")
setwd("~/Dropbox/Egernia striolata social learning/")
#load library you need
library(plyr)
library(MASS)
library(lme4)
library(plyr)
library(survival)
#Read data
instrumdat <- read.csv("output/data/task1_finaldat.csv", stringsAsFactors = FALSE)
head(instrumdat) #261 obs
str(instrumdat)
#Changing variable types
instrumdat$LizardID <- as.factor(instrumdat$LizardID)
instrumdat$Batch <- as.factor(instrumdat$Batch)
instrumdat$Treatment <- as.factor(instrumdat$Treatment)
instrumdatlt <- as.factor(instrumdat$lt)
instrumdat$Date <-as.Date(instrumdat$Date, format="%m/%d/%Y")
instrumdat$Time <- as.factor(instrumdat$Time)
#Getting Batch variable
#batchinfo <- assocdat[,1:2]
#str(batchinfo)
#str(instrumdat)
#batchinfo<-unique(batchinfo)
#length(unique(batchinfo$LizardID)) == length(unique(instrumdat$LizardID))
#sort(unique(batchinfo$LizardID)) == sort(unique(instrumdat$LizardID))
#instrumdat <- merge(instrumdat, batchinfo, by = "LizardID")
#str(instrumdat)
#write.csv(instrumdat, file ="task1_finaldat.csv")
# Exploration plots
hist(instrumdat$Latency)
instrumdat$log.latency<-log(instrumdat$Latency)
hist(instrumdat$log.latency)
#Descriptive statistics
length((unique(instrumdat[instrumdat$Treatment == "SL",2]))) #n = 15 for social learning treatment lizards
length((unique(instrumdat[instrumdat$Treatment == "C",2]))) # n = 13 for control lizards
#Proportion of lizards that learnt per trial
instrumdat[instrumdat$learnt == "0",]
table(instrumdat$Trial)
library(plyr)
instrum_proplearndat<- ddply(.data=instrumdat, .(Trial, Treatment), summarise, learnt = sum((lt==0)), sample_size = length(LizardID), proportion = round(learnt/sample_size, 2))
instrum_proplearndat$Treatment <- as.character(instrum_proplearndat$Treatment)
instrum_proplearndat$Treatment[instrum_proplearndat$Treatment == "SL"] <- "1"
instrum_proplearndat$Treatment[instrum_proplearndat$Treatment == "C"] <- "0"
instrum_proplearndat$Treatment <- as.factor(instrum_proplearndat$Treatment)
instrum_proplearndat <-instrum_proplearndat[with(instrum_proplearndat, order(Treatment)), ]
in_condata <-instrumdat[instrumdat$Treatment == "C",]
length(unique(in_condata$LizardID))
in_con_vec <-as.vector(table(in_condata$lt, in_condata$Trial)[1,])
in_socdata <-instrumdat[instrumdat$Treatment == "SL",]
length(unique(in_socdata$LizardID))
in_soc_vec <-as.vector(table(in_socdata$lt, in_socdata$Trial)[1,])
instrum_proplearndat$numlearnt <- append(in_con_vec, in_soc_vec)
instrum_proplearndat$proportion <- instrum_proplearndat$numlearnt/instrum_proplearndat$sample_size
SLprop <-instrum_proplearndat[instrum_proplearndat$Treatment == "1",]
Cprop <-instrum_proplearndat[instrum_proplearndat$Treatment == "0",]
#Plotting figure proportion learnt over trials
pdf("Figure1A-B.pdf", 13,7)
par(mfrow=c(1,2), mar = c(4, 5, 1.5, 1.5), cex.axis=1.5, mai=c(1,1,0.6,0.2), las=1)
plot(proportion~Trial, data=instrum_proplearndat, pch=c(1,19), col=("black"), cex=1.5, ylim = c(0,1), ann=F, xaxt = "n", type="n")
axis(1, at=c(1:10))
title(ylab = list("Proportion of sample that learnt", cex=1.5),line=3)
title(xlab = list("Trial", cex=1.5),line=2.5)
points(proportion~Trial, data=SLprop, cex=1.5, col="black", pch=19)
points(proportion~Trial, data=Cprop, cex=1.5, pch=1)
lines(proportion~Trial, data=SLprop, lwd=2)
lines(proportion~Trial, data=Cprop, lwd=2, lty=5)
legend(7.5, 0.1, c("Control", "Social"), lty= c(1, 5), pch=c(1,19), cex=1.2, bty='n')
mtext("a)", adj = -0.15, padj = -0.20, cex=1.4)
####Cox Proportional Hazard analysis
instrum_surv_dat<- ddply(.data=instrumdat, .(LizardID, Treatment, Batch), summarise, Time = sum(lt), Event = unique(learnt))
instrum_surv_dat$Treatment <- as.character(instrum_surv_dat$Treatment)
instrum_surv_dat[instrum_surv_dat$Treatment == "SL", 2] <- "1"
instrum_surv_dat[instrum_surv_dat$Treatment == "C", 2] <- "0"
instrum_surv_dat$Treatment <- as.factor(instrum_surv_dat$Treatment)
insurv.fit1 <- coxph(Surv(Time, Event)~strata(Treatment)*Batch, data=instrum_surv_dat)
summary(insurv.fit1)
insurv.fit2 <- coxph(Surv(Time, Event)~strata(Treatment)+Batch, data=instrum_surv_dat)
summary(insurv.fit2)
summary(survfit(insurv.fit2))
insurv.fit2a <- coxph(Surv(Time, Event)~Treatment+Batch, data=instrum_surv_dat)
summary(insurv.fit2a)
cox.zph(insurv.fit2)
cox.zph(insurv.fit2a)
cox.zph(insurv.fit1)
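# Added sketch (not in the original script): hazard ratios with 95% CIs from the
# unstratified fit, on the exponentiated scale.
exp(cbind(HR = coef(insurv.fit2a), confint(insurv.fit2a)))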
#Plotting these curves
pdf("Figure1A-B.pdf", 13,7)
par(mfrow=c(1,2), mar = c(4, 5, 1.5, 1.5), cex.axis=1.5, mai=c(1,1,0.6,0.2), las=1)
plot(survfit(insurv.fit2), lty=c(2,1), lwd=2)
#Control
con_x_trial <- c(6, 6, 8)
summary(survfit(insurv.fit2))
con_y_up <- c(1,0.615, 0.615)
con_y_low <- c(1, 0.066, 0.066)
#lines(con_x_trial, con_y_low)
#lines(con_x_trial, con_y_up)
#Social
soc_x_trial <- c(8,8,10,10,15,15)
summary(survfit(insurv.fit2))$time[11:13]
soc_y_up <- c(1, round(rep(summary(survfit(insurv.fit2))$upper[11:13], each =2)[1:5],2))
soc_y_low <- c(1,round(rep(summary(survfit(insurv.fit2))$lower[11:13], each =2)[1:5],2))
lines(soc_x_trial, soc_y_low, lty= 3)
lines(soc_x_trial, soc_y_up, lty= 3)
#legend(0.3,0.1, c("Control", "Social"), lty= c(2, 1), bty='n', cex=1.2, lwd = 2, x.intersp = 1.5)
title(ylab = list("Proportion of lizards that have not learnt", cex=1.5), line =3)
title(xlab = list("Trial Number", cex=1.5))
mtext("a)", adj = -0.15, padj = -0.20, cex=1.4)
#dev.off()
# Mean number of trials taken to learn
sumdat2 <-ddply(.data=instrumdat, .(LizardID, Treatment, Batch), summarise, trials_to_learn=sum((lt)), total_trials=length(lt))
sumdat2
sumdat2$Treatment <-as.character(sumdat2$Treatment)
sumdat2$Treatment[sumdat2$Treatment == "SL"] <- "1"
sumdat2$Treatment[sumdat2$Treatment == "C"] <- "0"
sumdat2$Treatment <-as.factor(sumdat2$Treatment)
str(sumdat2)
task1mod.1<-glm.nb(trials_to_learn~Treatment+Batch, data=sumdat2)
Table.1A <- data.frame(matrix(nrow = 3, ncol=2))
rownames(Table.1A) <- c("Intercept", "Treatment (SOC)", "Batch (2)")
colnames(Table.1A) <- c("Est", "SE")
summary(task1mod.1)$coefficients
#Est
Table.1A[1,1] <- round(summary(task1mod.1)$coefficients[1,1],2)
Table.1A[2,1] <- round(summary(task1mod.1)$coefficients[2,1],2)
Table.1A[3,1] <- round(summary(task1mod.1)$coefficients[3,1],2)
#SE
Table.1A[1,2] <- round(summary(task1mod.1)$coefficients[1,2],2)
Table.1A[2,2] <- round(summary(task1mod.1)$coefficients[2,2],2)
Table.1A[3,2] <- round(summary(task1mod.1)$coefficients[3,2],2)
write.csv(Table.1A, file = "Table.1A.csv")
##################
newdat <-data.frame(Treatment = c(0, 1), Batch = c(1,1))
newdat$Treatment <- as.factor(newdat$Treatment)
newdat$Batch <- as.factor(newdat$Batch)
trials_pred <-predict.glm(task1mod.1, type= "response", se.fit = T,newdata=newdat)
newdat$trials_pred <- trials_pred$fit
newdat$trials_pred_SE <- trials_pred$se.fit
newdat$trials_pred_U <- trials_pred$fit + trials_pred$se.fit
newdat$trials_pred_L <- trials_pred$fit - trials_pred$se.fit
#Plotting figure Mean number of trials to learn
#setwd("~/Dropbox/Egernia striolata social learning/output/fig/")
#pdf("Task1_Meantrials.pdf", 7.66, 7.55)
#par(xaxt="n", mar = c(4, 4.5, 1.5, 2), cex.axis=1.5, mai=c(0.5,0.7,0.5,0.5))
barplot(newdat$trials_pred, ylim = c(0, 10), xlim =c(0,3.5), space=0.5, col=c("white", "grey"))
box()
title(ylab = list("Mean number of trials taken to learn", cex=1.5))
mtext("Control", at= 1, side = 1, line = 1.2 ,cex = 1.5)
mtext("Social", at=2.5, side =1,line = 1.2, cex= 1.5)
up.x<-c(1,2.5)
low.x<-up.x
arrows(x0 = up.x, y0 = newdat$trials_pred, x1 = up.x, y1 = newdat$trials_pred_U, length = 0.2, angle = 90, lwd=2)
arrows(x0 = low.x, y0 = newdat$trials_pred, x1 = low.x, y1 = newdat$trials_pred_L, length = 0.2, angle = 90, lwd=2)
segments(x0 =1, y0=8, x1 = 2.6, y1 =8, lwd= 2)
text(x=1.8, y=8.5, labels="n.s.", cex=1.5, font=1)
mtext("b)", adj = -0.15, padj = -0.2, cex=1.4)
dev.off()
#Plotting figure Mean number of trials to learn
par(xaxt="n", mar=c(3,6,3,3))
pdf("Task1_Meantrials.pdf", 7.66, 7.55)
barplot(sumdat3$mean_number_trials, ylim = c(0, 10), xlim =c(0,3.5), space=0.5, col=c("white", "grey"), cex.axis = 1.5)
box()
title(ylab = list("Mean number of trials taken to learn", cex=1.5))
mtext("Control", at= 1, side = 1, line = 1 ,cex = 1.5)
mtext("Social", at=2.5, side =1,line =1, cex= 1.5)
up.x<-c(1,2.5)
low.x<-up.x
arrows(x0 = up.x, y0 = sumdat3$mean_number_trials, x1 = up.x, y1 = sumdat3$upper, length = 0.2, angle = 90, lwd=2)
arrows(x0 = low.x, y0 = sumdat3$mean_number_trials, x1 = low.x, y1 = sumdat3$lower, length = 0.2, angle = 90, lwd=2)
segments(x0 =1, y0=7, x1 = 2.6, y1 =7, lwd= 2)
text(x=1.8, y=7.5, labels="n.s.", cex=1.5, font=1)
dev.off()
#Mean latency
stderror<-function(x){
sd(x)/(sqrt(length(x)))
}
latdat <-ddply(.data = instrumdat, .(LizardID, Treatment), summarise, mean_latency = mean(Latency, na.rm=T))
latdat2 <-ddply(.data = latdat, .(Treatment), summarise, mean_latency2 = mean(mean_latency), SE = stderror(mean_latency), upper = mean_latency2+SE, lower = mean_latency2-SE )
latdat2$value <- c(1.5,3.5)
latdat2
#Model testing difference in latency between treatment
latfit<-glm(mean_latency~Treatment, data=latdat)
summary(latfit)
par(xaxt="n", mar=c(3,6,3,3))
barplot(latdat2$mean_latency2, ylim = c(0, 1000), xlim =c(0,3.5), space=0.5, col=c("white", "grey"), cex.axis =1.5)
box()
title(ylab = list("Mean latency to flip lid (s)", cex=1.5), line = 3)
mtext("Control", at= 1, side = 1, line = 1 ,cex = 1.5)
mtext("Social", at=2.5, side =1,line =1, cex= 1.5)
up.value<-c(1, 2.5)
down.value<-up.value
arrows(x0 = up.value, y0 = latdat2$mean_latency2, x1 = up.value, y1 = latdat2$upper, length = 0.2, angle = 90, lwd=2)
arrows(x0 = down.value, y0 = latdat2$mean_latency2,, x1 = down.value, y1 = latdat2$lower, length = 0.2, angle = 90, lwd=2)
segments(x0 =1, y0=850, x1 = 2.6, y1 =850, lwd= 2)
text(x=1.8, y=900, labels="P < 0.001", cex=1.5, font=1)
##############
#Number of successful attempts
learningphase<-instrumdat[instrumdat$lt == 1, ]
learningphase$LizardID<-factor(learningphase$LizardID)
str(learningphase)
incordat<-ddply(.data=learningphase, .(LizardID,Treatment), summarise, correct_choice=sum((Correct==1)), total=length(Correct), incorrect_choice=total-correct_choice)
incordat
stderror<-function(x){
sd(x)/(sqrt(length(x)))
}
meandat<-ddply(.data=incordat, .(Treatment), summarise, mean_corr=mean(correct_choice), se_corr=stderror(correct_choice))
meandat$value <- c(1.5, 3.5)
newdat <- data.frame(Treatment = c("C", "SL"))
incorfit<-glm.nb(correct_choice~Treatment, data=incordat)
summary(incorfit)
incordpred<-predict.glm(incorfit, newdata=newdat, se.fit = TRUE, type = "response")
meandat$se_corr<- incordpred$se.fit
meandat$upper <- meandat$mean_corr+meandat$se_corr
meandat$lower <- meandat$mean_corr-meandat$se_corr
par(xaxt="n", mar=c(3,6,3,3))
barplot(meandat$mean_corr, ylim = c(0, 10), xlim =c(0,3.5), space=0.5, col=c("white", "grey"), cex.axis=1.5)
box()
title(ylab = list("Mean number of successful attempts", cex=1.5))
mtext("Control", at= 1, side = 1, line = 1 ,cex = 1.5)
mtext("Social", at=2.5, side =1,line =1, cex= 1.5)
up.value<-c(1, 2.5)
down.value<-up.value
arrows(x0 = up.value, y0 = meandat$mean_corr, x1 = up.value, y1 = meandat$upper, length = 0.2, angle = 90, lwd=2)
arrows(x0 = down.value, y0 = meandat$mean_corr,, x1 = down.value, y1 = meandat$lower, length = 0.2, angle = 90, lwd=2)
segments(x0 =1, y0=8, x1 = 2.6, y1 =8, lwd= 2)
text(x=1.8, y=8.5, labels="n.s.", cex=1.5, font=1)
#######
trialdat<-ddply(.data=instrumdat, .(LizardID, Treatment), summarise, Number_of_Trials=max(Trial))
write.csv(trialdat, "Instrumental_Table.csv")
#####
# Robustness of learning criteria
split1<-split(instrumdat, instrumdat$LizardID)
Robust <- function(x){
  start <- sum(x$lt == 1) + 1
  test_1 <- x[start:nrow(x), "Correct"]
  aftertrials <- length(test_1)
  correct_after <- sum(test_1 == 1)
  propcorrect_after <- round(correct_after/aftertrials, 2)
  data.frame(start, aftertrials, correct_after, propcorrect_after)
}
Criteria_robust<-lapply(split1, function(x) Robust(x))# 3 lizards below 80% mark
split1
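# Added for convenience (not in the original script): collapse the per-lizard list
# returned above into one table for inspection.
robust_summary <- do.call(rbind, Criteria_robust)
robust_summary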
#Motivation
unprocessed1 <-read.csv("data/Task1.csv")
unprocessed1$TubID <- as.factor(unprocessed1$TubID)
unprocessed1$LizardID <- as.factor(unprocessed1$LizardID)
str(unprocessed1)
rawdat1 <- split(unprocessed1, unprocessed1$LizardID)
NaTest <- function(x){
  NoAttempt <- sum(is.na(x$Correct))
  TrialsGiven <- max(x$Trial)
  PropAttempt <- round((TrialsGiven - NoAttempt)/TrialsGiven, 2)
  ReachCriteria <- PropAttempt >= 0.85
  data.frame(NoAttempt, TrialsGiven, PropAttempt, ReachCriteria)
}
motivdat1 <- as.vector(lapply(rawdat1, function(x) NaTest(x)))
###################
#Survival analysis#
###################
instrum_learndat <- ddply(.data=instrumdat, .(LizardID, Trial, Treatment), summarise, lt = lt, did.it.learn = learnt)
instrum_learndat
instrum_surv_dat <- ddply(.data=instrum_learndat, .(LizardID, Treatment), summarise, Time = sum(lt), Event = unique(did.it.learn))
str(instrum_surv_dat)
instrum_surv_dat$Treatment <- as.character(instrum_surv_dat$Treatment)
instrum_surv_dat[instrum_surv_dat$Treatment == "SL", 2] <- "1"
instrum_surv_dat[instrum_surv_dat$Treatment == "C", 2] <- "0"
instrum_surv_dat$Treatment <- as.factor(instrum_surv_dat$Treatment)
#Data for control group
fit_instru_con<-survfit(Surv(Time[Treatment == "0"],Event[Treatment == "0"])~1,data=instrum_surv_dat)
par(mfrow=c(1,1), mar = c(4, 5, 1.5, 1.5), cex.axis=1.5, mai=c(1,1,0.6,0.2), cex.lab=1.5)
plot(fit_instru_con, xlab="Trial Number")
title(ylab = list("Probability of NOT learning", cex=1.5), line =3.5)
#plotting lines for social learning group
fit_instru_sock<-survfit(Surv(Time[Treatment == "1"],Event[Treatment == "1"])~1,data=instrum_surv_dat)
summary(fit_instru_sock)
plot(fit_instru_sock, xlab="Trial Number")
x<-c( seq(0,6), 6, seq(6,7))
length(x)
y<-c(1,1,1,1,1,1,1,0.0667,0.0667, 0.0667)
length(y)
lines(x,y,col="orange2",lwd=2)
up<-c(1,1,1,1,1,1,1,0.443,0.443, 0.443)
length(up)
lines(x,up,col="orange2",lwd=2, lty= 3)
lo<-c(1,1,1,1,1,1,1,0.01 ,0.01, 0.01 )
length(lo)
lines(x,lo,col="orange2",lwd=2, lty= 3)
###################
wilcox.test(0.15384615,0.06666667)
# W=1, p=1
|
314e75dbebd8c66c4d9247beab5c3f1a885dccf5 | cdf7ceb2b0aeb8e5ab46e2ffdf8358682b725b08 | /0.3 Fit simple GP model to simulated data.R | d79eef8d834084b5d413721b3cd6d98b6ff3cf16 | [] | no_license | jburos/explore-GP-models | 69aa7cdfe342ef2284bae8a349ea2a83a945d293 | 590f8a9f7d485a15a3fd174955d7cc37e661bc6d | refs/heads/master | 2021-01-13T14:56:22.099918 | 2016-12-16T14:46:38 | 2016-12-16T14:46:38 | 76,661,277 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | r | 0.3 Fit simple GP model to simulated data.R | library(rstan)
library(tidyverse)
library(ggplot2)
library(lazyeval)
source('sim_data.function.R')
gp2 <- "
data {
int<lower=1> N;
vector[N] x;
vector[N] y;
}
transformed data {
vector[N] mu;
for (i in 1:N) mu[i] = 0;
}
parameters {
real<lower=0> eta_sq;
real<lower=0> inv_rho_sq;
real<lower=0> sigma_sq;
}
transformed parameters {
real<lower=0> rho_sq;
rho_sq = inv(inv_rho_sq);
}
model {
matrix[N, N] Sigma;
// off-diagonal elements
for (i in 1:(N-1)) {
for (j in (i+1):N) {
Sigma[i, j] = eta_sq * exp(-rho_sq * pow(x[i] - x[j],2));
Sigma[j, i] = Sigma[i, j];
}
}
// diagonal elements
for (k in 1:N)
Sigma[k, k] = eta_sq + sigma_sq; // + jitter
eta_sq ~ cauchy(0, 5);
inv_rho_sq ~ cauchy(0, 5);
sigma_sq ~ cauchy(0, 5);
y ~ multi_normal(mu, Sigma);
}
"
df <- sim_data(n=1000, sigma_sq=0.1, eta_sq=10, rho_sq=0.1)
ggplot(df, aes(x=x, y=y)) + geom_line()
standata <- list(N=nrow(df), x=df$x, y=df$y)
fit <- rstan::stan(model_code=gp2, data=standata)
print(fit)
|
e5a6fc954d5bf467479582bcb879a04fe50140a9 | 1ff5948cc363d8a195697c5ea3ae3e8505c7898d | /R/createGif.R | b93e5d7d5c2691c5a9e9a5ec92ffdca11417a0d2 | [] | no_license | MazamaScience/TBCellGrowth | 35399ef9918343145144c4330915f403d135a0b8 | 92e258ea7414361ad65136f3493e9ab9fdf16495 | refs/heads/master | 2020-12-11T03:32:19.119655 | 2016-04-19T22:00:30 | 2016-04-19T22:00:30 | 38,266,687 | 0 | 0 | null | 2015-06-29T19:25:26 | 2015-06-29T19:25:26 | null | UTF-8 | R | false | false | 1,816 | r | createGif.R | #' @export
#' @title Creates a gif animation.
#' @param dir the directory of images to read
#' @param filename name of output gif.
#' @param ext the image file extension to read.
#' @param framerate the number of frames per second in the output gif.
#' @param rescale dimensions of gif frames as a percent of original image size.
#' @description Creates an animated .gif file from a list of images using ImageMagick.
#' @return none
createGif <- function(dir, filename, ext="jpg", framerate=2, rescale=80) {
delay <- 100 / framerate
inPath <- paste0(normalizePath(path.expand(dir)), '/*.', ext)
outPath <- paste0(normalizePath(path.expand(dir)), '/', filename)
inPath <- gsub(" ", "\\\\ ", inPath)
outPath <- gsub(" ", "\\\\ ", outPath)
if ( Sys.info()['sysname'] == "Windows" ) {
shell(paste0('convert -resize "', rescale, '%" -delay ', delay, ' ', inPath, ' ', outPath))
} else {
system(paste0('convert -resize "', rescale, '%" -delay ', delay, ' ', inPath, ' ', outPath))
}
}
# Accepts a list of matrices and creates a gif
createGifFromList <- function(images, filename, delay=15, rescale=100) {
# Temporary directory location. We store images here
###tempDir <- "temp2234g12hdq5gp/"
tempDir <- tempdir()
# Create the directory. Hide warnings if it already exists.
dir.create(tempDir, showWarnings = FALSE)
# Save each frame as a .tiff file
for (i in 1:length(images)) {
j <- ifelse(i<10,paste0("0",i),i)
images[[i]][is.na(images[[i]])] <- 0
EBImage::writeImage(images[[i]],paste0(tempDir,j,".tiff"))
}
# Convert images to gif using a system call to ImageMagick
system(paste0('convert -resize "', rescale, '%" -delay ', delay, ' ', tempDir, '*.tiff ', filename))
# Delete temporary directory
unlink(tempDir, recursive=TRUE)
}
|
0cde6d528fd0ff7c2e5ebbb6020a7e7322822e12 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GLDEX/examples/fun.RMFMKL.ml.m.Rd.R | 8bd5aac3cc6856f96689b617278962ff158c5587 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 323 | r | fun.RMFMKL.ml.m.Rd.R | library(GLDEX)
### Name: fun.RMFMKL.ml.m
### Title: Fit RS generalised lambda distribution to data set using maximum
### likelihood estimation
### Aliases: fun.RMFMKL.ml.m
### Keywords: smooth
### ** Examples
## Fitting the normal distribution
# fun.RMFMKL.ml.m(data=rnorm(1000,2,3),fmkl.init=c(-0.25,1.5),leap=3)
|
70a77289f1d652bed6472ff576572b48d4d93c5e | 8f4687e2fd3c7fed3af1d443903d604921d2f289 | /R/centroidAssigner.R | e87a34e33c5754a0e06780f33383331191a6413f | [] | no_license | LuisLauM/ruisu | c446ae644abacc8f00cf6b64fea76891e111c683 | 76d8476586d5259f7014dfb3a7cc29b6efe2e017 | refs/heads/master | 2023-07-09T19:19:53.394787 | 2023-07-07T15:20:31 | 2023-07-07T15:20:31 | 41,390,085 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,317 | r | centroidAssigner.R |
# Main fx -----------------------------------------------------------------
#' @name centroidAssigner
#' @aliases centroidAssigner
#' @title Returns centroid values from grid codes.
#'
#' @description This function takes a vector of grid codes and returns centroids (center of mass) in lon-lat values.
#'
#' @param code Vector with grid codes.
#' @param what \code{character} indicating whether AIP or Marsden squares are going to be used as reference.
#' @param ... Extra arguments passed to selected method.
#'
#' @details For \code{what = "isoparalitoral"} (default), it allows to use argument \code{old}, for specifying whether
#' to use old AIP shape (\code{AIPShapefile_old}) o the new (\code{AIPShapefile_new}).
#'
#' @export
#'
#' @examples
#' isopCodes <- c(1050, 4043, 17073, 27103)
#' centroidAssigner(code = isopCodes, what = "isoparalitoral")
#'
#' marsdenCodes <- c("A6", "B8", "c12")
#' centroidAssigner(code = marsdenCodes, what = "marsden")
centroidAssigner <- function(code, what = "isoparalitoral", ...){
output <- switch(what,
isoparalitoral = centroidAssigner_isop(code = code, ...),
marsden = centroidAssigner_marsden(code = tolower(code), ...),
"Incorrect value for 'what'. See help for checking available methods.")
colnames(output) <- c("code", "lon", "lat")
rownames(output) <- seq(nrow(output))
return(output)
}
# Auxiliar fx -------------------------------------------------------------
centroidAssigner_isop <- function(code, old = TRUE){
isoAreas <- if(isTRUE(old)) ruisu::AIPData_old else ruisu::AIPData_new
index <- match(code, isoAreas$code)
output <- data.frame(code, isoAreas[index, c("x", "y")])
return(output)
}
centroidAssigner_marsden <- function(code){
code <- gsub(x = code, pattern = "[[:space:]]|[[:punct:]]", replacement = "", perl = TRUE)
letterCode <- gsub(x = code, pattern = "[0-9]", replacement = "", perl = TRUE)
numberCode <- an(gsub(x = code, pattern = "[a-z]", replacement = "", perl = TRUE))
letterCode <- -seq(3.5, 19.5)[sapply(letterCode, function(x) match(letters[1:17], x = x))]
numberCode <- -(numberCode + 69.5)
numberCode[is.na(letterCode)] <- NA
output <- data.frame(toupper(code), numberCode, letterCode, stringsAsFactors = FALSE)
return(output)
}
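# --- Added usage note (not part of the package source): the Marsden branch strips
# --- spaces/punctuation and is case-insensitive, so inputs like these are equivalent.
# centroidAssigner(code = c("A6", "a-6", " a 6 "), what = "marsden")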
|
df485889e9a58d67c18684b6ca7cb62cdb8c9a37 | 05b2d51dc5c9b4e3845d9c92223ab65f15d21450 | /examMarks/R/generateAnswerSheets.R | f3ad0fd8228bc8cc433e6f6ad253fc977ee38b0c | [] | no_license | ddavis3739/examMarks-R-package | ba83d37970d2f7d9893c0267ba25f638d0388113 | 4cd4e6a4c372beacc98ba210a4d8d066d1a4dde1 | refs/heads/master | 2020-07-30T09:31:07.897957 | 2019-09-22T16:50:15 | 2019-09-22T16:50:15 | 210,173,690 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,761 | r | generateAnswerSheets.R | #' @title Student Answers for Specific Student
#'
#' @description Outputs a student's random answers to a multiple choice exam in
#' the form of a dataframe given the number of questions asked and total number
#' of questions available. Answers can be from a to e, with NA's indicating the
#' question was not answered. If writeToFile = TRUE, then studentID and moduleID
#' must be provided so that the appropriate file name can be created. Files are
#' created as .tsv's.
#'
#' @param totalNumberofQuestions the total number of questions in an exam
#'
#' @param numberOfQuestionsToAnswer the amount of questions that the student is
#' asked
#'
#' @param writeToFile if TRUE, a file is created with the dataframe inside. The
#' default value is set to FALSE.
#'
#' @param moduleID a string identifying the name of the module. Only
#' needed if a file is being written.
#'
#' @param studentID a string or value identifying the student that took the exam
#' Only needed if a file is being written.
#'
#' @return a dataframe with a question and answer column with the option to
#' write dataframe to a file
#'
#' @examples
#' ## create answers for an exam with 100 questions and 30 questions asked
#' generateStudentAnswersForExam(100, 30)
#'
#' ## write the student's randomized answers to a file "BS281_answers_12.tsv"
#' generateStudentAnswersForExam(100, 30, writeToFile = TRUE,
#' moduleID = 'BS281', studentID = '12')
#'
#' @author Andrew Davis \email{[email protected]}
generateStudentAnswersForExam = function(totalNumberofQuestions,
numberOfQuestionsToAnswer,
writeToFile = FALSE, moduleID,
studentID){
# make sure numberOfQuestionsToAnswer < totalNumberofQuestions
stopifnot(numberOfQuestionsToAnswer < totalNumberofQuestions)
# grab a random subset of questions from the total number of questions
# number grabbed depends on numberOfQuestionsToAnswer
question = sort(sample.int(totalNumberofQuestions, numberOfQuestionsToAnswer))
# randomly generate the students answers for the questions asked
# NA values are included for questiosn that the student skipped
answer = sample(c(letters[1:5], NA), numberOfQuestionsToAnswer,
replace = TRUE)
# merge question and answer into dataframe for output
stuAnswers = data.frame(question, answer)
# write dataframe to file using moduleID and studentID
if(writeToFile == TRUE) {
filename = paste0(moduleID, '_answers_', studentID, '.tsv')
write.table(stuAnswers, file = filename,
row.names = F, quote = F, col.names = T)
}
# or just ourput dataframe to console
return(stuAnswers)
}
#' @title All Student Answers for a Given Exam
#'
#' @description Outputs random answers to a multiple choice exam for a given
#' module for all students. Answers can be from a to e, with NA's indicating the
#' question was not answered. If writeToFile = TRUE, then a folder is created
#' with all of the student answers. A file is also created with the list of
#' students that took a given exam. Files are created as .tsv's. If
#' readFromFiles = TRUE, then the arguments numberOfQuestions, allStudentIDs,
#' and examsPerSubject are read from files instead of from dataframes.
#'
#' @param moduleID a string identifying the name of the module.
#'
#' @param numberOfQuestions the dataframe that contains the amount of questions
#' each student needs to answer for each exam. The default value, questions, is
#' a dataframe included in the package.
#'
#' @param allStudentIDs the dataframe that contains the ID for each student and
#' what degree course they are on. The default value, students, is a dataframe
#' included in the package.
#'
#' @param examsPerSubject the dataframe that contains a dataframe that lists
#' what modules a given degree course takes. The first column should list
#' modules, the second column should list the options for options for
#' degree 1, and the third column should list options for
#' degree 2. The possible options are "Yes", "No", and "Optional". If a string
#' is supplied that is not one of the three then it is evaluated as "Yes". if a
#' module is "Optional", then a random amount of students is picked to take the
#' exam, with a higher number of students being more likely. The default value,
#' exams, is a dataframe included in the package, with degree 1 being
#' "Biological Sciences" and degree 2 being "Genetics".
#'
#' @param writeToFile if TRUE, a folder, named based on the moduleID is created
#' with a file for each student's answers. A file is also created that lists all
#' of the students that took the exam. The default value is set to FALSE.
#'
#' @param readFromFiles if TRUE, filenames are used to read in data for the
#' relevant arguments instead of dataframes within R.
#'
#' @param degreeNames if degree names are not "Biological Sciences" and
#' "Genetics" then a string should be entered with the two degree courses that
#' the student set belongs to.
#'
#' @return A list with 2 elements, a data frame of students that took the module
#' and a list of answers by each student. If writeToFile = TRUE, then files are
#' written instead.
#'
#' @examples
#' ## create answers for BS284 and output to console
#' generateAllStudentsAnswersForExam('BS284', writeToFile = FALSE)
#'
#' ## create files with student answer files and a list of students taking exam
#' generateAllStudentsAnswersForExam('BS284', writeToFile = TRUE)
#'
#' @author Andrew Davis \email{[email protected]}
generateAllStudentsAnswersForExam = function(moduleID,
numberOfQuestions = questions, allStudentIDs = students,
examsPerSubject = exams, writeToFile = FALSE, readFromFiles = FALSE,
degreeNames = NULL){
# read in files from arguements if arguements are interpreted as filenames
if(readFromFiles == TRUE){
    numberOfQuestions = read.table(file = numberOfQuestions, header = T)
# add total number fo questions asked for each exam to numberOfQuestions
totalQuestions = NULL
for(i in 1:5){
totalQuestions[i] = length(readLines(
paste0('correct_answers_BS28', i, '.dat')))
}
numberOfQuestions = cbind(numberOfQuestions, totalQuestions)
allStudentIDs = read.table(file = allStudentIDs, header = T)
examsPerSubject = read.table(file = examsPerSubject, header = T)
}
  # otherwise just interpret arguments as objects
else{ }
# edit degree names if degreeNames is not NULL
if(!is.null(degreeNames)) degree = degreeNames
else degree = c("Biological Sciences", "Genetics")
colnames(examsPerSubject) =c('module', degree[1], degree[2])
# add in stop and warning statements to ensure data is ran correctly
if(moduleID %in% examsPerSubject[,1] == FALSE){
stop('moduleID not listed in examsPerSubject')
}
stopifnot(names(examsPerSubject) == c("module", degree[1], degree[2]))
stopifnot(unique(allStudentIDs[,2]) == degree)
if(numberOfQuestions[numberOfQuestions[,1] == moduleID, 2] >
numberOfQuestions[numberOfQuestions[,1] == moduleID, 3]){
stop('Number of questions asked is more than in module exam answer key')
}
# subset students if only genetic students in module
if(examsPerSubject[examsPerSubject[,1] == moduleID, 2] == 'No'){
allStudentIDs = allStudentIDs[allStudentIDs[,2] == degree[2],]
}
# subset students if only biological sciences in module
else if(examsPerSubject[examsPerSubject[,1] == moduleID, 3] == 'No'){
allStudentIDs = allStudentIDs[allStudentIDs[,2] == degree[1],]
}
# subset students if optional for genetics
# select all biological sciences and a random subset of genetics
else if(examsPerSubject[examsPerSubject[,1] == moduleID, 3] == 'Optional'){
bioStudents = allStudentIDs[allStudentIDs[,2] == degree[1],]
geneticsOpt = allStudentIDs[allStudentIDs[,2] == degree[2],]
geneticsOpt = geneticsOpt[sample(1:nrow(geneticsOpt),
sample(1:nrow(geneticsOpt), 1,
prob = seq(.2, .8, .6/
(nrow(geneticsOpt) - 1)))),]
allStudentIDs = rbind(bioStudents, geneticsOpt)
}
# subset students if optional for biological sciences
# select all genetics and a random subset of biological sciences
else if(examsPerSubject[examsPerSubject[,1] == moduleID, 2] == 'Optional'){
    geneticsStudents = allStudentIDs[allStudentIDs[,2] == degree[2],]
    bioOpt = allStudentIDs[allStudentIDs[,2] == degree[1],]
bioOpt = bioOpt[sample(1:nrow(bioOpt), sample(1:nrow(bioOpt), 1,
prob = seq(.2, .8, .6/
(nrow(bioOpt) - 1)))),]
allStudentIDs = rbind(geneticsStudents, bioOpt)
}
# select random number fo students if module is optional for both degrees
else if(examsPerSubject[examsPerSubject[,1] == moduleID, 2] == 'Optional' &
examsPerSubject[examsPerSubject[,1] == moduleID, 3] == 'Optional'){
allStudentIDs = allStudentIDs[sample(1:nrow(allStudentIDs),
sample(1:nrow(allStudentIDs), 1,
prob = seq(.2, .8, .6/
(nrow(allStudentIDs) - 1)))),]
}
# if both degrees take course, then no need to subset students
else { }
# write student answers to files in a folder
if(writeToFile == TRUE) {
# create directory and change working directory to place files inside
dir.create(paste0(moduleID, 'studentAnswerFiles'))
setwd(paste0(moduleID, 'studentAnswerFiles'))
# use generateStudentAnswersForExam function to create student answers for
# all students
for(i in allStudentIDs[,1]) {
generateStudentAnswersForExam(
        numberOfQuestions[numberOfQuestions[,1] == moduleID, 3],
        numberOfQuestions[numberOfQuestions[,1] == moduleID, 2],
TRUE, moduleID, i)
}
# reset workign directory to original location
setwd('../')
# create file with list of students to be able to check what students were
# randomly selected
filename = paste('studentList_', moduleID, '.tsv', sep = '')
write.table(allStudentIDs, file = filename, col.names = TRUE,
row.names = FALSE)
}
# otherwise output student answers to console
else {
# use generateStudentAnswersForExam to create list of answers for each
# student with exact number of answers and questions contingent on moduleID
allStuAnswers = lapply(allStudentIDs[,1], generateStudentAnswersForExam,
totalNumberofQuestions =
                             numberOfQuestions[numberOfQuestions[,1] == moduleID, 3],
numberOfQuestionsToAnswer =
                             numberOfQuestions[numberOfQuestions[,1] == moduleID, 2])
names(allStuAnswers) = allStudentIDs[,1]
# create list with the dataframe of all students taking exam and the list of
# student answers
allStuAnswers = list(allStudentIDs, allStuAnswers)
names(allStuAnswers) = c('student list', 'answers')
return(allStuAnswers)
}
}
#' @title Generate Exam Answer Key
#'
#' @description Outputs a randomized answer key for a given exam based on
#' number of questions. If writeToFile = TRUE, then moduleID must be prodived to
#' create the file name.
#'
#' @param numberOfQuestions a numerica value that specifies how many answers
#' should be generated for the key.
#'
#' @param writeToFile if TRUE, a file, named based on the moduleID, is created
#' with the answer key inside.
#'
#' @param moduleID a string that designates what the name of the module is. If
#' writeToFile = TRUE, then this arguement must be specified.
#'
#' @param ansOptions the possible answers in the multiple choice exam key. The
#' default value, letters[1:5], specifies that there are 5 different options for
#' each question, a, b, c, d, and e.
#'
#' @return a vector that contains a randomized answer for each question number.
#' If writeToFile = TRUE, then this vector is written to a file.
#'
#' @examples
#' ## create 100 question answer key
#' createAnswerKey(100)
#'
#' ## write answer to key yot file
#' createAnswerKey(100, writeToFile = TRUE, 'BS281')
#'
#' @author Andrew Davis \email{[email protected]}
createAnswerKey = function(numberOfQuestions, writeToFile = FALSE, moduleID,
ansOptions = letters[1:5]){
answer = sample(ansOptions, numberOfQuestions, replace = TRUE)
if(writeToFile == TRUE){
write.table(answer, file = paste0('correct_answers_', moduleID, '.dat'),
row.names = F, quote = F, col.names = 'answer')
}
else return(answer)
}
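# --- Illustrative end-to-end sketch (added; not part of the package code). The scoring
# --- rule below is hypothetical, just to show how the pieces fit together.
# key <- createAnswerKey(100)                        # answer key for a 100-question bank
# answers <- generateStudentAnswersForExam(100, 30)  # one student's randomized answers
# score <- sum(key[answers$question] == answers$answer, na.rm = TRUE)
# score / nrow(answers)                              # proportion correct of the 30 asked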
|
fd63363d44cbdd24f3e06954977123dc65a836e5 | 85b32dcd701ddf5ea9e95454fceb10687c03b2bb | /R/plot.rpsftm.R | 4b241c6d2db8040ab17d9cbbed9454f7e48762eb | [] | no_license | arallison/rpsftm | 4cdb97a9fc58bd60f9d8ad03d718d9611956025e | 466ad85e26949b8990f0fa704d20cfa0e9c7cb87 | refs/heads/master | 2021-01-17T22:20:52.103628 | 2016-01-05T17:25:34 | 2016-01-05T17:25:34 | 47,696,890 | 0 | 0 | null | 2015-12-09T14:39:52 | 2015-12-09T14:39:51 | null | UTF-8 | R | false | false | 668 | r | plot.rpsftm.R | #'Function used to plot the KM curves of the treatment-free transformed times
#'
#'@export
#'@title Plot Method
#'@name plot.rpsftm
#' @param x an object returned from the \code{\link{rpsftm}} function
#' @return a ggplot plot of the fitted KM curves
#' @author Simon Bond
#'
plot.rpsftm=function(x){
fit=x$fit
df=data.frame(Time=fit$time, Survival=fit$surv, upper=fit$upper, lower=fit$lower )
df$Group=rep(names(fit$strata),fit$strata)
ggplot2::ggplot( data=df,ggplot2::aes(x=Time, y=Survival, group=Group, lty=Group) )+
ggplot2::geom_step()+
ggplot2::ylim(0,1)+
ggplot2::labs(title="KM Plots of Transformed Treatment-Free Time")
} |
66e7febb083ab853bb62498e6921410eb4cde2b7 | 406863c152cebfe70acd9a1d23cf063aed08cfd4 | /4. guardian_text_analysis.R | 78b1f9cdf174c2b72f614ea4a44e2a6a4ec43443 | [] | no_license | lopesmf/R-Projects | cdb19faa2c25cecc9cdba5a5d68a18c921926456 | d7ae205169e1d4eb8a986d45577218ecbd1ffcfd | refs/heads/main | 2023-04-20T10:25:18.701726 | 2021-05-10T18:34:54 | 2021-05-10T18:34:54 | 366,140,270 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,268 | r | 4. guardian_text_analysis.R | # Topic Analysis in news about AfD in The Guardian Newspaper
# Maria Fernanda Lopes Ferreira Del Ducca
# au516257 201400917
# Clean list
rm(list = ls())
# Set Working Directory
setwd("~/Documents/Masters'/2.2018/Political Data Science/Final Exam/The Guardian")
# Install necessary packages
library(tm)
library(quanteda)
library(topicmodels)   # provides LDA() and perplexity(), used further down
library(ggplot2)       # provides qplot()/ggplot() for the plots below
# Load data
load("~/Documents/Masters'/2.2018/Political Data Science/Final Exam/The Guardian/afd_guardian_articles_full_text.RData")
###############
### Organizing data ###
#Formatting dates: Transforming factor to Date
afd$my_dates <- as.Date(afd$my_dates, format = "%Y-%m-%d")
## I need to divide my data into the 4 phases identified in the literature.
## This phases will serve as a way to analyse better the content I extracted from The Guardian website
## To have the 4 phases to analyse, I need to do two procedures.
#Selecting the phases in the timeline to do the analysis by period
phase_1 <- afd[afd$my_dates >="2013-01-01" & afd$my_dates <= "2014-05-31",]
phase_2 <- afd[afd$my_dates >="2014-06-01" & afd$my_dates <= "2015-07-30",]
phase_3 <- afd[afd$my_dates >="2015-08-01" & afd$my_dates <= "2017-09-30",]
phase_4 <- afd[afd$my_dates >="2017-10-01",]
###############
### Text Organization and Formatting ###
# Extract from my dataframe, as a value, only the texts from The Guardian
articles.guardian <- gsub(":", " ", afd$full_text, fixed = T)
# Using tokens to remove symbols, punctuation, separators, urls from the words in the texts
articles.guardian <- tokens(articles.guardian, what = "word",
remove_punct = T,
remove_symbols = T,
remove_separators = T,
remove_url = T,
remove_hyphens = T,
verbose = T)
# Transforming all words to lower case
articles.guardian <- tokens_tolower(articles.guardian)
# Removing english stopwords
articles.guardian <- tokens_remove(articles.guardian, stopwords("english"))
# Stem words
articles.guardian <- tokens_wordstem(articles.guardian)
# Remove stopwords after stem the text
articles.guardian <- tokens_remove(articles.guardian, stopwords("english"))
# Creating a dfm (document feature matrix) with the tokenized articles
guardian.dfm <- dfm(articles.guardian)
#############
##### 1st Analysis #####
#### Main Goal: Analyse the text from the entire data set from The Guardian
### After Procedure: Compare to the results I will get by analysing the text phase by phase
## Top Features Observation ##
# Check the top features in the document feature matrix
# This is to observe if there is any other non-wanted object in the to be analysed dataframe
topfeatures(guardian.dfm)
# Keep in the dfm only words that appear more than 4 articles
guardian.dfm <- dfm_trim(guardian.dfm, min_termfreq = 4)
# Selecting specific words (words that are not useful for my analysis) to be removed from my dataframe matrix
wordstoremove <- c("afd", "germani", "german", "said", "year", "link", "post",
"guardian", "merkel", "parti", "leader", "say", "AfD", "germany", "german",
"twitter", "tweet", "said",
"year", "merkel", "say",
"around", "although", "though",
"allow", "alpha", "blame", "call",
"ago", "airless", "afd",
"found", "tag", "facebook", "instagram",
"monday", "tuesday", "wednesday",
"thursday", "friday", "saturday", "sunday")
guardian.dfm <- dfm_remove(guardian.dfm, wordstoremove)
#Checking the top features after removing non-wanted words
topfeatures(guardian.dfm)
## Selecting features by importance in a document ##
# Create tf_idf-weighted dfm
# (The Tf-idf is the frequency of a term adjusted for how rarely it is used)
guardian.tfidf <- dfm_tfidf(guardian.dfm)
# Select from main dfm using its top features
guardian.dfm <- dfm_keep(guardian.dfm, names(topfeatures(guardian.tfidf, n = 1000)))
## Run the topic models ##
# Goal: Want to observe the words appearing in the topic models, where I have 4 randomized clusters
# Convert 'quanteda dfm' to 'tm dtm' #better to run the topic models
guardian.dtm <- convert(guardian.dfm, to = "topicmodels")
# Using Latent Dirichlet Allocation (LDA) to run the topic model (Choice of 4 clusters)
guardian.lda <- LDA(guardian.dtm, k = 4)
# review terms by topic
terms(guardian.lda, 15)
#############
# randomly sample test data
set.seed(45)
select <- sample(1:nrow(guardian.dtm), size = 10)
test <- guardian.dtm[select, ]
train <- guardian.dtm[!(1:nrow(guardian.dtm) %in% select), ]
n.tops <- 2:10
metrics <- data.frame(topics = n.tops,
perplexity = NA)
for(i in n.tops) { # NB: takes awhile to run
print(i)
est <- LDA(train, k = i)
metrics[(i - 1), "perplexity"] <- perplexity(est, newdata = test)
}
save(metrics, file = "lda_perplexity.RData")
qplot(data = metrics, x = topics, y = perplexity, geom = "line",
xlab = "Number of topics",
ylab = "Perplexity on test data") + theme_bw()
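# Added follow-up (not in the original script): read off the number of topics with the
# lowest held-out perplexity before refitting below.
best_k <- metrics$topics[which.min(metrics$perplexity)]
best_k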
############################################################
## RERUN WITH BETTER CHOICE OF k
############################################################
# run lda with 10 topics
lda <- LDA(guardian.dtm, k = 10)
save(lda, file = "dr_ft_keep.RData")
# examine output
terms(lda, 10)
# put topics into original data
afd$topic <- topics(lda)
# simple frequency
qplot(data = afd, x = topic, geom = "bar")
# add labels
afd$topic_lab <- factor(afd$topic,
                        levels = 1:10,
                        labels = paste("Topic", 1:10))
# better frequency
qplot(data = afd, x = topic_lab,
      geom = "bar", xlab = "",
      ylab = "Frequency", main = "Guardian AfD coverage: topic frequencies") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90))
|
11838687f092ddf05938cdf01ef6dc31c2a1309c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/satellite/examples/plot.Rd.R | 2c311a4ff0e6a77688ba2a2d9798ba0a29e8e96b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 485 | r | plot.Rd.R | library(satellite)
### Name: plot
### Title: Plot a Satellite object
### Aliases: plot plot,Satellite,ANY-method plot,Satellite-method
### ** Examples
## Not run:
##D ## sample data
##D path <- system.file("extdata", package = "satellite")
##D files <- list.files(path, pattern = glob2rx("LC08*.TIF"), full.names = TRUE)
##D sat <- satellite(files)
##D
##D ## display data without quality flag layer
##D bds <- getSatBCDE(sat)[1:11]
##D plot(sat, bcde = bds)
## End(Not run)
|
4682ce2936f667f6c11e916217eac01f0ac7a345 | f20c8919a5a46ec2503ec02807877c1e96a9cee0 | /CPR by Nationality.R | 9bc27ede4ae31c4822786faf15177eac59609d86 | [
"MIT",
"CC-BY-4.0"
] | permissive | Track20/JordanFertilityFP | 78aceac6560091530c80805198119c87dec02799 | 4f7d4cd2f7421386bef1d5e1cd299751d590efd9 | refs/heads/master | 2022-12-22T17:09:02.702079 | 2020-09-29T14:17:34 | 2020-09-29T14:17:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,233 | r | CPR by Nationality.R | # Jordan Feritlity and Family Planning
# Kristin Bietsch
# 09/18/20
# CPR by Nationality
library(survey)
library(tibble)
library(dplyr)
library(tidyr)
library(haven)
library(xlsx)
library(stringr)
library(questionr)
library(jtools)
library(huxtable)
library(broom)
library(ggplot2)
setwd("C:/Users/KristinBietsch/files/DHS Data/Jordan")
women17 <- read_dta("JOIR73FL.DTA")
women12 <- read_dta("JOIR6CFL.DTA")
nationality2012 <- read_dta("nationality2012.DTA")
women12 <- left_join(women12, nationality2012, by=c("v000", "v001", "v002", "v003"))
# just comparing jordanians to syrians
women17 <- women17 %>% mutate(syrian=case_when(s123a==3 ~ 1,
s123a==1 ~ 0),
cpr=case_when(v312!=0 ~ 1,
v312==0 ~ 0),
year=1)
women12 <- women12 %>% mutate(syrian=case_when(sh07==3 ~ 1,
sh07==1 ~ 0),
cpr=case_when(v312!=0 ~ 1,
v312==0 ~ 0),
year=0)
women17$sampleweights <- women17$v005/1000000
women12$sampleweights <- women12$v005/1000000
design.women17 <- svydesign(ids=~v021, strata=~v025, weights=~sampleweights, data=women17, nest=TRUE)
options(survey.lonely.psu="adjust")
design.women12 <- svydesign(ids=~v021, strata=~v025, weights=~sampleweights, data=women12, nest=TRUE)
options(survey.lonely.psu="adjust")
# JOINT
joint_women <- bind_rows(women17, women12)
joint_women <- joint_women %>% mutate(v001_joint=case_when(year==1 ~ v001,
year==0 ~ v001 + 1000),
v023_joint=case_when(year==1 ~ v023,
year==0 ~ v023 + 100))
sel <- select(joint_women, v001, v023, v001_joint, v023_joint, year)
design.womenjoint <- svydesign(ids=~v001_joint, strata=~v023_joint, weights=~sampleweights, data=joint_women, nest=TRUE)
options(survey.lonely.psu="adjust")
svyby( ~cpr , ~ syrian , subset(design.women17, v502==1), svymean)
summ(svyglm( cpr ~ + syrian , subset(design.women17, v502==1), family=quasibinomial() ), confint=TRUE)
svyby( ~cpr , ~ syrian , subset(design.women12, v502==1), svymean)
summ(svyglm( cpr ~ + syrian , subset(design.women12, v502==1), family=quasibinomial() ), confint=TRUE)
svyby( ~cpr , ~ year , subset(design.womenjoint, v502==1), svymean)
svyby( ~cpr , ~ year , subset(design.womenjoint, v502==1 & syrian==0), svymean)
svyby( ~cpr , ~ year , subset(design.womenjoint, v502==1 & syrian==1), svymean)
summ(svyglm( cpr ~ + year , subset(design.womenjoint, v502==1), family=quasibinomial() ), confint=TRUE)
summ(svyglm( cpr ~ + year , subset(design.womenjoint, v502==1 & syrian==0), family=quasibinomial() ), confint=TRUE)
summ(svyglm( cpr ~ + year , subset(design.womenjoint, v502==1 & syrian==1), family=quasibinomial() ), confint=TRUE)
summ(svyglm( cpr ~ + year + syrian , subset(design.womenjoint, v502==1), family=quasibinomial() ), confint=TRUE)
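# Added sketch (not in the original script): store the pooled model and tidy it into a
# table of odds ratios; this assumes broom's glm tidier accepts the svyglm fit.
joint_fit <- svyglm(cpr ~ year + syrian, subset(design.womenjoint, v502==1), family=quasibinomial())
broom::tidy(joint_fit, conf.int = TRUE, exponentiate = TRUE)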
|
df97f1f4b577ffd3b7a41c516268d2c0168c0f3c | bc88ee08398aa52f9eab0c4f608f45156f9551f3 | /tidytuesday_2020_week16/tidytuesday_2020_week16.r | 7ba2b701a1062fafbd2f04e9f1a1d33408c68238 | [] | no_license | lmaxfield/TidyTuesday | df432698acfb1311f53cc5c081d6c7021ba14299 | 999ee128c925ff39a9596d3ee8f4fd8a934a04c5 | refs/heads/master | 2020-09-14T16:45:28.588273 | 2020-04-18T17:08:59 | 2020-04-18T17:08:59 | 223,189,027 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,101 | r | tidytuesday_2020_week16.r |
library(tidyverse)
library(extrafont)
#library(here)
polls <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-14/polls.csv')
rankings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-14/rankings.csv')
my_theme <- function() {
fun <- theme(text = element_text(family = "Roboto Condensed"),
rect = element_blank(),
legend.position = 'none',
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_line(colour = "grey"),
panel.grid.minor.y = element_blank(),
axis.ticks.x = element_line(colour = "grey"),
axis.ticks.y = element_blank(),
axis.title = element_text(size = 12),
plot.title = element_text(size = 14),
axis.title.y = element_blank()
)
}
top_artists <- rankings %>%
group_by(artist) %>%
summarise(total_points = sum(points), num_songs = n()) %>%
mutate(points_per_song = total_points/num_songs) %>%
arrange(desc(total_points)) %>%
head(10)
rankings %>%
select(ID, title, artist, year, points) %>%
right_join(top_artists, by = c("artist")) %>%
arrange(artist, desc(points)) %>%
group_by(artist) %>%
mutate(artist_song_number = row_number()) %>%
ggplot(aes(x=reorder(artist, (.$total_points)), y=points)) +
geom_bar(aes(fill=reorder(artist_song_number, (points))), stat = "identity", colour = 'grey') +
scale_fill_viridis_d(direction = -1) +
scale_x_discrete(labels = function(x) str_wrap(str_replace_all(x, 'foo', ' '), width = 20)) +
labs(title = "Hip Hop Artists with Top Ranking Songs",
subtitle = "Spot the one-hit-wonder") +
ylab('Points awarded by critics') +
annotate('text', "JAY-Z", 120, label = "16 JAY-Z songs \n were voted for!", size = 2.5, family = "Roboto Condensed") +
my_theme() +
coord_flip() +
theme(axis.title.y = element_blank())
#ggsave(filename = here("plots/tidy_tuesday_2020_week16.png"), width = 14, height = 11, units = "cm")
|
f0b976b6be91b97476cb9dcc824448e5f699dff3 | c7bc2076579a554bca7ddb1c91c5c5197dd3e4d4 | /Scripts/Analysis/plotDemographics.R | d3e2f2d493a267fcafeb3f413ff31a7902c618f9 | [] | no_license | ntustison/CrossLong | c55bf03fe3e85ea0f9c7267e4ebb76243940aaff | e6cdd22a7c950b2a68c3075c893ed1b74489bc61 | refs/heads/master | 2021-12-15T15:24:49.733578 | 2021-12-13T22:16:37 | 2021-12-13T22:16:37 | 62,259,676 | 9 | 3 | null | 2021-12-13T17:37:50 | 2016-06-29T21:39:39 | R | UTF-8 | R | false | false | 1,748 | r | plotDemographics.R | library( ggplot2 )
# Questions of interest:
# * Age ranges, male and females (https://rpubs.com/walkerke/pyramids_ggplot2)
# * Number of CN, LMCI, and AD
# * How many time points? Missing time points?
# baseDirectory <- '/Users/ntustison/Data/Public/CrossLong/'
baseDirectory <- '/Users/ntustison/Documents/Academic/InProgress/CrossLong/'
dataDirectory <- paste0( baseDirectory, 'Data/' )
adniData <- read.csv( paste0( dataDirectory, 'reconciled_ANTsCross.csv' ) )
adniData$DIAGNOSIS <- factor( adniData$DIAGNOSIS, levels = c( "CN", "LMCI", "AD" ) )
demoPlot <- ggplot( data = adniData, aes( VISIT ) ) +
geom_bar( aes( fill = SEX ), position = "dodge" ) +
facet_wrap( ~ DIAGNOSIS ) +
labs( y = "Count", x = "Visit" ) +
# scale_fill_manual( values = alpha( c( "navyblue", "darkred" ), 1.0 ) ) +
guides( fill = guide_legend( title = "Gender" ) )
ggsave( "demoPlot.png", plot = demoPlot, width = 8, height = 2.5, units = 'in', dpi = 300 )
# Create 2D histograms of AGE vs. MMSE at m12 (since it has the highest count)
adniDataFrame12 <- adniData[which( adniData$VISIT == 12 ),]
gg_color_hue <- function( n ) {
hues = seq(15, 375, length = n + 1)
hcl(h = hues, l = 65, c = 100)[1:n]
}
demoPlot2 <- ggplot( data = adniDataFrame12 ) +
stat_bin2d( aes( x = AGE, y = MMSCORE ), binwidth = c( 1.00, 1.75 ) ) +
facet_wrap( ~ DIAGNOSIS ) +
labs( y = "Mini-Mental State Examination", x = "Age" ) +
# scale_fill_gradientn( limits = c( 1, 15 ), colours = gg_color_hue( 10 ) ) +
guides( fill = guide_legend( title = "Count" ) )
ggsave( "demoPlot2.png", plot = demoPlot2, width = 8, height = 2.5, units = 'in', dpi = 300 )
|
f6c1ca88e29480dae81335162b4a60b573d5bb98 | ddf87d7410f5f63747758b8beaf0a4fe7c297796 | /man/ed_input_dir.Rd | 16fcf7afea5238fbaa56076436ab4adfb5d1c9fe | [
"MIT"
] | permissive | ashiklom/fortebaseline | 3142ff38f305906489cf6e012a8f55fa3efaa51e | 513ea9353c133b47f67b3023a78e56abb6384847 | refs/heads/master | 2021-07-23T16:30:12.074804 | 2020-04-30T17:15:43 | 2020-04-30T17:15:43 | 157,924,482 | 3 | 3 | NOASSERTION | 2023-01-22T10:39:45 | 2018-11-16T21:42:24 | R | UTF-8 | R | false | true | 241 | rd | ed_input_dir.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run-ed.R
\name{ed_input_dir}
\alias{ed_input_dir}
\title{Directory containing ED2 inputs}
\usage{
ed_input_dir()
}
\description{
Directory containing ED2 inputs
}
|
61eff8e13145a514d39167f0ed901605e333a51d | e1302be32d3fdd297dcf5182dd437188f43923f0 | /GOTApriori.R | 130df2bb63adffde1b89acf8b22d3d2d359fc9a2 | [] | no_license | kritishaw/imdb-database-mining | d5d4928a52ff3058b4faf0d562985374c4e1619d | d12590af7ae7249a289a547f73f5ed83825071f7 | refs/heads/master | 2021-01-01T04:45:13.823482 | 2016-05-19T20:02:50 | 2016-05-19T20:02:50 | 59,238,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 895 | r | GOTApriori.R | library(arules)
westeros <- read.csv("game_of_thrones.csv", header = TRUE)
westeros <- merge(westeros[, 2:3], as.data.frame(sapply(westeros[,4:10], as.logical)), by="row.names")
westeros <- as.data.frame(sapply(westeros, gsub,pattern="House ",replacement=""))
westeros <- westeros[, 2:10]
Grules <- apriori(westeros)
inspect(Grules)
#westeros <- as.data.frame(sapply(westeros, gsub,pattern="House ",replacement=""))
gc_rules <- apriori(westeros, parameter = list(supp = 0.01, conf = 0.9), appearance = list(rhs=c("Survives=TRUE", "Survives=FALSE"), default = "lhs"))
inspect(gc_rules)
gotrules_sorted <- sort(gc_rules, by = "lift")
subset.matrix <- is.subset(gotrules_sorted, gotrules_sorted)
subset.matrix[lower.tri(subset.matrix, diag = TRUE)] <- NA
red_gotrules <- colSums(subset.matrix, na.rm = TRUE) >= 1
pruned_gotrules <- gotrules_sorted[!red_gotrules]
inspect(pruned_gotrules)
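# Added for convenience (not in the original script): view the surviving rules as a
# plain data frame, which is easier to inspect or export.
pruned_df <- as(pruned_gotrules, "data.frame")
head(pruned_df)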
|
58bf8d26f6e168e6944b7d0f89460c299a228e37 | 53aa1a158928b60ddf4195da23b9c72206be3005 | /man/getQtlMap.Rd | ee4b6664594b348f1791ed9d3e388ec7fbd6deb9 | [] | no_license | hickeyjohn/AlphaSimR | 55b202591506bbae31f7ddadecb333075a9d8c9e | 1cedc22e022b2a628da3e9e5bb1ca0394ea2078f | refs/heads/master | 2023-01-05T19:52:32.118965 | 2020-05-19T08:30:02 | 2020-05-19T08:30:02 | 299,920,977 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 858 | rd | getQtlMap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pullGeno.R
\name{getQtlMap}
\alias{getQtlMap}
\title{Get QTL genetic map}
\usage{
getQtlMap(trait = 1, gender = "A", simParam = NULL)
}
\arguments{
\item{trait}{an integer for the}
\item{gender}{determines which gender specific map
is returned. Options are "A" for average map, "F"
for female map, and "M" for male map. All options are
equivalent if not using gender specific maps.}
\item{simParam}{an object of \code{\link{SimParam}}}
}
\value{
Returns a data.frame for the SNP map.
}
\description{
Retrieves the genetic map for the
QTL of a given trait.
}
\examples{
#Create founder haplotypes
founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#Set simulation parameters
SP = SimParam$new(founderPop)
SP$addTraitA(5)
#Pull SNP map
getQtlMap(trait=1, simParam=SP)
}
|
c6a7e00056bf95f5df4bfdcc2a088c29e5f3ec41 | d22a3a98441edc4713e16deff79bd6fef3e550b1 | /R/CBI_mapCreation.R | 4c62edbaac6339473e0e484fcbc116cabcce271b | [] | no_license | mxblsdl/CBREC_Maps | c0f559be659e3a1a3119a15f9e6536aa3a0367f3 | 9b357954b689272d48f9c43a0c2beaac1c4f7092 | refs/heads/master | 2022-12-11T05:19:43.830142 | 2020-09-10T02:29:50 | 2020-09-10T02:29:50 | 286,899,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,712 | r | CBI_mapCreation.R |
# bind data into data table function; works with specific list structure of outputs
bindDataTable <- function(data) {
require(data.table)
tr <-
lapply(data, function(d) {
# simplify list structure
d <- unlist(d, recursive = F)
# bind together as data table
d <- rbindlist(d)
return(d)
})
return(tr)
}
# for use in subsetting the scenario matrix based on map specifications
scenario_function <- function(scenario_matrix,
piled, # will be the same for use and reference
use_collection,
use_burn,
ref_collection = "No",
ref_burn,
negate_ref = F){
use <- subset(scenario_matrix, Fraction_Piled == piled &
Biomass_Collection == use_collection &
Burn_Type == use_burn)
# allow for no reference case burn to be explicitly defined
if(negate_ref) {
ref <- subset(scenario_matrix, Fraction_Piled == piled &
Biomass_Collection == ref_collection &
Burn_Type != ref_burn)
} else {
ref <- subset(scenario_matrix, Fraction_Piled == piled &
Biomass_Collection == ref_collection &
Burn_Type == ref_burn)
}
return(list("use_id" = use$ID,
"ref_id"= ref$ID))
}
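# --- Illustrative call (added; not part of the original file). The scenario matrix and
# --- its values below are made up purely to show the expected columns.
# scen <- data.frame(ID = 1:4,
#                    Fraction_Piled = c("0", "0", "70", "70"),
#                    Biomass_Collection = c("No", "Yes", "No", "Yes"),
#                    Burn_Type = c("Broadcast", "None", "Pile", "None"))
# scenario_function(scen, piled = "70", use_collection = "Yes", use_burn = "None",
#                   ref_burn = "None", negate_ref = TRUE)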
## ggplot theme
mxblsdl <- theme_minimal(base_size = 12) +
theme(legend.position = "bottom",
#legend.title = element_blank(),
strip.text = element_text(size = 12),
axis.text = element_text(size = 12),
legend.text = element_text(size = 12))
|
826197c4f2ec9578ba6488b2d9ed5800c6e5182f | 53a5427633aaad81b865b71133b017d262a733be | /Kalman_test.R | 89eedd94743cd60ee72e7e581903ab81fbe3053c | [] | no_license | xindd/TestModels | 9529ab8a354e3f4bb6e0cdd8365ee184d903a61d | ff4e642eedfe64683ddd7eebd54f319a8ced273c | refs/heads/main | 2022-12-25T04:12:00.403632 | 2020-10-10T00:18:40 | 2020-10-10T00:18:40 | 302,779,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,571 | r | Kalman_test.R | kl <- function(P_data, T_data, p_dot, t_dot, PL_data, t_time, tL){
#t_time = c(0, 1/6, 1/3, 1/2, 1, 2, 4, 8, 16)
#tL = 1/6
num_data = length(t_time)
true_a = PL_data/tL
Z = matrix(c(p_dot-true_a, t_dot-true_a), nrow = num_data, byrow=FALSE)
GG <- diag(1,2)
FF <- matrix(c(1, 3, 3, 2),nrow=2,byrow=TRUE)
m0 <- c(0,0)
C0 <- diag(1,2)
W <- diag(1,2)
V <- diag(1,2)
X = matrix(c(-P_data,P_data-T_data, rep(0,num_data)),nrow=num_data,byrow=FALSE)
my_dlm <- dlm(X=X,JFF=FF,FF=FF,V=V,GG=GG,W=W,m0=m0, C0=C0)
y0 <- dlmFilter(Z,my_dlm)
y <- dlmSmooth(y0)
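  # rebuild fitted observations from the smoothed states: at each time step the
  # observation matrix is [ -P 0 ; 0 P-T ], the time-varying F_t encoded via JFF and X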
pre_Z = matrix(c(0,0),nrow = 1)
for(k in 1:num_data){
pre_Z=rbind(pre_Z,t(matrix(c(-P_data[k],0,0,(P_data[k]-T_data[k])),nrow=2,byrow=TRUE)%*%as.matrix(y$s[k+1,],nrow=2)))
}
pre_Z = dropFirst(pre_Z)
df <- data.frame(y=Z, s=dropFirst(y$s),pre=pre_Z)
df$id <- t_time
ggplot(data=df,aes(x=id,y=y.1))+ # P
geom_point()+
geom_point(aes(y=y.2),color="red")+ # T
geom_line(linetype=1,aes(y=pre.1),color="orange")+ #pre_P
geom_line(linetype=1,aes(y=pre.2),color="darkred") # pre_T
ggplot(data=df,aes(x=id,y=s.1))+ # P
geom_line()+
geom_line(linetype=1,aes(y=s.2),color="darkred") # b
}
num_data = 100
t_time = seq(0.1,10,0.1)
P_data = rpkms$total_introns['77595',]
T_data = rpkms$total_exons['77595',]
# p_dot / t_dot should hold the time derivatives of P_data and T_data; their
# computation is left unspecified in the original script, so the gap is kept as-is
p_dot =
t_dot =
PL_data = rpkms$foursu_exons['77595',]
t_time = c(0, 1/6, 1/3, 1/2, 1, 2, 4, 8, 16)
tL = 1/6
kl(P_data, T_data, p_dot, t_dot, PL_data, t_time, tL)
|
3c33a0232101a0b0c90fb9aa10819e0b031973e6 | 18bdef50ddaf2647ddec6308bf5d883dc6ebd995 | /NolaR/R/nola.R | d125b98008af21e13b8886e9d17d0b1d032a91f3 | [] | no_license | jsomekh/Nola | 0cdf40dc2353c7f21d0b8fe84eae4689b558e7c5 | 15a643b2736ef6b3467d64883b1306ffe9e8ac0c | refs/heads/master | 2020-03-09T18:30:32.338451 | 2018-04-11T10:11:24 | 2018-04-11T10:11:24 | 128,934,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 171 | r | nola.R | #' A Nola Function
#'
#' This function allows you to print hi Nola.
#' Takes no parameters.
#' @keywords nola
myNola<-function()
{
print("Hi Nola")
print("Hi Jinjit")
}
|
d508ac209b72aeb49ecda6bb2fbf8cb6a71e45b7 | 15b43be459adec4f54507aafe759f61f1eadd5d5 | /man/rtimes-package.Rd | 962983ac9351f84e965e44581fc2fa8fd9cd175b | [
"MIT"
] | permissive | jpiaskowski/rtimes | 7d5c5ffe3bc250f2ee1d5bee98a10dbd98c141b6 | 4eaef46ca60a35c879db68edba6a697418693850 | refs/heads/master | 2022-01-28T10:42:27.313696 | 2019-07-19T17:12:07 | 2019-07-19T17:12:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,668 | rd | rtimes-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtimes-package.R
\docType{package}
\name{rtimes-package}
\alias{rtimes-package}
\alias{rtimes}
\title{rtimes}
\description{
Interface to the Congress and Campaign Finance APIs from Propublica, and
the Article Search and Geographic 'APIs' from the New York Times.
}
\details{
Backstory is that the Congress and Campaign finance APIs used to be part of
NYTimes, but were taken over by Propublica.
}
\section{rtimes API}{
Functions that wrap these sets of APIs are prefixed by a two-letter
acronym for each API endpoint plus the function name itself, for example
\code{cg} + \code{fxn}
\itemize{
\item as - for the Article Search API (Docs:
\url{http://developer.nytimes.com/article_search_v2.json})
\item geo - for the Geographic API (Docs:
\url{http://developer.nytimes.com/geo_api_v2.json})
\item cg - for the Congress API (Docs:
\url{https://projects.propublica.org/api-docs/congress-api/})
\item cf - for the Campaign Finance API (Docs:
\url{https://propublica.github.io/campaign-finance-api-docs/})
}
See the vignette for help.
}
\section{Authentication}{
Get your own API keys for NYTimes APIs at
\url{https://developer.nytimes.com/accounts/create} - you can use one API key for
both the article search and geo NYTimes APIs.
Get your Propublica API key for Congress and Campaign Finance APIs at either
\url{https://www.propublica.org/datastore/api/propublica-congress-api} or
\url{https://www.propublica.org/datastore/api/campaign-finance-api} - as far
as I know, you can use the same key for both APIs
We set up the functions so that you can put the key in your \code{.Renviron}
file, which will be called on startup of R, and then you don't have to enter
your API key for each run of a function. Add entries for an R session like
\itemize{
\item \code{Sys.setenv(NYTIMES_API_KEY = "YOURKEYHERE")}
\item \code{Sys.setenv(PROPUBLICA_API_KEY = "YOURKEYHERE")}
}
Or set them across sessions by putting entries in your \code{.Renviron} file
like
\itemize{
\item \code{NYTIMES_API_KEY=<yourkey>}
\item \code{PROPUBLICA_API_KEY=<yourkey>}
}
You can also pass in your key in a function call, but be careful not to
expose your keys in code committed to public repositories. If you do pass
in a function call, use e.g., \code{Sys.getenv("NYTIMES_API_KEY")}
}
\section{Rate limits}{
Rate limits vary for the different APIs:
\itemize{
\item Article Search API: 1/sec, 1,000/day
\item Geographic API: 5/sec, 1,000/day
\item Congress API: 2/sec, 5,000/day
\item Campaign Finance API: 50/sec, 5,000/day
}
}
\author{
Scott Chamberlain \email{[email protected]}
}
\keyword{package}
|
8a494f981fa90f9be2e80e21b86d6d8d785d28e5 | 1e35733da24e026b16f7f82a4c3cd703abc0391b | /Diamond price prediction/Vector operations.R | 547006d75216b22033416622fda5b63225e4be1f | [] | no_license | deboshas/R-Handson | ec7a1a9b92c0125929b1d108b1e58ae9d2fbebcd | 48782b1965a6f2162a9553c7c57d038861084d49 | refs/heads/master | 2020-06-30T08:09:11.631045 | 2019-09-04T04:49:31 | 2019-09-04T04:49:31 | 200,773,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 792 | r | Vector operations.R | x <- c(10,20,30,40,45)
y <-c(1,2,3,4) #repeat the vector until it matches the x vector
z <- x+y
s <- x-y
logicalvector <- x > y
multiplicationvector <- x * 10
#operations
randomvector<- rnorm(5)
# R-style iteration: i takes each element of the vector in turn
for ( i in randomvector){
print(i)
}
#
# print(randomvector[1])
# print(randomvector[2])
# print(randomvector[3])
# print(randomvector[4])
# print(randomvector[5])
#convential programming
for (j in 1:5){
print(randomvector[j])
}
#--------------------------------#
N<- 10000000
a<- rnorm(N)
b <- rnorm(N)
# vectorised multiplication (the R way): shorter and faster than the traditional loop
c <- a *b
d <- rep(NA,N)
# multiplication the traditional way: the explicit loop takes more code and runs slower
for( i in 1:N){
d[i]=a[i]*b[i]
}
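# quick (illustrative) timing comparison of the two approaches:
# system.time(c <- a * b)
# system.time(for (i in 1:N) d[i] <- a[i] * b[i])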
|
3b5b21b795c6047437c13ac29d3c30227e06339d | 6e1ed64a0769a0776a01f2db6c42510477acc13f | /inst/doc/introduction.R | 7a53e389ecbcb5ecdfc8b76b26fcc54926a1de96 | [] | no_license | cran/creditmodel | f9e008df63f25cdaefebe79a82aaf9aad3598c8c | a4f07950173e6e0f6c6565a08fc564ec2075cb36 | refs/heads/master | 2022-01-19T11:26:07.485897 | 2022-01-07T10:32:41 | 2022-01-07T10:32:41 | 183,883,743 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,853 | r | introduction.R | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(echo = FALSE)
library(creditmodel)
## ----fig.width = 10-----------------------------------------------------------
B_model = training_model(dat = UCICreditCard,
model_name = "UCICreditCard",
target = "default.payment.next.month",
x_list = NULL,
occur_time = "apply_date",
obs_id = "ID",
dat_test = NULL,
preproc = TRUE,
miss_values = list(-1, -2,"\"\"\"\"",""),
missing_proc = TRUE,
outlier_proc = TRUE,
trans_log = TRUE,
feature_filter = list(filter = c("IV", "COR"),
cv_folds = 1,
iv_cp = 0.01,
psi_cp = 0.2,
cor_cp = 0.7,
xgb_cp = 0,
hopper = F),
vars_plot = FALSE,
algorithm = list("LR"),
breaks_list = NULL,
LR.params = lr_params(
iter = 2,
method = 'random_search',
tree_control = list(p = 0.02,
cp = c(0.00001, 0.00000001),
xval = 5,
maxdepth = c(10, 15)),
bins_control = list(bins_num = 10,
bins_pct = c(0.02, 0.03, 0.05),
b_chi = c(0.01, 0.02, 0.03),
b_odds = 0.1,
b_psi = c(0.02, 0.06),
b_or = c(.05, 0.1, 0.15, 0.2),
mono = c(0.1, 0.2, 0.4, 0.5),
odds_psi = c(0.1, 0.15, 0.2),
kc = 1),
f_eval = 'ks',
lasso = TRUE,
step_wise = FALSE),
parallel = FALSE,
cores_num = NULL,
save_pmml = FALSE,
plot_show = TRUE,
model_path = tempdir(),
seed = 46)
|
db1f1688967fbfff29864620d95d8f9bce416152 | 1f71bcffe9b3b1cb424702b3bdda1e7f161da9de | /man/NPP.Rd | e40798acd6c1b1346b0965fc398c80bb2261510f | [] | no_license | Mavbegg/MIAMI | 55abab9d8048cacba9a2ed22ce5a6b6f0c70b525 | 8e4e24670cb32ea7aa16bd145ef5422f115fe0ef | refs/heads/master | 2021-01-21T17:38:32.567589 | 2017-05-21T17:13:05 | 2017-05-21T17:22:39 | 91,974,117 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 452 | rd | NPP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NPP.R
\name{NPP}
\alias{NPP}
\title{Net primary productivity (NPP) estimation from temperature and precipitation data}
\usage{
NPP(temp, pre)
}
\arguments{
\item{temp}{average annual temperature vector}
\item{pre}{precipitation vector}
}
\description{
This function allows you to estimate NPP
from average annual temperature and cumulative precipitation data.
}
\keyword{NPP}
|
0bbde597c97d6b6bc680bf6a1b785307185aca04 | 39378092783a6f25aad182f44beb4f6b3197daf8 | /templates/save.R | 7c8807548ba14d169e64169bdf94101d4f239580 | [] | no_license | mariafiruleva/automated_processing_scrnaseq | 0216522a692ad6a6fb7d869aa8a746f8d614a633 | 86376dcb84d70ed8269df0579e6562621e4ed13b | refs/heads/master | 2020-07-25T08:15:39.658441 | 2019-09-14T07:07:26 | 2019-09-14T07:07:26 | 208,227,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | save.R | file_out <- paste0("{{ RunName }}", '.RData')
save(list = c('whole', 'whole.markers'), file = file_out)
write.table(top50_log_fc, "top50_log_fc.tsv", sep="\t", quote=F, row.names=F)
write.table(top100_log_fc, "top100_log_fc.tsv", sep="\t", quote=F, row.names=F)
write.table(top200_log_fc, "top200_log_fc.tsv", sep="\t", quote=F, row.names=F)
write.table(top50_adj_pval, "top50_adj_pval.tsv", sep="\t", quote=F, row.names=F)
write.table(top100_adj_pval, "top100_adj_pval.tsv", sep="\t", quote=F, row.names=F)
write.table(top200_adj_pval, "top200_adj_pval.tsv", sep="\t", quote=F, row.names=F) |
22a8551e5c40e3d6de4e743699dbf8427aa07421 | a5e538a9125dcdd90bae6bf44cb1d67ea87f33b9 | /man/getDataset.Rd | 1984744bd78ea68dd52def1d4a7f21f67c8dfcb5 | [
"Apache-2.0"
] | permissive | mrhelmus/data.world-r | 3d45003aa13d1d7b596f9456306cc24ac0e49b6f | 81a648878d7ee68cb91e7cee82aa8156c59770b9 | refs/heads/master | 2021-01-21T19:05:32.717425 | 2017-03-28T20:28:50 | 2017-03-28T20:28:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 653 | rd | getDataset.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDataset.R
\name{getDataset}
\alias{getDataset}
\title{Retrieve a dataset via data.world public api}
\usage{
getDataset(connection, dataset)
}
\arguments{
\item{connection}{the connection to data.world}
\item{dataset}{the "agentid/datasetid" identifying the dataset to retrieve}
}
\value{
the data.world dataset's metadata
}
\description{
Retrieve a dataset via data.world public api
}
\examples{
connection <- data.world(token = "YOUR_API_TOKEN_HERE")
getDataset(connection, dataset="user/dataset")
}
\seealso{
https://docs.data.world/documentation/api/
}
|
2d27cdf7fc2a2da7dfa8b95aa80cd36eea6c63cc | 83d93f6ff2117031ba77d8ad3aaa78e099657ef6 | /man/gframe.Rd | 7cc21f87995298e6bef6eae62203142b1e9a5ec6 | [] | no_license | cran/gWidgets2 | 64733a0c4aced80a9722c82fcf7b5e2115940a63 | 831a9e6ac72496da26bbfd7da701b0ead544dcc1 | refs/heads/master | 2022-02-15T20:12:02.313167 | 2022-01-10T20:12:41 | 2022-01-10T20:12:41 | 17,696,220 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,457 | rd | gframe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gframe.R
\name{gframe}
\alias{gframe}
\alias{.gframe}
\title{Constructor for framed box container with label}
\usage{
gframe(
text = "",
markup = FALSE,
pos = 0,
horizontal = TRUE,
spacing = 5,
container = NULL,
...,
toolkit = guiToolkit()
)
.gframe(
toolkit,
text = "",
markup = FALSE,
pos = 0,
horizontal = TRUE,
spacing = 5,
container = NULL,
...
)
}
\arguments{
\item{text}{frame label}
\item{markup}{does label use markup (toolkit specific)}
\item{pos}{position of label: 0=left, 1=right; some toolkits allow values in between}
\item{horizontal}{logical. If TRUE, left to right layout, otherwise top to bottom}
\item{spacing}{spacing around the widget}
\item{container}{parent container}
\item{...}{passed through}
\item{toolkit}{toolkit}
}
\description{
The framed box container inherits from \code{ggroup}. The main
addition is a label, which is accessed via the \code{name} method.
}
\note{
to include a scrollwindow, place a \code{ggroup} within this window.
}
\examples{
\dontrun{
w <- gwindow("gformlayout", visible=FALSE)
f <- gframe("frame", horizontal=FALSE, container=w)
glabel("Lorem ipsum dolor sit amet, \nconsectetur adipiscing elit.", container=f)
gbutton("change name", container=f, handler=function(h,...) {
names(f) <- "new name"
})
visible(w) <- TRUE
}
}
\seealso{
\code{\link{ggroup}} and \code{\link{gexpandgroup}}
}
|
778aeb7c2ce716b31b798cceb17e6b487856bcf2 | d2eda24acceb35dc11263d2fa47421c812c8f9f6 | /man/run.advection.Rd | fae747ec478f8b6703ef2aef123c935ae331e40e | [] | no_license | tbrycekelly/TheSource | 3ddfb6d5df7eef119a6333a6a02dcddad6fb51f0 | 461d97f6a259b18a29b62d9f7bce99eed5c175b5 | refs/heads/master | 2023-08-24T05:05:11.773442 | 2023-08-12T20:23:51 | 2023-08-12T20:23:51 | 209,631,718 | 5 | 1 | null | null | null | null | UTF-8 | R | false | true | 692 | rd | run.advection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source.lagrangian.r
\name{run.advection}
\alias{run.advection}
\title{Advect Lagrangian Particle}
\usage{
run.advection(particles, model, advection, zlim = c(-6000, 0), verbose = T)
}
\arguments{
\item{particles}{a particle release dataframe such as that generated by `init.lagrangian.particles()`}
\item{model}{a model list such as that generated by `init.lagrangian.model()`}
\item{advection}{An advection list, such as one generated by `load.oscar.advection()`, that contains U V and W}
\item{dt}{Time step for RK4 advection scheme}
}
\description{
Advect Lagrangian Particle
}
\author{
Thomas Bryce Kelly
}
|
19ef7cec3e7eabdd23abfc64880e8feb319c6df4 | 27453005e827f25ee1f4299ed40bd5f16873cb54 | /man/soft_threshold.Rd | 93726105c44bcbb611fb874d52786693e7efb936 | [
"MIT"
] | permissive | seanigami/stpca | 9bb4bece7113f9729e2fd6cf7e5afec1511cec0d | a8eebfc6f1bff0a4457a23b975238afa3ae664c2 | refs/heads/master | 2022-01-13T19:04:21.702988 | 2018-08-16T15:01:01 | 2018-08-16T15:01:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 416 | rd | soft_threshold.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparse-W.R
\name{soft_threshold}
\alias{soft_threshold}
\title{Soft-thresholding operator.}
\usage{
soft_threshold(x, threshold)
}
\arguments{
\item{x}{the vector/matrix of values to be soft thresholded}
\item{threshold}{the threshold lambda}
}
\description{
The soft-thresholding operator S_\\lambda(x) = sign(x)(abs(x) - \\lambda)_+
}
|
c958ca7b4a1a18bb7b38da4314b16c71dac42064 | d6bcbdfbc175110d2dffcc8518e066601c39539f | /software/binda/download/dorothea-analysis.R | f0bbe97d12375b992e8096fab7798b2af038915f | [] | no_license | strimmerlab/strimmerlab.github.io | 1b322732a8d73e1509a067012e3720bfd2e58b1b | e8fcdd763eee6dbed58b955c0d16fe85742df351 | refs/heads/master | 2023-08-31T22:15:35.651056 | 2023-08-28T17:04:48 | 2023-08-28T17:04:48 | 23,162,223 | 0 | 2 | null | 2023-02-08T21:15:23 | 2014-08-20T19:59:27 | HTML | UTF-8 | R | false | false | 3,127 | r | dorothea-analysis.R | # /*
# This is an R script containing R markdown comments. It can be run as is in R.
# To generate a document containing the formatted R code, R output and markdown
# click the "Compile Notebook" button in R Studio, or run the command
# rmarkdown::render() - see http://rmarkdown.rstudio.com/r_notebook_format.html
# */
#' ---
#' title: "Analysis of Dorothea Data Set"
#' output: pdf_document
#' author: "Sebastian Gibb and Korbinian Strimmer"
#' date: 30 April 2015
#' ---
#'
#' # Dorothea data set
#'
#' The Dorothea data set in R data format can be downloaded from
#' https://strimmerlab.github.io/data/dorothea.rda
#'
#' Load data set
load("dorothea.rda")
#' Training data:
dim(x.train) # 800 samples, 100000 features
table(y.train)
#' Validation data:
dim(x.valid) # 350 samples, 100000 features
table(y.valid)
#'
#' # Ranking of features according to binda
#' Load binda R package
library("binda")
#' Compute ranking:
br = binda.ranking(x.train, y.train, verbose=TRUE)
#' List the 20 top ranking predictors
br[1:20,]
plot(br, top=20)
#' Plot ranks:
par(mfrow=c(1,2))
plot(br[,"score"], type="l", ylab="Ranking Score", xlab = "Rank (1 to 100000)")
plot(br[,"score"], type="l", xlim=c(1, 1000), ylab="Ranking Score", xlab = "Rank (1 to 1000)")
par(mfrow=c(1,1))
#'
#' # Cross-validation analysis
#' Function to compute test accuracy:
predfun = function(Xtrain, Ytrain, Xtest, Ytest)
{
# learn predictor
binda.fit = binda(Xtrain, Ytrain, lambda.freq=0, verbose=FALSE)
# predict classes using new test data
yhat = predict(binda.fit, Xtest, verbose=FALSE)$class
acc = ( sum( yhat == Ytest )/length(yhat) )
return(acc)
}
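#' predfun follows the interface expected by crossval::crossval(): it receives one
#' train/test split and returns the statistic to aggregate (test-set accuracy here).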
#' Use cross-validation to find optimal number of predictors:
library("crossval")
set.seed(12345)
numPreds = c(1:10,20, 50, 100 )
simFun = function(numpred)
{
cat("Number of Predictors:", numpred, "\n")
predlist = br[1:numpred, "idx"]
x.train2 = x.train[,predlist, drop=FALSE ]
x.valid2 = x.valid[,predlist, drop=FALSE ]
valError = predfun(x.train2, y.train, x.valid2, y.valid)
cv.out = crossval(predfun, x.train2, y.train, K=5, B=20, verbose=FALSE)
return( c(valError=valError, ACC=cv.out$stat, ACC.se = cv.out$stat.se) )
}
#' This may take some time:
cvsim = lapply(numPreds, simFun)
cvsim = do.call(rbind, cvsim)
binda.sim = cbind(numPreds,cvsim)
binda.sim
#' Validation error if all 100000 predictors are included:
valErrorAll = predfun(x.train, y.train, x.valid, y.valid)
valErrorAll
#'
#' # Comparison with Random Forest
library("randomForest")
if( file.exists("rf.rda") )
{
load("rf.rda") # load precomputed random forest
}
if( !file.exists("rf.rda") )
{
set.seed(12345)
rf.fit = randomForest(x.train, y=as.factor(y.train), ntree=100, importance=TRUE)
save(rf.fit, file="rf.rda")
}
#' Ranking of predictors using training data
varimp = rf.fit$importance[,4] # MeanDecreaseGini
names(varimp) = NULL
ovi = order(varimp, decreasing=TRUE)
idx = ovi[1:20]
cbind(idx, MeanDecreaseGini=varimp[idx])
#' Test error
yhat = predict(rf.fit, x.valid)
acc = ( sum( yhat == y.valid )/length(yhat) )
acc
|
e76b882513df30b546540710d71db0acdf7ff39e | 0c266c36fb25113e35afebdf6a037ade43b0e3da | /man/Wishbone.Rd | 62eccb585302f956ddd16328887a95a1899f5945 | [] | no_license | dynverse/Wishbone | 1c684546f7a9f21bdb4abedb07503c34cc2d51dd | 7f9ff10806ec5b959f76ab4aa1e513891581e5ba | refs/heads/master | 2021-01-23T20:07:18.058856 | 2018-06-06T12:58:05 | 2018-06-06T12:58:05 | 102,852,835 | 0 | 0 | null | 2018-06-06T12:58:06 | 2017-09-08T11:08:37 | R | UTF-8 | R | false | true | 974 | rd | Wishbone.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Wishbone.R
\name{Wishbone}
\alias{Wishbone}
\title{Execute Wishbone}
\usage{
Wishbone(counts, start_cell_id, knn = 10, n_diffusion_components = 2,
n_pca_components = 15, markers = "~", branch = TRUE, k = 15,
num_waypoints = 50, normalize = TRUE, epsilon = 1, verbose = FALSE,
num_cores = 1)
}
\arguments{
\item{counts}{Counts}
\item{start_cell_id}{ID of start cell}
\item{knn}{k nearest neighbours for diffusion}
\item{n_diffusion_components}{number of diffusion components}
\item{n_pca_components}{number of pca components}
\item{markers}{markers to use}
\item{branch}{whether to fit a branching trajectory (TRUE) or a linear one (FALSE)}
\item{k}{k param}
\item{num_waypoints}{number of waypoints}
\item{normalize}{whether or not to normalize}
\item{epsilon}{epsilon param}
\item{verbose}{whether or not to print the wishbone output}
\item{num_cores}{number of cores to use}
}
\description{
Execute Wishbone
}
|
59834a69ef133638e7782f9b26266dbc1d349a6f | 8b42ce326b7641a6b14c1a6d42e6e2a3ff0a5ca0 | /model_ann1.R | 58142b6a589948576f45e8b5190d237a5fe45410 | [] | no_license | coldfir3/boston_ma | 8ce5819a843214ae1f6b4b0781ac07939112d3a5 | 98e5e3250939ee3dabc0cb6db2ab56432cd6f6c4 | refs/heads/master | 2020-03-25T21:51:41.144726 | 2018-08-10T14:35:46 | 2018-08-10T14:35:46 | 144,193,281 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,929 | r | model_ann1.R | rm(list = ls())
library(keras)
library(tidyverse)
dataset <- dataset_boston_housing()
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset
rm(dataset)
save(x_train, y_train, x_test, y_test, file = 'dataset.data')
mean <- apply(x_train, 2, mean)
std <- apply(x_train, 2, sd)
save(mean, std, file = 'scale.data')
x_train <- scale(x_train, center = mean, scale = std)
x_test <- scale(x_test, center = mean, scale = std)
model_ann1 <- keras_model_sequential() %>%
layer_dense(units = 64, activation = "relu", input_shape = dim(x_train)[2]) %>%
layer_dense(units = 64, activation = "relu") %>%
layer_dense(units = 1) %>%
compile(
optimizer = "rmsprop",
loss = "mse",
metrics = c("mae")
)
ann1_weights <- get_weights(model_ann1)
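# keep a copy of the freshly initialised weights so every CV fold below starts
# from the same initialisation (restored via set_weights() at the top of each fold)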
k <- 4
indices <- sample(1:nrow(x_train))
folds <- cut(indices, breaks = k, labels = FALSE)
num_epochs <- 100
scores <- NULL
for (i in 1:k) {
cat("processing fold #", i, "\n")
val_indices <- which(folds == i, arr.ind = TRUE)
val_data <- x_train[val_indices,]
val_targets <- y_train[val_indices]
partial_x_train <- x_train[-val_indices,]
partial_y_train <- y_train[-val_indices]
model_ann1 %>% set_weights(ann1_weights)
model_ann1 %>% fit(partial_x_train, partial_y_train,
epochs = num_epochs, batch_size = 10, verbose = 0)
results <- model_ann1 %>% evaluate(val_data, val_targets, verbose = 0)
scores <- c(scores, results$mean_absolute_error)
}
mean(scores)/diff(range(y_train))*100
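# mean cross-validated mean-absolute-error expressed as a percentage of the target range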
callbacks_list <- list(
callback_early_stopping(
monitor = "mae",
patience = 1
),
callback_model_checkpoint(
filepath = "model_ann1.h5",
monitor = "val_loss",
save_best_only = TRUE
)
)
model_ann1 %>% set_weights(ann1_weights)
model_ann1 %>% fit(x_train, y_train,
epochs = num_epochs, batch_size = 10,
validation_data = list(x_test, y_test),
callbacks = callbacks_list)
|
f191bd881bbdcb878a82420c74d3abc2643fc8b9 | e915168439022365a3f84cd995bca3808c6acaa7 | /sim-stuff/HD-settings/test3.R | 007ddfb3660ea73587b4418e68e99df18f62c49d | [] | no_license | samperochkin/learning-block-exchangeability | 675e79b6b5f2ee17ef1524a05329ba3f208ba443 | a45142de8137f19620f95e372aa1284eafca1a57 | refs/heads/master | 2023-03-10T02:37:50.353800 | 2023-02-22T15:23:42 | 2023-02-22T15:23:42 | 104,235,865 | 2 | 0 | null | 2018-03-28T16:05:11 | 2017-09-20T15:48:00 | R | UTF-8 | R | false | false | 1,659 | r | test3.R | dim(X)
n <- 150
X <- rmvnorm(n,rep(0,d),Sigma)
Delta <- constructMatrix(rep(1,length(d.vec)),d.vec)
Th1 <- cor.fk(X[1:125,])
Th2 <- cor.fk(X[126:150,])
Tt2 <- (Delta %*% (Th2-diag(d)) %*% Delta)/(Delta %*% (1-diag(d)) %*% Delta)
sum((Tt2-Th1)^2)/sum((Th2-Th1)^2)
sum((Tt2-Tau)^2)/sum((Th2-Tau)^2)
n <- 200
X <- rmvnorm(n,rep(0,d),Sigma)
Delta <- constructMatrix(rep(1,length(d.vec)),d.vec)
Ths <- lapply(1:2, function(k){
print(k)
cor.fk(X[1:100 + (k-1)*100,])
})
# Tts <- lapply(Ths, function(Th){
# (Delta %*% (Th-diag(d)) %*% Delta)/(Delta %*% (1-diag(d)) %*% Delta)
# })
# pairs <- t(combn(1:50,2))
#
# par(mar = c(2,2,1,1))
# hist(sapply(1:nrow(pairs), function(k){
# sum((Ths[[pairs[k,1]]]-Tts[[pairs[k,2]]])^2)/sum((Ths[[pairs[k,1]]]-Ths[[pairs[k,2]]])^2)
# }), breaks=10)
hc <- hclust(dist(Ths[[1]]),method="ward.D2")
Ds <- lapply(1:100, function(k){
print(k)
clus <- cutree(hc,k)
D <- sapply(1:k, function(i){
vec <- rep(0,d)
vec[which(clus==i)] <- 1
vec
})
tcrossprod(D)
})
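# Ds[[k]] is the d x d block-membership indicator (1 when two variables fall in the
# same of the k Ward clusters); used below to average Ths[[1]] within blocks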
Tts <- lapply(Ds, function(D){
print("hey")
(D %*% (Ths[[1]]-diag(d)) %*% D)/(D %*% (1-diag(d)) %*% D)
})
Tts <- lapply(Tts, function(Tt){
diag(Tt) <- 1
Tt
})
# clus <- cutree(hc,6)
# D <- sapply(1:6, function(i){
# vec <- rep(0,d)
# vec[which(clus==i)] <- 1
# vec
# })
# D <- tcrossprod(D)
# Tts.hc[[6]] <- (D %*% (Ths[[1]]-diag(d)) %*% D)/(D %*% (1-diag(d)) %*% D)
par(mar = c(2,2,1,1))
SS2 <- sapply(seq_along(Tts[-1]), function(i){
sum((Ths[[2]]-Tts[[1+i]])^2)
})
SS1 <- sapply(seq_along(Tts[-1]), function(i){
sum((Tau-Tts[[1+i]])^2)
})
plot(SS1)
which.min(SS1)
plot(SS2)
which.min(SS2)
|
1c5be82e3d96a085bfb858b957e132e984636213 | fe17217bf85ed660a1fa3173f6078133c5bc49e0 | /man/lm.WZ.Rd | bc56ca8a85f768283b96d85564b1eb15b89d7f77 | [] | no_license | rgcstats/ODS | 5a4ba2107328175174b4874e10d8e47733c33397 | 0290071546fdd8dff1c8e9e6d8bc5920d1c04491 | refs/heads/master | 2020-12-10T10:30:36.232624 | 2020-01-13T10:20:29 | 2020-01-13T10:20:29 | 77,803,517 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,320 | rd | lm.WZ.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm_WZ.R
\name{lm.WZ}
\alias{lm.WZ}
\title{Linear model fitting for stratified survey data by maximising the estimated likelihood of yu and xs}
\usage{
lm.WZ(ys, yr, xs, cutoffs, pi.s)
}
\arguments{
\item{ys}{vector of sample values of the dependent variable}
\item{yr}{vector of non-sample values of the dependent variable}
\item{xs}{matrix of covariate values (number of rows must equal n where n is length of ys).
Intercept is not assumed, so xs should contain a column of 1s if you want an intercept in the model.}
\item{cutoffs}{A vector of finite cutoffs defining the strata.
If there are H cutoffs there are H+1 strata.}
\item{pi.s}{n-vector (where n=length(y)) of probabilities of selection for the sample units}
}
\value{
a vector containing p estimated coefficients (where p=ncol(xs)) and the
estimated error standard deviation. No variance estimates provided.
}
\description{
This function fits a linear model to survey data stratified by
the dependent variable, by maximising the estimated likelihood
from Weaver and Zhou (2012).
}
\examples{
data(population_example)
lm.WZ(ys=sample.example$y,yr=population.example$y[population.example$s.indicators==0],
xs=cbind(1,sample.example$x),cutoffs=c(1,2,3),pi.s=sample.example$pi)
}
|