blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–327) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–91) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–134) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses 46 values) | visit_date (timestamp[us] 2016-08-02 22:44:29 to 2023-09-06 08:39:28) | revision_date (timestamp[us] 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | committer_date (timestamp[us] 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | github_id (int64 19.4k to 671M, ⌀) | star_events_count (int64 0 to 40k) | fork_events_count (int64 0 to 32.4k) | gha_license_id (stringclasses 14 values) | gha_event_created_at (timestamp[us] 2012-06-21 16:39:19 to 2023-09-14 21:52:42, ⌀) | gha_created_at (timestamp[us] 2008-05-25 01:21:32 to 2023-06-28 13:19:12, ⌀) | gha_language (stringclasses 60 values) | src_encoding (stringclasses 24 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 7 to 9.18M) | extension (stringclasses 20 values) | filename (stringlengths 1–141) | content (stringlengths 7 to 9.18M) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
95ceb73042a4f2fe6da6b944f2757ab3fbc1da90 | eb7d452b52b530796cba8fa13b405b650b8df4a7 | /find_hist_stat.R | ecc00bf6c66f3712a9546c560d896605b92f0e16 | [] | no_license | MehliyarSadiq/TEMIR | 62730c3a9f1b62a2855385582fff87432b66eef3 | c4546f8b0084be03ec56f19c8ee8019c43354e8d | refs/heads/master | 2020-06-05T01:23:37.520711 | 2019-06-17T03:02:11 | 2019-06-17T03:02:11 | 192,264,948 | 0 | 0 | null | 2019-06-17T02:55:20 | 2019-06-17T02:55:20 | null | UTF-8 | R | false | false | 7,030 | r | find_hist_stat.R |
################################################################################
### Module for calculating statistics of default hourly output data in nc files
################################################################################
# Function to find daily statistics (e.g., mean, max, min) from default hourly output data in nc files:
f_daily_stat = function(hist_name, start_date, end_date, varid, FUN=mean, hist_data_dir='~/TGABI/Tai/TEMIR/hist_data/') {
# This function requires external functions: make.date.vec (from "tools.R")
# This function requires R packages: ncdf4
# There cannot be any missing dates in between "start_date" and "end_date".
# Vector of simulation dates:
date_vec = make.date.vec(start.date=start_date, end.date=end_date)
# Number of simulation days:
n_day = length(date_vec)
# Define new data array:
filename = paste0(hist_data_dir, hist_name, '/hist_grid_', as.character(start_date), '.nc')
nc = nc_open(filename)
lon = ncvar_get(nc, varid='lon')
lat = ncvar_get(nc, varid='lat')
pft = ncvar_get(nc, varid='pft')
hour = ncvar_get(nc, varid='hour')
nc_close(nc)
hist_daily = array(NaN, dim=c(length(lon), length(lat), n_day, length(pft)))
# Looping over days:
for (d in 1:n_day) {
print(paste0('Processing ', varid, ' for date = ', as.character(date_vec[d])), quote=FALSE)
# Extract nc file:
hist_hourly = array(NaN, dim=c(length(lon), length(lat), length(pft), length(hour)))
filename = paste0(hist_data_dir, hist_name, '/hist_grid_', as.character(date_vec[d]), '.nc')
nc = nc_open(filename)
hist_hourly[,,,] = ncvar_get(nc, varid=varid)
nc_close(nc)
# Find daily statistics:
hist_daily[,,d,] = apply(hist_hourly, MARGIN=1:3, FUN=FUN, na.rm=TRUE)
}
return(hist_daily)
}
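# Example usage (a sketch: the history name and variable are taken from the commented
# examples further down this file, and assume the matching nc files exist under
# hist_data_dir, with ncdf4 loaded and "tools.R" sourced):
# out0 = f_daily_stat(hist_name='control_global_2010', start_date=20100601, end_date=20100630, varid='A_can', FUN=max)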
################################################################################
# Function to find monthly mean from default hourly output data in nc files, including the option to sum over all PFTs:
f_monthly_mean = function(hist_name, start_date, end_date, varid, PFT_sum=FALSE, PFT_frac=NULL, hist_data_dir='~/TGABI/Tai/TEMIR/hist_data/') {
# This function requires external functions: make.date.vec (from "tools.R")
# This function requires R packages: ncdf4
# There cannot be any missing dates in between the first and last days of a month.
# Therefore, "start_date" should always be the first day of a given month, and "end_date" should be the last day of a given month.
# If "PFT_sum=TRUE", weighted sum over all PFTs (weighted by "PFT_frac") will be calculated.
# "PFT_frac": dim1 = lon; dim2 = lat; dim3 = pft
# Vector of simulation months:
date_vec = make.date.vec(start.date=start_date, end.date=end_date)
month_vec = unique(floor(date_vec/1e2))*1e2 + 1
# Number of simulation months:
n_month = length(month_vec)
# Define new data array:
filename = paste0(hist_data_dir, hist_name, '/hist_grid_', as.character(start_date), '.nc')
nc = nc_open(filename)
lon = ncvar_get(nc, varid='lon')
lat = ncvar_get(nc, varid='lat')
pft = ncvar_get(nc, varid='pft')
hour = ncvar_get(nc, varid='hour')
if (PFT_sum) hist_monthly = array(NaN, dim=c(length(lon), length(lat), n_month)) else hist_monthly = array(NaN, dim=c(length(lon), length(lat), n_month, length(pft)))
# Looping over months:
for (m in 1:n_month) {
print(paste0('Processing ', varid, ' for month = ', substr(as.character(month_vec[m]), start=1, stop=6)), quote=FALSE)
# Generate hourly data array for each month:
date_vec_sub = date_vec[which(floor(date_vec/1e2) == floor(month_vec[m]/1e2))]
hist_hourly = array(NaN, dim=c(length(lon), length(lat), length(pft), length(date_vec_sub)*length(hour)))
# Looping over days:
for (d in 1:length(date_vec_sub)) {
# Extract nc file:
ind_hr = ((d - 1)*length(hour) + 1):((d - 1)*length(hour) + length(hour))
filename = paste0(hist_data_dir, hist_name, '/hist_grid_', as.character(date_vec_sub[d]), '.nc')
nc = nc_open(filename)
hist_hourly[,,,ind_hr] = ncvar_get(nc, varid=varid)
nc_close(nc)
}
# Find monthly mean:
if (PFT_sum) {
hist_monthly_PFT = apply(hist_hourly, MARGIN=1:3, FUN=mean, na.rm=TRUE)
hist_monthly[,,m] = apply(hist_monthly_PFT*PFT_frac, MARGIN=1:2, FUN=sum, na.rm=TRUE)
} else {
hist_monthly[,,m,] = apply(hist_hourly, MARGIN=1:3, FUN=mean, na.rm=TRUE)
}
}
return(hist_monthly)
}
# timestamp()
# out2 = f_monthly_mean(hist_name='control_global_2010', start_date=20100601, end_date=20100831, varid='A_can')
# timestamp()
# # It requires ~100 seconds to finish 3 months.
################################################################################
# Function to find monthly mean of any daily statistic (e.g., daily max, min) from default hourly output data in nc files:
f_monthly_mean_stat = function(hist_name, start_date, end_date, varid, FUN=max, hist_data_dir='~/TGABI/Tai/TEMIR/hist_data/') {
# This function requires external functions: make.date.vec (from "tools.R"), f_daily_stat
# This function requires R packages: ncdf4
# There cannot be any missing dates in between the first and last days of a month.
# Therefore, "start_date" should always be the first day of a given month, and "end_date" should be the last day of a given month.
# Vector of simulation months:
date_vec = make.date.vec(start.date=start_date, end.date=end_date)
month_vec = unique(floor(date_vec/1e2))*1e2 + 1
# Number of simulation months:
n_month = length(month_vec)
# Define new data array:
filename = paste0(hist_data_dir, hist_name, '/hist_grid_', as.character(start_date), '.nc')
nc = nc_open(filename)
lon = ncvar_get(nc, varid='lon')
lat = ncvar_get(nc, varid='lat')
pft = ncvar_get(nc, varid='pft')
hour = ncvar_get(nc, varid='hour')
hist_monthly = array(NaN, dim=c(length(lon), length(lat), n_month, length(pft)))
# Looping over months:
for (m in 1:n_month) {
# Find daily statistic for each month:
date_vec_sub = date_vec[which(floor(date_vec/1e2) == floor(month_vec[m]/1e2))]
hist_daily = f_daily_stat(hist_name=hist_name, start_date=date_vec_sub[1], end_date=tail(date_vec_sub, 1), varid=varid, FUN=FUN, hist_data_dir=hist_data_dir)
# Find monthly mean of daily statistic:
hist_monthly[,,m,] = apply(hist_daily, MARGIN=c(1,2,4), FUN=mean, na.rm=TRUE)
}
return(hist_monthly)
}
# timestamp()
# out1 = f_monthly_mean_stat(hist_name='control_global_2010', start_date=20100601, end_date=20100831, varid='A_can')
# timestamp()
# # It requires ~390 seconds to finish 3 months.
################################################################################
### End of module
################################################################################
| e2caceb1631f13034df3fb2c24072de569cbc925 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ROC632/examples/ROC.Rd.R | 8982c98b4f6435439fd0250ee09d5edf1887e352 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 487 | r | ROC.Rd.R |
library(ROC632)
### Name: ROC
### Title: Estimation of the traditional ROC curves (without censoring)
### Aliases: ROC
### Keywords: ROC curve
### ** Examples
# import and attach the data example
X <- c(1, 2, 3, 4, 5, 6, 7, 8) # The value of the marker
Y <- c(0, 0, 0, 1, 0, 1, 1, 1) # The value of the binary outcome
ROC.obj <- ROC(status=Y, marker=X, cut.values=sort(X))
plot(ROC.obj$FP, ROC.obj$TP, ylab="True Positive Rates",
xlab="False Positive Rates", type="s", lwd=2)
| 3030ead456f623c034c9d16c4fbe51751d861d0a | 4b06cc5da85d381921c1ffedd44e08d9d6839a03 | /Data Science/corr.R | 08aaa3498e136501fbbcbe6a2c82d23918760dde | [] | no_license | muaoran/R | 2fe8881b95a71b0cdc68f952c942824c20e631b5 | 2fdf31ad33666d18c7c9500434f3f129bf9c2321 | refs/heads/master | 2021-01-10T06:51:50.553052 | 2015-12-26T10:31:34 | 2015-12-26T10:31:34 | 50,765,755 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 514 | r | corr.R |
corr <- function(directory, threshold = 0) {
  # Correlation between sulfate and nitrate for each monitor file whose number of
  # complete observations exceeds "threshold"
  dirt <- list.files(directory, full.names = TRUE)
  data_source <- vector(mode = "numeric", length = 0)
  for (i in 1:length(dirt)) {
    monitor_i <- read.csv(dirt[i])
    # number of rows where both sulfate and nitrate are observed
    corr_sum <- sum((!is.na(monitor_i$sulfate)) & (!is.na(monitor_i$nitrate)))
    monitor_i_1 <- monitor_i[which(!is.na(monitor_i$sulfate)), ]
    monitor_i_2 <- monitor_i_1[which(!is.na(monitor_i_1$nitrate)), ]
    if (corr_sum > threshold) {
      data_source <- c(data_source, cor(monitor_i_2$sulfate, monitor_i_2$nitrate))
    }
  }
  data_source
}
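# Usage sketch (the "specdata" directory name and the threshold value are illustrative
# assumptions, not part of the original script):
# cr <- corr("specdata", threshold = 150)
# summary(cr)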
| 69c5c2e341a06c3a11de375f7c3a89449b606a7f | 0ab233b9f40236e52ad2bb43dadd2ffca739aa8b | /R/parse_post.R | b63267de4cd1ecfbec2d4abdf1123bbd96b184cf | ["Apache-2.0"] | permissive | opencpu/opencpu | 49cada256c9a67a8ea8514d848986b5305f36172 | b8e9c840b90afb33abeae5c2a353339217cfdee2 | refs/heads/master | 2023-08-30T08:24:19.756598 | 2023-08-06T13:35:23 | 2023-08-06T13:35:23 | 10,206,132 | 384 | 62 | NOASSERTION | 2023-08-06T13:35:24 | 2013-05-21T21:45:12 | R | UTF-8 | R | false | false | 2,141 | r | parse_post.R |
parse_post <- function(reqbody, contenttype){
#check for no data
if(!length(reqbody)){
return(list())
}
  #strip the "Content-Type:" title from the header
contenttype <- sub("Content-Type: ?", "", contenttype, ignore.case=TRUE);
#invalid content type
if(!length(contenttype) || !nchar(contenttype)){
stop("No Content-Type header found.")
}
# test for multipart
if(grepl("multipart/form-data", contenttype, fixed=TRUE)){
return(multipart(reqbody, contenttype));
# test for url-encoded
} else if(grepl("x-www-form-urlencoded", contenttype, fixed=TRUE)){
if(is.raw(reqbody)){
return(webutils::parse_query(reqbody));
} else {
return(as.list(reqbody));
}
# test for json
} else if(grepl("^application/json", contenttype)){
if(is.raw(reqbody)){
jsondata <- rawToChar(reqbody);
} else {
jsondata <- reqbody;
}
if(!(is_valid <- validate(jsondata))){
stop("Invalid JSON was posted: ", attr(is_valid, "err"))
}
obj <- as.list(fromJSON(jsondata));
# test for protobuf
} else if(grepl("^application/r?protobuf", contenttype)){
if(is.raw(reqbody)){
obj <- protolite::unserialize_pb(reqbody);
} else {
stop("ProtoBuf payload was posted as text ??")
}
} else if(grepl("^application/rds", contenttype)){
obj <- readRDS(gzcon(rawConnection(reqbody)))
} else {
stop("POST body with unknown conntent type: ", contenttype);
}
# Empty POST data
if(is.null(obj))
obj <- as.list(obj)
if(!is.list(obj) || length(names(obj)) < length(obj)){
stop("JSON or ProtoBuf input should be a named list.")
}
return(lapply(obj, function(x){
if(is.null(x) ||
isTRUE(is.atomic(x) && length(x) == 1 &&
!length(dim(x))) && is.null(names(x))){
#primitives as expressions
return(deparse_atomic(x))
} else {
return(I(x))
}
}));
}
# base::deparse() mangles UTF-8 strings
deparse_atomic <- function(x){
if(is.character(x) && !is.na(x)){
str <- jsonlite::toJSON(x)
str <- sub("^\\[", "c(", str)
sub("\\]$", ")", str)
} else {
paste(deparse(x), collapse = "\n")
}
}
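# Minimal usage sketch (assumes jsonlite is attached so validate() and fromJSON() resolve,
# as they do in the package context; the request body below is purely illustrative):
# body <- charToRaw('{"x": [1, 2, 3], "label": "abc"}')
# args <- parse_post(body, "Content-Type: application/json")
# str(args)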
| 1c1256cf3737c73a727335395bc2b13c85a6688a | 8b885a8159c2a4cabd1555bb971fe7ceffb895f0 | /ui.R | ca9293dc8b45ade30344382c4dc1032698d7500a | [] | no_license | Frikster/mouseActionGrapher | c6459a941849e250d874a3c6ba99d3b4bcdc2eef | 2001d0bb39da53d865dfc278609dc0810159fe31 | refs/heads/master | 2020-12-28T21:28:42.949756 | 2015-09-08T19:06:06 | 2015-09-08T19:06:06 | 39,651,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,834 | r | ui.R |
rm(list = ls())
# Immediately enter the browser/some function when an error occurs
# options(error = some function)
library(shiny)
library(DT)
shinyUI(fluidPage(
titlePanel("MurphyLab"),
sidebarLayout(
sidebarPanel(
# fileInput('file1', 'Choose CSV File',
# accept=c('text/csv',
# 'text/comma-separated-values,text/plain',
# '.csv')),
# checkboxInput('header', 'Header', TRUE),
# radioButtons('sep', 'Separator',
# c(Comma=',',
# Semicolon=';',
# Tab='\t'),
# ','),
# radioButtons('quote', 'Quote',
# c(None='',
# 'Double Quote'='"',
# 'Single Quote'="'"),
# '"'),
# selectizeInput('tagChooser', 'Choose Tags to plot', choices = c("data not loaded"), multiple = TRUE),
# selectizeInput('actionsTracked', 'Choose Actions to plot', choices = c("data not loaded"), multiple = TRUE),
# textInput("control_rate",
# "Rate in seconds",
# value = 3600),
# actionButton("go", "Plot")
),
mainPanel(
# sliderInput(inputId = "opt.cex",
# label = "Point Size (cex)",
# min = 0, max = 2, step = 0.25, value = 1),
# sliderInput(inputId = "opt.cexaxis",
# label = "Axis Text Size (cex.axis)",
# min = 0, max = 2, step = 0.25, value = 1),
# plotOutput("plot1"),
# DT::dataTableOutput("plotTable"),
# downloadButton('downloadSubset', 'Download Subset (coming soon)')
)
)
)
)
| 074bde3403025868ce16eaa4c07c1d308887c6b2 | b75cdbee114168b86f64a51e3a8ca16433e30792 | /code/renaissance_palette.R | 7a34bdaa50cbb099be1d1e41f2112fc35cbd00d9 | [] | no_license | AndreaCirilloAC/dataviz | 1ccdee0c474260ec986c550193e3a6348238f2d0 | daffd43170f4ef3c44b8c6ded14d26323e184c38 | refs/heads/master | 2021-01-20T06:25:05.930740 | 2017-06-01T20:36:36 | 2017-06-01T20:36:36 | 89,876,329 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,099 | r | renaissance_palette.R |
library(pixmap)
library(dplyr)
library(scales)
#convert aminadab_rgb.jpg aminadab_rgb.ppm
painting_michelangelo <- read.pnm("images/profeta_daniele.ppm")
str(painting_michelangelo)
red_vector <- as.vector(painting_michelangelo@red)
green_vector <- as.vector(painting_michelangelo@green)
blue_vector <- as.vector(painting_michelangelo@blue)
data.frame(red_vector,green_vector,blue_vector) %>%
unique() -> rgb_triples
rgb_codes <- rgb(red = rgb_triples[,1],green = rgb_triples[,2], blue = rgb_triples[,3])
michelangelo_sample <- sample(rgb_codes,100)
painting_raffaello <- read.pnm("images/sacra_famiglia_canigiani.ppm")
red_vector <- as.vector(painting_raffaello@red)
green_vector <- as.vector(painting_raffaello@green)
blue_vector <- as.vector(painting_raffaello@blue)
data.frame(red_vector,green_vector,blue_vector) %>%
unique() -> rgb_triples
rgb_codes <- rgb(red = rgb_triples[,1],green = rgb_triples[,2], blue = rgb_triples[,3])
raffaello_sample <- sample(rgb_codes,100)
show_col(raffaello_sample)
show_col(michelangelo_sample)
| 258fc3ffa283dbfe3a7917209b716d6b7c7c7300 | 19499542a5d57031d3dc1f496ea0f80b14bc4a5f | /Discrete2/Output/2DUnitSquare/Rscript for UnitSquare.R | 21c3fc7342830d1a1ac151157ffc2fb12db59b88 | ["Apache-2.0"] | permissive | jakent4498/IDS6938-SimulationTechniques | 51168eb6cb05f9e33a843042961d9f6ff5121f52 | 2ecdbe57a51f8139839f0c22baf4c5bb34447e83 | refs/heads/master | 2021-01-09T06:16:11.443537 | 2017-04-24T02:03:06 | 2017-04-24T02:03:06 | 80,947,128 | 0 | 0 | null | 2017-02-04T20:42:02 | 2017-02-04T20:42:02 | null | UTF-8 | R | false | false | 1,255 | r | Rscript for UnitSquare.R |
setwd("C:/Users/jaken/OneDrive/Documents/Spring 2017 SimTech/IDS6938-SimulationTechniques/Discrete2/Output/2DUnitSquare")
library(ggplot2)
library(grid)
library(gridExtra)
jakdf1 <- read.csv("raw_results_ranlux48_2D-uniform2.txt", header=FALSE)
p1 <- ggplot(jakdf1) + geom_point( aes(x=jakdf1$V1, y=jakdf1$V2)) + xlab("ranlux48")
jakdf2 <- read.csv("raw_results_ranlux48_2D-uniform2N10000.txt", header=FALSE)
p2 <- ggplot(jakdf2) + geom_point( aes(x=jakdf2$V1, y=jakdf2$V2)) + xlab("ranlux48")
jakdf3 <- read.csv("raw_results_ranlux48_2D-uniform2N100000.txt", header=FALSE)
p3 <- ggplot(jakdf3) + geom_point( aes(x=jakdf3$V1, y=jakdf3$V2)) + xlab("ranlux48")
grid.arrange(p3,p2,p1, ncol=1)
jakdf11 <- read.csv("raw_results_minstd_rand_2D-uniform2N1000.txt", header=FALSE)
jakdf12 <- read.csv("raw_results_minstd_rand_2D-uniform2N10000.txt", header=FALSE)
jakdf13 <- read.csv("raw_results_minstd_rand_2D-uniform2N100000.txt", header=FALSE)
p11 <- ggplot(jakdf11) + geom_point(aes(x=jakdf11$V1, y=jakdf11$V2)) + xlab("minstd_rand")
p12 <- ggplot(jakdf12) + geom_point(aes(x=jakdf12$V1, y=jakdf12$V2)) + xlab("minstd_rand")
p13 <- ggplot(jakdf13) + geom_point(aes(x=jakdf13$V1, y=jakdf13$V2)) + xlab("minstd_rand")
grid.arrange(p3,p13,p2,p12,p1,p11, ncol=2)
| 0bc976e2fa5f45ccf0921f69e747b49b624d449a | 02e16d94c252fdcba74cd8bd397bdaae9d7758c7 | /man/faConfInt.Rd | 67ab62ccc5be4f32df950fa268c2a3a9739ffbbd | [] | no_license | Matherion/ufs | be53b463262e47a2a5c4bcbc47827f85aa0c4eb2 | 9138cab0994d6b9ac0cea327a572243d66487afb | refs/heads/master | 2020-03-24T21:11:05.053939 | 2019-02-12T10:33:48 | 2019-02-12T10:33:48 | 143,017,526 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,737 | rd | faConfInt.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/faConfInt.R
\name{faConfInt}
\alias{faConfInt}
\title{Extract confidence bounds from psych's factor analysis object}
\usage{
faConfInt(fa)
}
\arguments{
\item{fa}{The object produced by the \code{\link[psych:fa]{psych::fa()}} function from the
\link[psych:psych-package]{psych::psych-package} package. It is important that the \code{n.iter} argument
of \code{\link[psych:fa]{psych::fa()}} was set to a realistic number, because otherwise, no
confidence intervals will be available.}
}
\value{
A list of dataframes, one for each extracted factor, with in each
dataframe three variables: \item{lo}{lower bound of the confidence interval}
\item{est}{point estimate of the factor loading} \item{hi}{upper bound of
the confidence interval}
}
\description{
This function contains some code from a function in \link[psych:psych-package]{psych::psych-package}
that's not exported (\code{print.psych.fa.ci}) but is useful nonetheless. It
basically takes the outcomes of a factor analysis and extracts the
confidence intervals.
}
\details{
This function extracts confidence interval bounds and combines them with
factor loadings using the code from the \code{print.psych.fa.ci} in
\link[psych:psych-package]{psych::psych-package}.
}
\examples{
\dontrun{
### Not run because it takes too long to test and may produce warnings,
### both due to the bootstrapping required to generate the confidence
### intervals in fa
faConfInt(psych::fa(Thurstone.33, 2, n.iter=100, n.obs=100));
}
}
\author{
William Revelle (extracted by Gjalt-Jorn Peters)
Maintainer: Gjalt-Jorn Peters \href{mailto:[email protected]}{[email protected]}
}
| a1e76c911cf449adad8700f177fbe84c36c4dd25 | 2a04df4844316bcc181587008414b4a23277360f | /run_tests.R | 2f612b313fd73515ea305345645206c3093c740d | [] | no_license | jpalowitch/bmd | 633822e6866811ef51fe6c63f41053b20f127155 | 3888df7847ac7f9fbfb976c3ac656bb867a5bde4 | refs/heads/master | 2021-01-11T18:10:03.477170 | 2017-12-04T07:31:48 | 2017-12-04T07:31:48 | 79,506,895 | 0 | 1 | null | 2017-07-05T09:33:57 | 2017-01-19T23:50:43 | R | UTF-8 | R | false | false | 77 | r | run_tests.R |
library(testthat)
test_results <- test_dir("./tests", reporter = "summary")
| a0745952576b743eb1caebe88422ec6a1389e150 | 7ae8b04333b69534a08cd8af2d6a27229af73a3a | /ui.R | 4b97b1009c2860ecea2bdc925b7a99f20ea69890 | [] | no_license | Parkyuyoung/capstone2_F | 953cccefa6084b05411113cc4845fc0a6cd70d6d | 297828ed21089b844456382ea1e3fcff8eb2f380 | refs/heads/master | 2020-07-27T18:24:48.528306 | 2019-12-03T06:00:37 | 2019-12-03T06:00:37 | 209,185,562 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,615 | r | ui.R |
source("common.R")
jscode <- '
$(function() {
var $els = $("[data-proxy-click]");
$.each(
$els,
function(idx, el) {
var $el = $(el);
var $proxy = $("#" + $el.data("proxyClick"));
$el.keydown(function (e) {
if (e.keyCode == 13) {
$proxy.click();
}
});
}
);
});
'
### ui
shinyUI(navbarPage("Global Tactical Asset Allocation",
theme = shinythemes::shinytheme("united"),
# Main Page: Portfolio Return
tabPanel("Portfolio",
br(),
tabsetPanel(id = "inTabset", type = "tabs",
tabPanel("Search", value="tab1",
br(),
sidebarLayout(
sidebarPanel(
dateRangeInput('range', 'Date range',
start = '2008-01-01',
end = Sys.Date(),
min = '2008-01-01',
max = Sys.Date(),
format = "yyyy-mm-dd",
separator = " - "),
tags$head(tags$script(HTML(jscode))),
tagAppendAttributes(
#textInput("text", NULL, "foo"),
textInput("ticker", "자산 입력", "SPY"),
`data-proxy-click` = "btn_ewf"
),
actionButton("btn_ewf", "입력"),
actionButton("btn_ewf_delete", "삭제"),
#actionButton("test_btn_ewf", "테스트"),
p(" "),
verbatimTextOutput("nText"),
br(),
radioButtons("radioBtn1",
func_Title("radioBtn1"),
func_TitleList("radioBtn1"),
selected = 0),
radioButtons("radioBtn2",
func_Title("radioBtn2"),
func_TitleList("radioBtn2"),
selected = 0),
radioButtons("radioBtn3",
func_Title("radioBtn3"),
func_TitleList("radioBtn3"),
selected = 0),
radioButtons("radioBtn4",
func_Title("radioBtn4"),
func_TitleList("radioBtn4"),
selected = 0),
lapply(1:length(mychoice), function(i) {
column(5,
numericInput(inputId = paste0("numeric_rate_", i),
label = paste0("rate", i),
min = 0.1, max = 0.9, step = 0.1, value = 0.1, width='150px')
)
}),
fluidPage(
useShinyjs(),
numericInput(inputId = "numeric_momentum", label = "numeric_momentum",
min = 1, max = 10, step = 1, value = 1, width='100px')
),
fluidPage(
useShinyjs(),
numericInput(inputId = "numeric_multifac", label = "numeric_multifac",
min = 1, max = 10, step = 1, value = 3, width='100px')
),
fluidPage(
useShinyjs(),
numericInput(inputId = "numeric_min", label = "numeric_min",
min = 0.00, max = 0.9, step = 0.1, value = 0)
),
fluidPage(
useShinyjs(),
numericInput(inputId = "numeric_max", label = "numeric_max",
min = 0.00, max = 1, step = 1, value = 1)
),
p(" "),
sliderInput('sliderInput_lookback', 'Lookback',
min = 1, max = 100, step = 1, value = 12),
sliderInput('sliderInput_rebalancing', 'Rebalancing',
min = 1, max = 48, step = 1, value = 3),
sliderInput('sliderInput_fee', 'Trading cost',
min = 0.001, max = 0.01, step = 0.001, value = 0.003),
#actionButton("btn_preview", "Preview", placement="right"),
p(" "),
br(),
actionButton("goButton", "조회")
),
mainPanel(tabPanel("correlation", "상관관계(correlation)",
d3heatmapOutput("heatmap", width = "100%", height="500px")),
br(),br(),br(),br(),br(),br(),br(),
tabPanel("preview", "",
plotlyOutput("plot_preview"),
br(),
DT::dataTableOutput("dataTable_preview"))),
)
),
tabPanel("Cumulative Return", value="tab2",
br(),
plotlyOutput("port_ret"),
br(),
fluidRow(
column(12, tableOutput( "Performance_analysis"))
),
br(),
plotlyOutput("port_ret_yr"),
plotlyOutput("port_ret_yr2"),
br(),
fluidRow(
column(6, DT::dataTableOutput("port_table")),
column(6, DT::dataTableOutput("port_table_year"))
),
fluidRow(
column(1, offset = 10, downloadButton("download_monthly", "download(Monthly)")
)),
fluidRow(
column(1, offset = 10, downloadButton("download_yearly", "download(Yearly)")
))),
tabPanel("Weight", value="tab3",
br(),
plotlyOutput("wts_now"),
br(),
plotlyOutput("wts_hist"),
br(),
DT::dataTableOutput("wts_table")),
tabPanel("Raw Data", value="tab4",
br(),
plotlyOutput("plot_etf_raw"),
br(),
DT::dataTableOutput("dataTable_etf_raw"),
br(),
br())
)
),
# Author: Henry
tabPanel("About developer",
strong("홍성주"),
tags$ul(
tags$li("Phone nubmer : 010-8857-6301"),
tags$li("E-mail : [email protected]"),
tags$li("github : season0304"),
tags$li("major : Economics and Finance "),
br()
),
div(),
strong("박유영"),
tags$ul(
tags$li("Phone nubmer : 010-9616-4766"),
tags$li("E-mail : [email protected]"),
tags$li("github : parkyuyoung"),
tags$li("major : Data Analysis "),
br()
),
div(),
strong("김민찬"),
tags$ul(
tags$li("Phone nubmer : 010-2864-3564"),
tags$li("E-mail : [email protected]"),
tags$li("github : minclasse"),
tags$li("major : Computer Science "),
br()
),
div(),
strong("최영규"),
tags$ul(
tags$li("Phone nubmer : 010-2019-0700"),
tags$li("E-mail : [email protected]"),
tags$li("github : dudrb1418"),
tags$li("major : Computer Science "),
br()
)
)
))
| 984e00f1273c12d9ff3249db07fa84c9da89bbec | 40bd7bdcd28e05e842c77749b381eb78cbd459cc | /plot3.R | fde4c4ca32a381cecdeae9c2f629232ccae23b22 | [] | no_license | jackman1224/ExData_Plotting1 | f0f1d3610b4f1c5b9267c2b8f1063f733efab160 | 8a480d18306cfd63552abb88253aaafa5654eee9 | refs/heads/master | 2021-01-25T07:44:14.867667 | 2017-06-08T21:04:38 | 2017-06-08T21:04:38 | 93,657,387 | 0 | 0 | null | 2017-06-07T16:39:28 | 2017-06-07T16:39:28 | null | UTF-8 | R | false | false | 1,330 | r | plot3.R |
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, destfile = "./Dataset.zip")
unzip(zipfile = "./Dataset.zip", exdir = "./")
hpc <- read.table("C:/Users/jackman/Desktop/R Files/Exploratory Data Analysis/Week 1/Electric Power Consumption Exercise/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?", colClasses = c("character","character", "numeric", "numeric","numeric","numeric","numeric","numeric","numeric"))
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
hpc_subset <- subset(hpc, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
hpc_complete <- hpc_subset[complete.cases(hpc_subset),]
dateTime <- paste(hpc_complete$Date,hpc_complete$Time)
dateTime <- setNames(dateTime, "DateTime")
hpc_complete <- hpc_complete[,!(names(hpc_complete) %in% c("Date", "Time"))]
hpc_complete <- cbind(dateTime,hpc_complete)
hpc_complete$dateTime <- as.POSIXct(as.character(hpc_complete$dateTime, format = "%d/%m/%Y %H:%M:%S"))
plot(hpc_complete$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(hpc_complete$Sub_metering_2, col = "red")
lines(hpc_complete$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), lwd = c(1,1,1), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
| 1ca37e4769138d306464607fdbece955ec5a32e7 | 57ce4924de86c96cf663737ae5f0291fc616d0a4 | /Utils.R | c64b05cd46c31e3b14beac9a6b91aab706376971 | [] | no_license | lfmingo/GramAnt | a482fd49f7e9c4bdfabed9b7128ac813dba680f5 | 5525a8d0af863bf5bef90d9692f4aa503c829b2d | refs/heads/master | 2021-07-15T19:42:06.096865 | 2017-10-19T08:02:47 | 2017-10-19T08:02:47 | 107,306,274 | 0 | 1 | null | 2017-10-19T08:02:48 | 2017-10-17T18:10:43 | R | UTF-8 | R | false | false | 2,270 | r | Utils.R |
ReadBNFFile <- function(filename) {
# reads a bnf grammar file and returns a list structure
# read the file line by line
con=file(filename, open="r")
lines=readLines(con)
close(con)
# parse the lines
rule_list = list()
for (l in lines) {
l = trim_space(l)
gram_line = strsplit(l, "::=")[[1]]
if (length(gram_line) > 1) {
# split and trim rules
rules = strsplit(trim_space(gram_line[2]), "|", fixed=TRUE)
for (j in seq_along(rules[[1]])) {
rules[[1]][[j]] = trim_space(rules[[1]][[j]])
}
# add rules to list
i = length(rule_list) + 1
rule_list[[i]] = list()
rule_list[[i]][[1]] = trim_space(gram_line[[1]])
rule_list[[i]][[2]] = as.list(rules[[1]])
print(rule_list)
}
}
return (rule_list)
}
trim_brackets <- function (x) gsub("^<+|>+$", "", x)
trim_space <- function (x) gsub("^\\s+|\\s+$", "", x)
findRule <- function(grammar, non_terminal) {
idx <- grep(non_terminal,lapply(grammar, function(x) x[[1]]))
if (length(idx) == 0) return (NULL)
grammar[idx][[1]][[2]]
}
apply_rule <- function (expression, rule, consequent, first_NT) {
expr <- list()
idx <- grep(first_NT, expression)[1]
res <- as.list(strsplit(trim_space(rule[[consequent]]), " ", fixed=TRUE)[[1]])
if (idx > 1)
expr <- append(expr,expression[1:(idx-1)])
expr <- append(expr, res)
if (idx < length(expression))
expr <- append(expr,expression[(idx+1):length(expression)])
## cat("Original expression ", unlist(expression), "\n")
## cat(paste("Applying rule ", first_NT, " ::= ", unlist(rule[[consequent]])), "\n")
## cat("Obtained expression ", unlist(expr), "\n\n")
expr
}
iterate_rules <- function(expression, g) {
stop <- TRUE
solution <- FALSE
first_NT <- expression[grep ("<[[:alnum:]]+>", expression)[1]]
if (is.null(first_NT[[1]])) {
## cat(" ---- non_terminal NOT found ---- \n")
solution <- TRUE
} else {
rule <- findRule(g, first_NT)
if (is.null(rule)) {
## cat(paste(" ---- not rule found for expression ",unlist(expression),"\n"))
} else {
expression <- apply_rule(expression, rule, sample(1:length(rule), 1), first_NT)
stop = FALSE
}
}
list(expression, stop, solution)
}
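# Usage sketch (the grammar file name and the "<start>" symbol are assumptions for
# illustration; iterate_rules() returns list(expression, stop, solution)):
# g <- ReadBNFFile("grammar.bnf")
# expr <- list("<start>")
# repeat {
#   res <- iterate_rules(expr, g)
#   expr <- res[[1]]
#   if (res[[2]]) break
# }
# cat(unlist(expr), "\n")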
| 62624169c1036c65cf0af803203b40b21811bd65 | 51f14fb4b19eb9e5fd6b26552d128f1cc0ff9875 | /R/distributions.R | 018eee21246b0a737e2566ea88731196ce2fb095 | [] | no_license | zhaoxiaohe/greta | 62222ad5d73ae1328f7708d67e39890a2191c2fd | 1489a7272d041f97b780d844b889f1124d7a0726 | refs/heads/master | 2021-01-20T07:22:55.088857 | 2017-05-01T13:01:39 | 2017-05-01T13:01:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,698 | r | distributions.R |
flat_distribution <- R6Class (
'flat_distribution',
inherit = distribution,
public = list(
to_free = function (y) {
upper <- self$parameters$upper$value()
lower <- self$parameters$lower$value()
qlogis((y - lower) / (upper - lower))
},
tf_from_free = function (x, env) {
# cannot allow the upper and lower values to be nodes
# otherwise it would severely screw with the gradients
upper <- self$parameters$upper$value()
lower <- self$parameters$lower$value()
(1 / (1 + tf$exp(-1 * x))) * (upper - lower) + lower
},
initialize = function (lower = -1e6, upper = 1e6, dim) {
if (!(is.numeric(lower) & is.numeric(upper) &
is.finite(lower) & is.finite(upper) &
length(lower) == 1 & length(upper) == 1)) {
stop ('lower and upper must be finite scalars')
}
super$initialize('flat', dim)
self$add_parameter(lower, 'lower')
self$add_parameter(upper, 'upper')
},
tf_log_density_function = function (value, parameters)
tf$constant(0, dtype = tf$float32)
)
)
free_distribution <- R6Class (
'free_distribution',
inherit = distribution,
public = list(
to_free = function (y) y,
tf_from_free = function (x, env) x,
initialize = function (dim = 1)
super$initialize('free', dim),
tf_log_density_function = function (value, parameters)
tf$constant(0, dtype = tf$float32)
)
)
normal_distribution <- R6Class (
'normal_distribution',
inherit = distribution,
public = list(
to_free = function (y) y,
tf_from_free = function (x, env) x,
initialize = function (mean, sd, dim) {
# add the nodes as children and parameters
dim <- check_dims(mean, sd, target_dim = dim)
super$initialize('normal', dim)
self$add_parameter(mean, 'mean')
self$add_parameter(sd, 'sd')
},
tf_log_density_function = function (x, parameters) {
mean <- parameters$mean
var <- tf$square(parameters$sd)
-0.5 * tf$log(2 * pi) - 0.5 * tf$log(var) - 0.5 * tf$square(tf$subtract(mean, x)) / var
}
)
)
lognormal_distribution <- R6Class (
'lognormal_distribution',
inherit = distribution,
public = list(
to_free = log,
tf_from_free = function (x, env) tf$exp(x),
initialize = function (meanlog, sdlog, dim) {
dim <- check_dims(meanlog, sdlog, target_dim = dim)
super$initialize('lognormal', dim)
self$add_parameter(meanlog, 'meanlog')
self$add_parameter(sdlog, 'sdlog')
},
tf_log_density_function = function (x, parameters) {
mean <- parameters$meanlog
sd <- parameters$sdlog
var <- tf$square(sd)
lx <- tf$log(x)
-1 * (lx + tf$log(sd) + 0.9189385) +
-0.5 * tf$square(tf$subtract(lx, mean)) / var
}
)
)
bernoulli_distribution <- R6Class (
'bernoulli_distribution',
inherit = distribution,
public = list(
to_free = function (y)
stop ('cannot infer discrete random variables'),
tf_from_free = function (x, env)
stop ('cannot infer discrete random variables'),
initialize = function (prob, dim) {
# add the nodes as children and parameters
dim <- check_dims(prob, target_dim = dim)
super$initialize('bernoulli', dim, discrete = TRUE)
self$add_parameter(prob, 'prob')
},
tf_log_density_function = function (x, parameters) {
prob <- parameters$prob
# optionally reshape prob
prob_shape <- prob$get_shape()$as_list()
x_shape <- x$get_shape()$as_list()
# default to the supplied probability; tile it up to the shape of x only when prob is 1x1 and x is not
probs <- prob
if (identical(prob_shape, c(1L, 1L)) & !identical(x_shape, c(1L, 1L)))
probs <- tf$tile(prob, x_shape)
tf$log(tf$where(tf$equal(x, 1), probs, 1 - probs))
}
)
)
binomial_distribution <- R6Class (
'binomial_distribution',
inherit = distribution,
public = list(
to_free = function (y)
stop ('cannot infer discrete random variables'),
tf_from_free = function (x, env)
stop ('cannot infer discrete random variables'),
initialize = function (size, prob, dim) {
# add the nodes as children and parameters
dim <- check_dims(size, prob, target_dim = dim)
super$initialize('binomial', dim, discrete = TRUE)
self$add_parameter(size, 'size')
self$add_parameter(prob, 'prob')
},
tf_log_density_function = function (x, parameters) {
size <- parameters$size
prob <- parameters$prob
log_choose <- tf$lgamma(size + 1) - tf$lgamma(x + 1) -
tf$lgamma(size - x + 1)
log_choose + x * tf$log(prob) + (size - x) * tf$log(1 - prob)
}
)
)
poisson_distribution <- R6Class (
'poisson_distribution',
inherit = distribution,
public = list(
to_free = function (y)
stop ('cannot infer discrete random variables'),
tf_from_free = function (x, env)
stop ('cannot infer discrete random variables'),
initialize = function (lambda, dim) {
# add the nodes as children and parameters
dim <- check_dims(lambda, target_dim = dim)
super$initialize('poisson', dim, discrete = TRUE)
self$add_parameter(lambda, 'lambda')
},
tf_log_density_function = function (x, parameters) {
lambda <- parameters$lambda
x * tf$log(lambda) - lambda - tf$lgamma(x + 1)
}
)
)
negative_binomial_distribution <- R6Class (
'negative_binomial_distribution',
inherit = distribution,
public = list(
to_free = function (y)
stop ('cannot infer discrete random variables'),
tf_from_free = function (x, env)
stop ('cannot infer discrete random variables'),
initialize = function (size, prob, dim) {
# add the nodes as children and parameters
dim <- check_dims(size, prob, target_dim = dim)
super$initialize('negative_binomial', dim, discrete = TRUE)
self$add_parameter(size, 'size')
self$add_parameter(prob, 'prob')
},
tf_log_density_function = function (x, parameters) {
size <- parameters$size
prob <- parameters$prob
log_choose <- tf$lgamma(x + size) - tf$lgamma(x + 1) -
tf$lgamma(size)
log_choose + size * tf$log(prob) + x * tf$log(1 - prob)
}
)
)
gamma_distribution <- R6Class (
'gamma_distribution',
inherit = distribution,
public = list(
to_free = function (y) log(expm1(y)),
tf_from_free = function (x, env) tf_log1pe(x),
initialize = function (shape, rate, dim) {
# add the nodes as children and parameters
dim <- check_dims(shape, rate, target_dim = dim)
super$initialize('gamma', dim)
self$add_parameter(shape, 'shape')
self$add_parameter(rate, 'rate')
},
tf_log_density_function = function (x, parameters) {
shape <- parameters$shape
scale <- 1 /parameters$rate
-shape * tf$log(scale) - tf$lgamma(shape) +
(shape - 1) * tf$log(x) - x / scale
}
)
)
exponential_distribution <- R6Class (
'exponential_distribution',
inherit = distribution,
public = list(
to_free = function (y) log(expm1(y)),
tf_from_free = function (x, env) tf_log1pe(x),
initialize = function (rate, dim) {
# add the nodes as children and parameters
dim <- check_dims(rate, target_dim = dim)
super$initialize('exponential', dim)
self$add_parameter(rate, 'rate')
},
tf_log_density_function = function (x, parameters) {
rate <- parameters$rate
-1 * x / rate - tf$log(rate)
}
)
)
student_distribution <- R6Class (
'student_distribution',
inherit = distribution,
public = list(
to_free = function (y) y,
tf_from_free = function (x, env) x,
initialize = function (df, ncp, dim) {
# add the nodes as children and parameters
dim <- check_dims(df, ncp, target_dim = dim)
super$initialize('student', dim)
self$add_parameter(df, 'df')
self$add_parameter(ncp, 'ncp')
},
tf_log_density_function = function (x, parameters) {
df <- parameters$df
ncp <- parameters$ncp
const <- tf$lgamma((df + 1) * 0.5) - tf$lgamma(df * 0.5) -
0.5 * (tf$log(df) + log(pi))
const - 0.5 * (df + 1) * tf$log(1 + (1 / df) * (tf$square(x - ncp)))
}
)
)
beta_distribution <- R6Class (
'beta_distribution',
inherit = distribution,
public = list(
to_free = function (y) qlogis(y),
tf_from_free = function (x, env) tf_ilogit(x),
initialize = function (shape1, shape2, dim) {
# add the nodes as children and parameters
dim <- check_dims(shape1, shape2, target_dim = dim)
super$initialize('beta', dim)
self$add_parameter(shape1, 'shape1')
self$add_parameter(shape2, 'shape2')
},
tf_log_density_function = function (x, parameters) {
shape1 <- parameters$shape1
shape2 <- parameters$shape2
(shape1 - 1) * tf$log(x) +
(shape2 - 1) * tf$log(1 - x) +
tf$lgamma(shape1 + shape2) -
tf$lgamma(shape1) - tf$lgamma(shape2)
}
)
)
# need to add checking of mean and Sigma dimensions
multivariate_normal_distribution <- R6Class (
'multivariate_normal_distribution',
inherit = distribution,
public = list(
to_free = function (y) y,
tf_from_free = function (x, env) x,
initialize = function (mean, Sigma, dim) {
# coerce the parameter arguments to nodes and add as children and
# parameters
super$initialize('multivariate_normal', dim)
self$add_parameter(mean, 'mean')
self$add_parameter(Sigma, 'Sigma')
# check mean has the correct dimensions
if (self$parameters$mean$dim[1] != dim) {
stop (sprintf('mean has %i rows, but the distribution has dimension %i',
self$parameters$mean$dim[1], dim))
}
# check Sigma is square
if (self$parameters$Sigma$dim[1] != self$parameters$Sigma$dim[2]) {
stop (sprintf('Sigma must be square, but has %i rows and %i columns',
self$parameters$Sigma$dim[1],
self$parameters$Sigma$dim[2]))
}
# Sigma has the correct dimensions
if (self$parameters$Sigma$dim[1] != dim) {
stop (sprintf('Sigma has dimension %i, but the distribution has dimension %i',
self$parameters$Sigma$dim[1], dim))
}
},
tf_log_density_function = function (x, parameters) {
mean <- parameters$mean
Sigma <- parameters$Sigma
# number of observations & dimension of distribution
nobs <- x$get_shape()$as_list()[2]
dim <- x$get_shape()$as_list()[1]
# Cholesky decomposition of Sigma
L <- tf$cholesky(Sigma)
# whiten (decorrelate) the errors
diff <- x - mean
diff_col <- tf$reshape(diff, shape(dim, nobs))
alpha <- tf$matrix_triangular_solve(L, diff_col, lower = TRUE)
# calculate density
tf$constant(-0.5 * dim * nobs * log(2 * pi)) -
tf$constant(nobs, dtype = tf$float32) *
tf$reduce_sum(tf$log(tf$diag_part(L))) -
tf$constant(0.5) * tf$reduce_sum(tf$square(alpha))
}
)
)
# need to add checking of mean and Sigma dimensions
wishart_distribution <- R6Class (
'wishart_distribution',
inherit = distribution,
public = list(
# grab it in lower-triangular form, so it's upper when putting it back in python-style
to_free = function (y) {
L <- t(chol(y))
vals <- L[lower.tri(L, diag = TRUE)]
matrix(vals)
},
tf_from_free = function (x, env) {
dims <- self$parameters$Sigma$dim
L_dummy <- greta:::dummy(dims)
indices <- sort(L_dummy[upper.tri(L_dummy, diag = TRUE)])
values <- tf$zeros(shape(prod(dims), 1), dtype = tf$float32)
values <- greta:::recombine(values, indices, x)
L <- tf$reshape(values, shape(dims[1], dims[2]))
tf$matmul(tf$transpose(L), L)
},
initialize = function (df, Sigma, dim) {
# add the nodes as children and parameters
super$initialize('wishart', c(dim, dim))
self$add_parameter(df, 'df')
self$add_parameter(Sigma, 'Sigma')
# check Sigma is square
if (self$parameters$Sigma$dim[1] != self$parameters$Sigma$dim[2]) {
stop (sprintf('Sigma must be square, but has %i rows and %i columns',
self$parameters$Sigma$dim[1],
self$parameters$Sigma$dim[2]))
}
# Sigma has the correct dimensions
if (self$parameters$Sigma$dim[1] != dim) {
stop (sprintf('Sigma has dimension %i, but the distribution has dimension %i',
self$parameters$Sigma$dim[1], dim))
}
# make the initial value PD
self$value(unknowns(dims = c(dim, dim), data = diag(dim)))
},
tf_log_density_function = function (x, parameters) {
df <- parameters$df
Sigma <- parameters$Sigma
dist <- tf$contrib$distributions$WishartFull(df = df, scale = Sigma)
tf$reshape(dist$log_pdf(x), shape(1, 1))
}
)
)
# export constructors
#' @name greta-distributions
#' @title greta probability distributions
#' @description These probability distributions can be used to define random
#' variables in a greta model. They return a greta array object that can be
#' combined with other greta arrays to construct a model.
#'
#' @param mean,meanlog,ncp unconstrained parameters
#' @param sd,sdlog,size,lambda,shape,rate,df,shape1,shape2 positive parameters
#' @param prob probability parameter (\code{0 < prob < 1})
#' @param Sigma positive definite variance-covariance matrix parameter
#'
#' @param range a finite, length 2 numeric vector giving the range of values to
#' which \code{flat} distributions are constrained. The first element must
#' be lower than the second.
#'
#' @param dim the dimensions of the variable. For univariate distributions this
#' can be greater than 1 to represent multiple independent variables. For
#' multivariate distributions this cannot be smaller than 2.
#'
#' @details Most of these distributions have non-uniform probability densities,
#' however the distributions \code{flat} and \code{free} do not. These can
#' therefore be used as parameters in likelihood (rather than Bayesian)
#' inference.
#'
#' The discrete probability distributions (\code{bernoulli}, \code{binomial},
#' \code{negative_binomial}, \code{poisson}) can be used as likelihoods, but
#' not as unknown variables.
#'
#' Wherever possible, the parameterisation of these distributions matches the
#' those in the \code{stats} package. E.g. for the parameterisation of
#' \code{negative_binomial()}, see \code{\link{dnbinom}}.
#'
#' @examples
#'
#' # a fixed distribution, e.g. for a prior
#' mu = normal(0, 1)
#'
#' # an unconstrained, positive parameter sigma
#' log_sigma = free()
#' sigma = exp(log_sigma)
#'
#' # a hierarchical distribution
#' theta = normal(mu, lognormal(0, 1))
#'
#' # a vector of 3 variables drawn from the same hierarchical distribution
#' thetas = normal(mu, sigma, dim = 3)
#'
#' # a matrix of 12 variables drawn from the same hierarchical distribution
#' thetas = normal(mu, sigma, dim = c(3, 4))
#'
#' # a constrained variable with no density (e.g. for a constrained likelihood model)
#' theta = flat(c(1, 5))
#'
#' # a multivariate normal variable, with correlation between two elements
#' Sig <- diag(4)
#' Sig[3, 4] <- Sig[4, 3] <- 0.6
#' theta = multivariate_normal(rep(mu, 4), Sig, dim = 4)
#'
#' # a Wishart variable with the same covariance parameter
#' theta = wishart(df = 5, Sigma = Sig, dim = 4)
NULL
#' @rdname greta-distributions
#' @export
normal <- function (mean, sd, dim = NULL)
ga(normal_distribution$new(mean, sd, dim))
#' @rdname greta-distributions
#' @export
lognormal <- function (meanlog, sdlog, dim = NULL)
ga(lognormal_distribution$new(meanlog, sdlog, dim))
#' @rdname greta-distributions
#' @export
bernoulli <- function (prob, dim = NULL)
ga(bernoulli_distribution$new(prob, dim))
#' @rdname greta-distributions
#' @export
binomial <- function (size, prob, dim = NULL)
ga(binomial_distribution$new(size, prob, dim))
#' @rdname greta-distributions
#' @export
negative_binomial <- function (size, prob, dim = NULL)
ga(negative_binomial_distribution$new(size, prob, dim))
#' @rdname greta-distributions
#' @export
poisson <- function (lambda, dim = NULL)
ga(poisson_distribution$new(lambda, dim))
#' @rdname greta-distributions
#' @export
gamma <- function (shape, rate, dim = NULL)
ga(gamma_distribution$new(shape, rate, dim))
#' @rdname greta-distributions
#' @export
exponential <- function (rate, dim = NULL)
ga(exponential_distribution$new(rate, dim))
#' @rdname greta-distributions
#' @export
student <- function (df, ncp, dim = NULL)
ga(student_distribution$new(df, ncp, dim))
#' @rdname greta-distributions
#' @export
beta <- function (shape1, shape2, dim = NULL)
ga(beta_distribution$new(shape1, shape2, dim))
#' @rdname greta-distributions
#' @export
free <- function (dim = 1)
ga(free_distribution$new(dim))
#' @rdname greta-distributions
#' @export
flat <- function (range, dim = 1) {
if (is.greta_array(range))
stop ('range must be fixed, and cannot be another greta array')
if (!(is.vector(range) && length(range) == 2 &&
is.numeric(range) && range[1] < range[2])) {
stop ('range must be a length 2 numeric vector in ascending order')
}
ga(flat_distribution$new(lower = range[1], upper = range[2], dim = dim))
}
#' @rdname greta-distributions
#' @export
multivariate_normal <- function (mean, Sigma, dim)
ga(multivariate_normal_distribution$new(mean, Sigma, dim))
#' @rdname greta-distributions
#' @export
wishart <- function (df, Sigma, dim)
ga(wishart_distribution$new(df, Sigma, dim))
| fe7c72b46691dbc28ceb0408374f6b325a0330a2 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Wintersteiger/RankingFunctions/rankfunc39_signed_32/rankfunc39_signed_32.R | 4e3785e8fb841422477867282dffbb522a165911 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 835 | r | rankfunc39_signed_32.R |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 7401
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7383
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 7383
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/RankingFunctions/rankfunc39_signed_32.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2892
c no.of clauses 7401
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 7383
c
c QBFLIB/Wintersteiger/RankingFunctions/rankfunc39_signed_32.qdimacs 2892 7401 E1 [605 606 769 770 1159 1160 1225 1226 1782 1783 1976 1977 2170 2171 2364 2365 2558 2559] 0 320 2552 7383 RED
| 17b8609592345a64d455705ccfeb88bdb347b4d5 | 51f891721c5ad00748780d3bf5df9018c7537277 | /other/R/server.R | 17474d2e7b009d1421855f27bef48ad3db723d5e | [] | no_license | rmutalik/VisualAnalytics | a6e573db02260f86bdb7972e5f2c67ac4a9b43de | ed1a1ffd71cf57004f9f1a76e8b8bf8000eed78a | refs/heads/master | 2022-11-27T09:02:20.005811 | 2020-08-04T02:29:33 | 2020-08-04T02:29:33 | 276,780,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 364 | r | server.R |
server <- function(input, output, session) {
output$plot <- renderLeaflet({
leaflet() %>%
addTiles() %>%
setView(-30, 30, zoom = 2)
})
output$map <- renderPlotly({
plot_geo(geo_ports, lat = ~lat, lon = ~lng) %>%
add_markers(
text = ~paste(paste("Slaves: ", n_slaves_arrived)),
hoverinfo = "text"
)
})
}
| 334fd0bf63295a58764acc2929ff247b0a64e65b | f663a843dcd66b1d4e15bfe6b9a6f618a169c3f7 | /fluoro/R/helper_functs.R | c27e23af7ef32e9b24c17eb351615db755d6483c | ["MIT"] | permissive | rhlee12/Fluoro-Package | 44556f53aaf7a455aa9229138b11367143e90903 | 07d6f88df2a56ad9220d12de96ee53b9e2cfedae | refs/heads/master | 2021-03-30T17:56:33.852014 | 2018-05-30T22:18:51 | 2018-05-30T22:18:51 | 118,687,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,147 | r | helper_functs.R |
gen.seq=function(raw.eem){
  em=as.numeric(raw.eem[3:length(raw.eem[,1]), 1]) # HEADER IS NEEDED
  ex=as.numeric(raw.eem[1, 2:length(raw.eem)]) # HEADER IS NEEDED
return(list(em=em,ex=ex))
}
raman.correct=function(raman){
  raman.begin=as.numeric(raman[3,1]) #raman start wavelength
raman.end=as.numeric(raman[length(raman[,1]),1]) # raman end wavelength
no.head.raman=raman[3:length(raman[,1]),]
#trim.raman=as.numeric(raman[-c(1,2),]) #USE FULL SCAN?
trim.raman=data.frame(no.head.raman[which(as.numeric(no.head.raman[,1])>=370),]) #pegged at a start of 370 nm
for(i in 1:length(trim.raman)){
trim.raman[,i]=as.numeric(trim.raman[,i])
}
r.sum=0
for( i in 1:(length(trim.raman[,1])-1)){ #This integrates from RamanBegin to RamanEnd.
y0 = as.numeric(trim.raman[i, 3])
y1 = as.numeric(trim.raman[i+1, 3])
dx = as.numeric(trim.raman[i+1, 1]) - as.numeric(trim.raman[i, 1])
r.sum = r.sum + dx * (y0 + y1)/2;
}
base.rect=(trim.raman[1,3]+trim.raman[length(trim.raman[,1])-1,1])/2*(trim.raman[length(trim.raman[,1])-1,1]-trim.raman[1,1])
raman.area=r.sum-base.rect
return(raman.area)
}
# Normalize the blank to the raman (F4 only)
# Takes a blank file and raman.area, the output of raman.correct
blank.raman.norm=function(blank, raman.area){
trim.blank=blank[3:length(blank[,1]), 2:length(blank)]
for(i in 1:length(trim.blank)){
trim.blank[,i]=as.numeric(trim.blank[,i])
}
blank.normal.raman=trim.blank/raman.area
return(blank.normal.raman)
}
ifc=function(raw.eem, corrected.trim.uv){
seq=fluoro:::gen.seq(raw.eem)
ex.abs=corrected.trim.uv[corrected.trim.uv[,1] %in% seq$ex,2]
em.abs=corrected.trim.uv[corrected.trim.uv[,1] %in% seq$em,2]
#empty data frame
IFC=data.frame(matrix(data=NA, nrow=length(seq$em), ncol = length(seq$ex)))
for(ex in 1:length(ex.abs)){
for(em in 1:length(em.abs)){
IFC[em, ex]=em.abs[em]+ex.abs[ex]
}
}
return(IFC)
}
##Emission Excitation Correction for F2/F3 EEM-like objects
em.ex.corr=function(eem, em.corr.file, ex.corr.file){
em.corr = as.numeric(unlist(readxl::read_excel(em.corr.file, col_names = F))) #Emission Correction
ex.corr = as.numeric(unlist(readxl::read_excel(ex.corr.file, col_names = F))) #Excitation Correction
Y=diag(em.corr)
X=diag(ex.corr)
int.eem=t(as.matrix(eem) %*% X) # Make sure all this squares w/ Matlab code
corr.eem=data.frame(t(int.eem %*% Y)) # Make sure all this squares w/ Matlab code
ic.eem=corr.eem %>% `colnames<-`(value=colnames(eem)) %>% `rownames<-`(value=rownames(eem))
return(ic.eem)
}
mask.300=function(eem.dir){
eems=list.files(path = eem.dir, pattern = "_c.csv", recursive = T, full.names = T) #find all corrected files
load.eems=lapply(eems, fluoro::read.corr.eem)
sub.300=function(x){
x["300","300"]=((x["300", "290"]+x["300", "310"])/2)
return(x)
}
fixed.eems=lapply(load.eems, sub.300)
for(i in 1:length(fixed.eems)){
write.csv(fixed.eems[[i]], file = eems[i], row.names = T)
}
}
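# Usage sketch (file names are assumptions; assumes the raw blank and Raman scans were read
# into data frames with the two header rows retained, as the functions above expect):
# raman <- read.csv("raman_scan.csv", header = FALSE, stringsAsFactors = FALSE)
# blank <- read.csv("blank_eem.csv", header = FALSE, stringsAsFactors = FALSE)
# raman.area <- raman.correct(raman)
# blank.norm <- blank.raman.norm(blank, raman.area)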
| 98e1380143aae6609fb8837e1a0e51d3f5f4f318 | 697e3ac9cbe9010ed9b50f356a2cddf5ed8cc8a0 | /R/vreq_classic_methods.R | e64c75562690afe93db1db17eda9d15ccb5b54cf | [] | no_license | reumandc/tsvr | 4c2b2b0c9bbbb191ae55058648da87589bc25e01 | f8f7a72d4f8ba40e881e78a1a2fb53791d227d21 | refs/heads/master | 2021-06-01T14:27:34.757125 | 2021-01-08T17:09:11 | 2021-01-08T17:09:11 | 132,662,500 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,609 | r | vreq_classic_methods.R |
#' Basic methods for the \code{vreq_classic} class
#'
#' Set, get, summary, and print methods for the \code{vreq_classic} class.
#'
#' @param object,x,obj An object of class \code{vreq_classic}
#' @param newval A new value, for the \code{set_*} methods
#' @param ... Not currently used. Included for argument consistency
#' with existing generics.
#'
#' @return \code{summary.vreq_classic} produces a summary of a \code{vreq_classic} object.
#' A \code{print.vreq_classic} method is also available. For \code{vreq_classic} objects,
#' \code{set_*} and \code{get_*} methods are available for all slots (see
#' the documentation for \code{vreq_classic} for a list). The \code{set_*} methods
#' just throw an error, to prevent breaking the consistency between the
#' slots of a \code{vreq_classic} object.
#'
#' @author Daniel Reuman, \email{reuman@@ku.edu}
#'
#' @references
#' Peterson (1975) Stability of species and of community for the benthos of two lagoons. Ecology 56, 958-965.
#'
#' @seealso \code{\link{vreq_classic}}
#'
#' @examples
#' X<-matrix(runif(10*100),10,100)
#' res<-vreq_classic(X)
#' print(res)
#' summary(res)
#'
#' @name vreq_classic_methods
NULL
#> NULL
#' @rdname vreq_classic_methods
#' @export
summary.vreq_classic<-function(object,...)
{
res<-list(class="vreq_classic",
com=get_com(object),
comnull=get_comnull(object),
vr=get_vr(object))
#a summary_tsvr object inherits from the list class, but has its own print method
class(res)<-c("summary_tsvr","list")
return(res)
}
#' @rdname vreq_classic_methods
#' @export
print.vreq_classic<-function(x,...)
{
cat(paste0("Object of class vreq_classic:\n CVcom2: ",get_com(x),"\n CVcomip2: ",get_comnull(x),"\n classic vr: ",get_vr(x)))
}
#' @rdname vreq_classic_methods
#' @export
set_com.vreq_classic<-function(obj,newval)
{
stop("Error in set_com: vreq_classic slots should not be changed individually")
}
#' @rdname vreq_classic_methods
#' @export
set_comnull.vreq_classic<-function(obj,newval)
{
stop("Error in set_comnull: vreq_classic slots should not be changed individually")
}
#' @rdname vreq_classic_methods
#' @export
set_vr.vreq_classic<-function(obj,newval)
{
stop("Error in set_vr: vreq_classic slots should not be changed individually")
}
#' @rdname vreq_classic_methods
#' @export
get_com.vreq_classic<-function(obj)
{
return(obj$com)
}
#' @rdname vreq_classic_methods
#' @export
get_comnull.vreq_classic<-function(obj)
{
return(obj$comnull)
}
#' @rdname vreq_classic_methods
#' @export
get_vr.vreq_classic<-function(obj)
{
return(obj$vr)
}
| f58c4397653f21c4808387d9a0b1e3b0c1657cf8 | eaa977b7723a7ea9d54f0d2fff0a250b702445d1 | /tools/analysisTools.r | efe0a0a9a5544f1bd5594b72f721d3b840801d76 | ["BSD-3-Clause"] | permissive | hyunjimoon/laplace_manuscript | 894e5620870814b9bb43ddaeabac5001428b6716 | f0e967a38d8562e3d1d2b646dfcc0387c0912ace | refs/heads/master | 2022-11-07T08:51:59.245244 | 2020-04-28T21:22:44 | 2020-04-28T21:22:44 | 275,398,155 | 0 | 0 | BSD-3-Clause | 2020-06-27T15:17:43 | 2020-06-27T15:17:42 | null | UTF-8 | R | false | false | 4,527 | r | analysisTools.r |
#########################################################################
## Tools to analyze results from the cluster
select_lambda <- function(parm, quant, n_select) {
p <- ncol(parm)
n <- nrow(parm)
quantile_parm <- rep(NA, p)
for (i in 1:p) quantile_parm[i] <- sort(parm[, i])[quant * n]
selected <- sort(quantile_parm, decreasing = T)[1:n_select]
which(quantile_parm %in% selected)
}
construct_plot_data <- function(parm, nIter, nChains, names) {
iteration <- rep(1:nIter, nChains)
chain <- rep(1:nChains, each = nIter)
posterior.sample <- data.frame(parm, iteration, as.factor(chain))
# names(posterior.sample) <- c(paste0("log_lambda[", index, "]"), "iteration", "chain")
names(posterior.sample) <- names
posterior.sample <- posterior.sample %>%
gather(key = parameter, value = value, -chain, -iteration)
}
trace_plot <- function(posterior.sample) {
trace.plot <- ggplot(data = posterior.sample,
aes(x = iteration, y = value, color = chain)) +
geom_line() + theme_bw() + facet_wrap(~ parameter)
print(trace.plot)
}
density_hist <- function(posterior.sample, bins = 30) {
density.plot <- ggplot(data = posterior.sample,
aes(x = value, color = chain, fill = chain)) +
geom_histogram(alpha = 0.25, position = "identity", bins = bins) +
theme_bw() + facet_wrap(~ parameter) +
theme(text = element_text(size = 15))
print(density.plot)
}
quant_select_plot <- function(parm, quant, threshold = 3.5) {
index <- 1:ncol(parm)
parm_quant <- apply(parm, 2, quantile, quant)
ggplot(data = data.frame(index = index, parm_quant = parm_quant),
aes(x = index, y = parm_quant)) + geom_point(size = 0.25) +
geom_text(aes(label = ifelse(parm_quant > threshold, index, '')),
hjust = 0, vjust = 0) +
theme_bw() + theme(text = element_text(size = 18))
}
quant_select_plot2 <- function(parm1, parm2, quant, threshold = 3.5, alpha = 0.05,
x = 0.95, y = 0.8, index_offset = 0) {
index <- c(1:ncol(parm1), 1:ncol(parm2)) + index_offset
parm_quant <- c(apply(parm1, 2, quantile, quant),
apply(parm2, 2, quantile, quant))
method <- c(rep("(full) HMC", ncol(parm1)), rep("HMC + Laplace", ncol(parm2)))
ggplot(data = data.frame(index = index, parm_quant = parm_quant, method = method),
aes(x = index, y = parm_quant, color = method)) +
geom_text(aes(label = ifelse(parm_quant > threshold, index, '')),
hjust = 0, vjust = 0) +
geom_point(size = 0.25, alpha = alpha) +
theme_bw() +
theme(
legend.position = c(x, y),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6),
text = element_text(size = 15)
) + xlab("covariate index") + ylab("90th quantile")
}
summary_table <- function(log_lambda, tau, caux, index) {
standard_post <- data.frame(log_lambda, tau, caux)
names(standard_post) <- c(paste0("log_lambda[", index, "]"),
"tau", "caux")
standard_post <- as_draws(standard_post)
draw_summary <- summarise_draws(standard_post)
  # note: relies on `stanfit` and `stanfit2` being available in the calling environment
  time <- sum(colSums(get_elapsed_time(stanfit))) +
    sum(colSums(get_elapsed_time(stanfit2)))
draw_summary$eff_bulk <- draw_summary$ess_bulk / time
draw_summary$eff_tail <- draw_summary$ess_tail / time
draw_summary
}
sample_comparison_plot <- function(plot_data) {
ggplot(data = plot_data) +
geom_histogram(aes(x = value, fill = method), alpha = 0.5, color = "black",
bins = 30, position = "identity") + theme_bw() +
facet_wrap(~key, scale = "free", ncol = 1, labeller = "label_parsed") +
theme(
legend.title = element_blank(),
legend.position = c(1, 0.79),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6),
text = element_text(size = 18)
)
}
eff_comparison_plot <- function(plot_data, x = 0.95, y = 0.98) {
ggplot(data = plot_data,
aes(x = parameter, y = eff, fill = method)) +
geom_bar(stat = "identity", width = 0.3, alpha = 0.8, position = "dodge") +
# facet_wrap(~ parameter, scale = "free", nrow = 1) +
theme_bw() + theme(text = element_text(size = 10)) + coord_flip() +
ylab("ESS / s") + xlab(" ") +
theme(
legend.position = c(x, y),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6)
)
}
|
e9f780038ed35a03c7e13d0a03c0887d1ed9dfec
|
5cdbcc53194772da16c7af453dd6ebb6e108c4b1
|
/metrics/report/report_dockerfile/test.R
|
8c9dabe01e588d2ed3abe2e18215e89b77ddb72b
|
[
"Apache-2.0"
] |
permissive
|
clearlinux/cloud-native-setup
|
591b2ba543db9bb76f19fc4d2e28d535490d0f83
|
9e3697308ee3555aec1b6ee44cd5fb7ecc026946
|
refs/heads/master
| 2023-06-08T15:55:00.573561 | 2022-09-21T16:49:34 | 2022-11-15T21:39:32 | 160,404,934 | 60 | 82 |
Apache-2.0
| 2022-09-21T16:57:42 | 2018-12-04T18:58:55 |
Shell
|
UTF-8
|
R
| false | false | 190 |
r
|
test.R
|
suppressMessages(library(jsonlite)) # to load the data.
options(digits=22)
x=fromJSON('{"ns": 1567002188374607769}')
print(x)
print(fromJSON('{"ns": 1567002188374607769}'), digits=22)
|
70ac8ccd49c776155d9a2e73701ab110b9f894f5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ssdtools/examples/is.fitdists.Rd.R
|
80abb0c2489d40a8424e752f52372b7a6584fd1c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 161 |
r
|
is.fitdists.Rd.R
|
library(ssdtools)
### Name: is.fitdists
### Title: Is fitdists
### Aliases: is.fitdists
### ** Examples
is.fitdists(boron_lnorm)
is.fitdists(boron_dists)
|
c9b2108344fba1718729ca31d29d6c6cc63439de
|
ecf1aa864dfc40840f5b0c98965f7d55875e135f
|
/MODULES/ESTIMATE/areasPlot.R
|
491c4c25067039e56beaba5dd3e4a9bc1672b246
|
[] |
no_license
|
VeenDuco/shinyforcmdstan
|
78b03ec5cd2378ab594ab1a7552a655f70ca3462
|
74da0751f7958d08a969d05c17d168e91a2ecd18
|
refs/heads/main
| 2023-02-09T22:56:25.189305 | 2021-01-04T19:16:23 | 2021-01-04T19:16:23 | 325,535,205 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 5,657 |
r
|
areasPlot.R
|
areasPlotUI <- function(id){
ns <- NS(id)
tagList(
wellPanel(
fluidRow(
column(width = 6,
selectizeInput(
inputId = ns("diagnostic_param"),
label = h5("Parameter"),
multiple = TRUE,
choices = .make_param_list_with_groups(sso),
selected = if(length(sso@param_names) > 9) sso@param_names[1:10] else sso@param_names
)
),
column(width = 4),
column(width = 2, align = "right")
),
fluidRow(
align = "right",
plotOptionsUI(ns("options"))
)
),
plotOutput(ns("plot1")),
checkboxInput(ns("showCaption"), "Show Caption", value = TRUE),
hidden(
uiOutput(ns("caption"))
),
hr(),
# checkboxInput(ns("report"), "Include in report?")
downloadButton(ns('downloadPlot'), 'Download Plot', class = "downloadReport"),
downloadButton(ns('downloadRDS'), 'Download RDS', class = "downloadReport")
)
}
areasPlot <- function(input, output, session){
visualOptions <- callModule(plotOptions, "options", estimatePlots = TRUE,
intervalOptions = TRUE, areasOptions = TRUE)
param <- debounce(reactive(unique(.update_params_with_groups(params = input$diagnostic_param,
all_param_names = sso@param_names))),
500)
include <- reactive(input$report)
observe({
toggle("caption", condition = input$showCaption)
})
plotOut <- function(parameters, plotType){
validate(
need(length(parameters) > 0, "Select at least one parameter.")
)
if(plotType == "Areas"){
out <- mcmc_areas(
sso@posterior_sample[(1 + sso@n_warmup) : sso@n_iter, , ],
pars = parameters,
point_est = tolower(visualOptions()$point_est),
prob = visualOptions()$inner_ci / 100,
prob_outer = visualOptions()$outer_ci / 100,
area_method = visualOptions()$areas_type
)
}
if(plotType == "Ridges"){
out <- mcmc_areas_ridges(
sso@posterior_sample[(1 + sso@n_warmup) : sso@n_iter, , ],
pars = parameters,
prob = visualOptions()$inner_ci / 100,
prob_outer = visualOptions()$outer_ci / 100
)
}
out
}
output$plot1 <- renderPlot({
save_old_theme <- bayesplot_theme_get()
color_scheme_set(visualOptions()$color)
bayesplot_theme_set(eval(parse(text = select_theme(visualOptions()$theme))))
out <- plotOut(parameters = param(), plotType = visualOptions()$areas_ridges)
bayesplot_theme_set(save_old_theme)
out
})
captionOut <- function(parameters){
# HTML(paste0(if(length(parameters) == 1) {"This is an area plot of <i>"} else {"These are area plots of <i>"},
# paste(parameters[1:(length(parameters)-1)], collapse = ", "),
# if(length(parameters) > 1) {"</i> and <i>"},
# if(length(parameters) > 1) {parameters[length(parameters)]},"</i>", ".",
# " The outer edges denote the ", visualOptions()$outer_ci, "% credibility interval.",
# " The inner edges denote the ", visualOptions()$inner_ci, "% credibility interval.",
# if(visualOptions()$point_est != "None") {paste0(" The point estimate denotes the posterior ",
# tolower(visualOptions()$point_est), ".")}
# ))
HTML(paste0("This is an area plot. The outer edges denote the ",
visualOptions()$outer_ci, "% posterior uncertainty interval (credible interval).",
" The inner edges denote the ", visualOptions()$inner_ci, "% interval.",
if(visualOptions()$point_est != "None") {paste0(" The point estimate is the posterior ",
tolower(visualOptions()$point_est), ".")}))
}
output$caption <- renderUI({
captionOut(parameters = param())
})
output$downloadPlot <- downloadHandler(
filename = 'areasPlot.pdf',
content = function(file) {
# ggsave(file, gridExtra::arrangeGrob(grobs = downloadSelection()))
pdf(file)
save_old_theme <- bayesplot_theme_get()
color_scheme_set(visualOptions()$color)
bayesplot_theme_set(eval(parse(text = select_theme(visualOptions()$theme))))
out <- plotOut(parameters = param(), plotType = visualOptions()$areas_ridges)
bayesplot_theme_set(save_old_theme)
print(out)
dev.off()
})
output$downloadRDS <- downloadHandler(
filename = 'areasPlot.rds',
content = function(file) {
save_old_theme <- bayesplot_theme_get()
color_scheme_set(visualOptions()$color)
bayesplot_theme_set(eval(parse(text = select_theme(visualOptions()$theme))))
out <- plotOut(parameters = param(), plotType = visualOptions()$areas_ridges)
bayesplot_theme_set(save_old_theme)
saveRDS(out, file)
})
return(reactive({
if(include() == TRUE){
# customized plot options return without setting the options for the other plots
save_old_theme <- bayesplot_theme_get()
color_scheme_set(visualOptions()$color)
bayesplot_theme_set(eval(parse(text = select_theme(visualOptions()$theme))))
out <- list(plot = plotOut(parameters = param(),
plotType = visualOptions()$areas_ridges),
caption = captionOut(parameters = param()))
bayesplot_theme_set(save_old_theme)
out
} else {
NULL
}
}))
}
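# Usage sketch (illustrative only): one way this module pair might be wired into
# an app. As in the rest of the file, the shinystan object `sso` and the helper
# functions called inside the module are assumed to exist in the app environment.
if (FALSE) {
  ui <- fluidPage(areasPlotUI("areas"))
  server <- function(input, output, session) {
    callModule(areasPlot, "areas")
  }
  shinyApp(ui, server)
}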
|
165fe120eeabeab0d68bbec1649a45a5e9c4c315
|
9531bf05292a40e21835d3e63de124846635fdd0
|
/dataScience_sujet2.4.R
|
2ba2189f0f2255f59b30c643efd0445aa53272be
|
[] |
no_license
|
florinePrat/Projet-data-science
|
865eb8f6ec1a1bd0fac765a2f0f9216ea12c53a9
|
888edcc16f1ffb24b8a9651829295ff1fdd2b95b
|
refs/heads/main
| 2023-02-16T20:49:31.293057 | 2021-01-11T15:17:02 | 2021-01-11T15:17:02 | 315,313,576 | 0 | 0 | null | null | null | null |
ISO-8859-1
|
R
| false | false | 1,665 |
r
|
dataScience_sujet2.4.R
|
# Title : Projet data science
# Objective : TODO
# Created by: Florine | Timi | Axel
# Created on: 23/11/2020
##### test with CSV
dfGlobal <- read.csv2("C:/Users/Axel/Desktop/projetDS/Projet-data-science-main/BddBruteConfinement.csv", header = TRUE, encoding = 'UTF-8')
## Get the structure of the dataframe
str(dfGlobal)
## Get the summary of the dataframe
summary(dfGlobal)
### head
head(dfGlobal)
## Questions to remove: 1/2/3/(4)/5/6/(7)/8/9/(11)/12//21/(22)/[23-27]/[28-32]/[43-47]/[54-58]/[59-63]/([64-68])/74/86/88/(123)/(124)/136/137/138
library(FactoMineR)
library(missMDA)
dfColImpact<-read.csv2("C:/Users/Axel/Desktop/projetDS/df_Colonnes_Impactantes.csv", header = TRUE, encoding = 'UTF-8')
dfComplet <- imputeFAMD(dfColImpact,ncp=2)
res <- FAMD(dfColImpact,tab.disj=dfComplet$tab.disj)
# Correspondence analysis (FAMD/CA) to find which columns have the greatest impact on the class
# tableauKhiTotal[i] contains the result of the chi-squared test between the class and column i
dfQuali<-read.csv2("C:/Users/Axel/Desktop/projetDS/df_Qualitatives.csv", header = TRUE, encoding = 'UTF-8', check.names = FALSE)
classe<-dfQuali[,1]
tableauKhiTotal <- c()
tableauContingence <- c()
for (i in 2:ncol(dfQuali)) {
tableauContingence[[i-1]]<-table(classe,dfQuali[,i])
tableauKhiTotal[[i-1]]<-chisq.test(tableauContingence[[i-1]])
}
# Display the index and name of the columns for which the chi-squared test returns a p-value < 5%
for(i in 1:70){
if(tableauKhiTotal[[i]][["p.value"]]<0.05){
print(paste(i,paste(colnames(dfQuali[i+1]),tableauKhiTotal[[i]][["p.value"]],sep=" ; p-value : " ),sep=". "))
}
}
res<-CA(tableauContingence[[32]])
plot(res)
|
da5e1778d02cb60c8df49ec30336a1ad851ff8df
|
941bcfc6469da42eec98fd10ad1f3da4236ec697
|
/man/track_bearing.Rd
|
ea6f83467fd7f9912547444aeff8480ca7824abd
|
[] |
no_license
|
cran/traipse
|
29c3fd65e98f65049da98b1d878512bfdd93940f
|
01635fd40512f2144e1ce712e0f5912143214e49
|
refs/heads/master
| 2022-10-22T02:59:19.828085 | 2022-10-10T06:40:02 | 2022-10-10T06:40:02 | 236,953,410 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 1,045 |
rd
|
track_bearing.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/track_bearing.R
\name{track_bearing}
\alias{track_bearing}
\title{Track bearing}
\usage{
track_bearing(x, y)
}
\arguments{
\item{x}{longitude}
\item{y}{latitude}
}
\value{
a numeric vector of absolute bearing in degrees, see Details
}
\description{
Calculate sequential bearing on longitude, latitude input vectors. The unit of bearing is degrees.
}
\details{
By convention the last value is set to the missing value \code{NA}, because the bearing
applies to the segment extending from the current location.
To use this on multiple track ids, use a grouped data frame with tidyverse code like
\code{data \%>\% group_by(id) \%>\% mutate(turn = track_bearing(lon, lat))}.
Absolute bearing is relative to North (0), and proceeds clockwise positive and anti-clockwise
negative \verb{N = 0, E = 90, S = +/-180, W = -90}.
The last value will be \code{NA} as the bearing is relative to the first point of each segment.
}
\examples{
track_bearing(trips0$x, trips0$y)[1:10]
}
|
29460efddd88558f396e14b091654ceef27fc3cb
|
5a698b4cf5e86426da354a51c5c1582a99b1450a
|
/man/print.occdat.Rd
|
41ad15ba9ceba4b8c79516869f552031f0fa51a7
|
[
"MIT"
] |
permissive
|
jarioksa/spocc
|
721edf2bc0bf5070fcca5dcbea5bb2c558bab5f1
|
1f18a697abdd3d634a1b10e180b9bbab68bcc197
|
refs/heads/master
| 2021-01-15T20:08:46.390092 | 2014-09-10T20:44:51 | 2014-09-10T20:44:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 600 |
rd
|
print.occdat.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{print.occdat}
\alias{print.occdat}
\title{Print brief summary of occ function output}
\usage{
\method{print}{occdat}(x, ...)
}
\arguments{
\item{x}{Input...}
\item{...}{Ignored.}
}
\description{
Print brief summary of occ function output
}
\examples{
\dontrun{
spnames <- c('Accipiter striatus', 'Setophaga caerulescens', 'Spinus tristis')
out <- occ(query = spnames, from = 'gbif', gbifopts = list(hasCoordinate=TRUE))
print(out)
out # gives the same thing
# you can still drill down into the data easily
out$gbif$meta
out$gbif$data
}
}
|
3bfa6e22f5054283363e8c352720616459912c7b
|
c5a08892d45ce23f54771eafe379ed843363f27e
|
/man/changejoint.Rd
|
08a26f057a59ebf4dcb37af471d1e2594a83fa5d
|
[] |
no_license
|
cran/StratigrapheR
|
9a995ea399e97a449bb94a5c8bb239935b108da0
|
aff0937f9ee8d0976fc67a46768b32379cf0274b
|
refs/heads/master
| 2023-07-26T21:02:30.211546 | 2023-07-05T23:14:06 | 2023-07-05T23:14:06 | 163,700,147 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 1,448 |
rd
|
changejoint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/changejoint.R
\name{changejoint}
\alias{changejoint}
\title{Change the dimensions of bedding joints}
\usage{
changejoint(
joint,
yinv = F,
xinv = F,
yleft = NA,
yright = NA,
ymin = NA,
ymax = NA,
xmin = NA,
xmax = NA
)
}
\arguments{
\item{joint}{the bedding joint to be modified}
\item{yinv, xinv}{whether to invert the plotting for x and y values (T or F)}
\item{yleft, yright}{the depth/height/time value for the extreme point at the
right or left of the joint (yleft overruns yright, which overruns ymin and
ymax)}
\item{ymin, ymax}{the extreme values for the y axis (in case of conflict with
yleft and/or yright, defaults to the smallest exaggeration)}
\item{xmin, xmax}{the extreme values for the x axis}
}
\description{
Change the dimensions of bedding joints
}
\examples{
# Create an initial litholog ----
l <- c(-2,-1,0,1,2)
r <- c(-1,0,1,2,3)
h <- c(4,3,4,3,4)
i <- c("B1","B2","B3","B4","B5")
log <- litholog(l, r, h, i)
# Get a custom bedding joint to specific dimensions using changejoint() ----
liq <- changejoint(oufti99$liquefaction,
yleft = 0, ymax = 0.3,
xmin = 1, xmax = 2)
nlog <- weldlog(log, dt = 0, seg = list(liq = liq), j = c("liq"))
# Plots for visualisation ----
plot.new()
plot.window(xlim = c(0,5), ylim = c(-2,3))
axis(1)
axis(2)
multigons(nlog$i, nlog$xy, nlog$dt)
}
|
bccd1ad9a488c7e4849d956dc13861ffb9ad3112
|
42fd9b059f4ee5e9a0c043d8813db9b240f53ba0
|
/tests/testthat/test-fpShapesGp.R
|
288fe5521cd584d60e2ed85db10a37de4350f717
|
[] |
no_license
|
X-FLOWERRR/forestplot
|
85db51ccdafcb4a0eccd43276ffd9ad7b02ad0cc
|
b0b25f2c5db6f7ba68de759d7ea275ea0d2886ac
|
refs/heads/master
| 2023-07-15T13:57:25.991719 | 2021-08-25T20:01:16 | 2021-08-25T20:01:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,553 |
r
|
test-fpShapesGp.R
|
library('testthat')
context('fpShapesGp')
test_that("Check fpShapesGp can be used as shapes_gp parameter",
{
expect_silent(
forestplot(labeltext = cbind(Author=c("Smith et al","Smooth et al", "al et al")),
mean=cbind(1:3, 1.5:3.5), lower=cbind(0:2, 0.5:2.5), upper=cbind(4:6,5.5:7.5),
is.summary=c(FALSE,TRUE,FALSE),grid=TRUE,new_page=TRUE,
xticks=c(1,2,3,4,5),
col=fpColors(box="blue",lines="pink", summary="orange",
zero="yellow",
text="gray",
axes="green", hrz_lines="violet"),
hrzl_lines=list(gpar(col="blue",lwd=2),gpar(col="black",lwd=2),gpar(col="blue",lwd=2),gpar(col="black",lwd=2)),
shapes_gp=fpShapesGp(
default=gpar(lineend="square", linejoin="mitre", lwd=3),
lines=list(gpar(lineend="square", linejoin="mitre", lwd=10, col=rgb(0,0.7,0), lty="dotted"),
gpar(lineend="square", linejoin="mitre", lwd=5, col=rgb(0,0.9,0.9), lty="dotted"),
gpar(lwd=8),gpar(lwd=7),
gpar(lwd=6),gpar(lwd=1)
),
vertices=gpar(lty="dotted"),
box=list(gpar(fill="orange", col="red"), gpar(fill="red", col="orange")),
summary=list(gpar(fill="violet", col="gray", lwd=10), gpar(fill="orange", col="gray", lwd=10)),
axes=gpar(col="yellow",lwd=10),
hrz_lines=gpar(col="red",lwd=10, lty="dashed"),
zero=gpar(col="violet",lwd=10,lty="dashed"),
grid=list(gpar(col="blue",lty="dotted",lwd=7), gpar(col="red",lty="dotted",lwd=5), gpar(col="orange",lty="dotted",lwd=3), gpar(col="orange",lty="dotted",lwd=2), gpar(col="orange",lty="dotted",lwd=1))
),
fn.ci_sum=fpDrawBarCI,
fn.ci_norm=fpDrawPointCI,
vertices=TRUE
)
)
})
|
cb8fc58f14f4f13c25639e4d70aa38a72538d491
|
34a991f4b3ecbfcb5b55bf3f6be91b20646863dd
|
/man/digits.Rd
|
41fdb2fe46cbe9c20309a9f026a75d22e2d5724d
|
[] |
no_license
|
cran/RnavGraphImageData
|
fb0cfc9b922c6715ba0f318ad566ad8864a6caeb
|
efebb5f84ba4820f62e5d87ceb991d28c9b3756b
|
refs/heads/master
| 2020-05-17T12:14:59.706496 | 2018-05-15T20:09:03 | 2018-05-15T20:09:03 | 17,693,370 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 318 |
rd
|
digits.Rd
|
\name{digits}
\docType{data}
\alias{digits}
\title{USPS Handwritten Digits}
\description{
8-bit 16x16 grayscale images of "0" through "9"; 1100 examples of each class.
}
\usage{digits}
\format{Data frame with one image per column.}
\source{\url{http://www.cs.nyu.edu/~roweis/data.html}}
\keyword{datasets}
|
1bedc93e2ffa79540cf597276909a4e4d1ffa3f5
|
930a64ae51ba9c4052bcd2b6d4392ff70f98bde8
|
/UniPennState_GeneralizedLinearModels/TwoWayTable/VitaminC_high.R
|
8e64e9886305144e64824ddab738d719f169f516
|
[] |
no_license
|
statisticallyfit/RStatistics
|
1e9f59a1ebef597d4c73f3cf10bed5170126d83b
|
93915cc141c4cb2b465d301d44695b8ce0ad35f8
|
refs/heads/master
| 2020-06-26T02:29:51.113577 | 2019-10-18T18:00:14 | 2019-10-18T18:00:14 | 74,606,344 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 3,716 |
r
|
VitaminC_high.R
|
source('/datascience/projects/statisticallyfit/github/learningstatistics/RStatistics/StatsFormulas.R')
ski <- matrix(c(310, 170, 1090, 1220), ncol=2,
dimnames=list(Treatment=c("Placebo", "VitaminC"),
Cold=c("Cold", "NoCold")))
ski
# Percentage of Row and Col and of Total observations in each cell
## SAME FOR HIGH, LOW, AND MEDIUM data files
percentage <- 100 * ski / sum(ski)
rowSums <- rowSums(ski); rowSums
rowPercentage <- 100 * rbind(ski[1,] / rowSums[1], ski[2,] / rowSums[2])
colSums <- colSums(ski); colSums
colPercentage <- 100 * cbind(ski[,1]/colSums[1], ski[,2]/colSums[2])
percentage
rowPercentage
colPercentage
# Chi-Squared test of Independence with Yates continuity correction
result <- chisq.test(ski); result
result$observed
result$expected
result$residuals
# Chi-Squared test of Independence WITHOUT Yates continuity correction
result <- chisq.test(ski, correct=FALSE); result
result$observed
result$expected
result$residuals
# Likelihood-Ratio Test
likelihoodRatioTest(ski)
# Column 1 Risk Estimates
rowSums
colSums
# placebo_cold / placebo = 310 / 1400 = 0.2214
risk1.col1 <- ski[1,1] / rowSums[1]; risk1.col1 <- unname(risk1.col1)
risk1.col1
# vitC_cold / vitC = 170 / 1390 = 0.1223
risk2.col1 <- ski[2,1] / rowSums[2]; risk2.col1 <- unname(risk2.col1)
risk2.col1
rho1 <- risk1.col1 / risk2.col1; rho1 <- unname(rho1)
rho1
# overall proportion with a cold: (placebo cold + vitC cold) / total
total1 <- colSums[1] / sum(rowSums); total1 <- unname(total1)
total1
# 170/1390 - 310/1400
diff1 <- risk2.col1 - risk1.col1; diff1 # difference of proportions
cold <- rbind(risk1.col1, risk2.col1, total1, diff1)
colnames(cold) <- "Cold"
cold
# Confidence interval of difference in proportions of column 1 (cold)
SE_diff1 <- sqrt(risk1.col1 * (1 - risk1.col1) / unname(rowSums[1]) +
risk2.col1 * (1 - risk2.col1) / unname(rowSums[2]))
SE_diff1
z.crit <- qnorm(0.975); z.crit
CI_diff1 <- cbind(diff1 - z.crit * SE_diff1, diff1 + z.crit*SE_diff1); CI_diff1
# Column 2 risk estimates (No Cold)
# placebo_nocold / placebo = 1090/1400
risk1.col2 <- ski[1,2] / rowSums[1]; risk1.col2 <- unname(risk1.col2)
risk1.col2
# vitC_nocold / vitC = 1220/1390
risk2.col2 <- ski[2,2] / rowSums[2]; risk2.col2 <- unname(risk2.col2)
risk2.col2
# nocold / total = 2310 / 2790
total2 <- colSums[2] / sum(colSums); total2 <- unname(total2)
total2
# 1220/1390 - 1090/1400
diff2 <- risk2.col2 - risk1.col2; diff2
noCold <- rbind(risk1.col2, risk2.col2, total2, diff2)
colnames(noCold) <- "NoCold"
noCold
# Confidence interval for difference of proportions for col 2 (no cold)
SE_diff2 <- sqrt(risk1.col2*(1-risk1.col2)/unname(rowSums[1]) +
risk2.col2*(1-risk2.col2)/unname(rowSums[2]))
SE_diff2
CI_diff2 <- cbind(diff2 - z.crit*SE_diff2, diff2 + z.crit*SE_diff2); CI_diff2
# Estimate of odds of the two rows
odds1 <- risk2.col1 / risk1.col1; odds1 # (170/1390) / (310/1400)
odds2 <- risk2.col2 / risk1.col2; odds2 # (1220/1390) / (1090/1400)
# Odds Ratio - cold to no cold
oddsRatio.cold.none <- odds1 / odds2; oddsRatio.cold.none
# Confidence Interval of odds ratio
log_CI <- cbind(log(oddsRatio.cold.none) - z.crit*sqrt(sum(1/ski)),
log(oddsRatio.cold.none) + z.crit*sqrt(sum(1/ski)))
log_CI
oddsRatio.cold.none_CI <- exp(log_CI); oddsRatio.cold.none_CI
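# Cross-check (illustrative): base R's fisher.test() also gives an odds-ratio
# estimate (conditional MLE) with a confidence interval for a 2x2 table; note its
# estimate may be the reciprocal of the one above depending on table orientation.
fisher.test(ski)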
##################################################################################
# Using the vcd package
library(vcd)
# to get deviance, pearson chi, and others
assocstats(ski)
likelihoodRatioTest(ski)
chisq.test(ski, correct = FALSE)
# odds ratio, confint
oddsratio(ski, log=FALSE)
lor <- oddsratio(ski); lor
confint(lor) # CI on log scale
exp(confint(lor)) # CI on basic scale
|
489467328a20eae0ec7119bd108f55bf0ed0a88d
|
8ea8dd82beb390c5ae59d32acaf854067e2f310a
|
/tests/testthat/test-4-execution.R
|
77979a6442b79e11b8091f10a08365f8019f31bd
|
[
"MIT"
] |
permissive
|
hongooi73/AzureDSVM
|
91d9f69e8ad30f8d589f49f734422a5d8496e319
|
3553b5581dd640513a37101bb71a8170498f1809
|
refs/heads/master
| 2021-07-06T12:24:35.772109 | 2017-10-02T17:00:31 | 2017-10-02T17:00:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 4,617 |
r
|
test-4-execution.R
|
# test remote execution on a Linux DSVM with specified computing context.
if(interactive()) library("testthat")
library(AzureSMR)
settingsfile <- getOption("AzureSMR.config")
config <- read.AzureSMR.config()
timestamp <- format(Sys.time(), format="%y%m%d%H%M")
context("Remote execution")
asc <- createAzureContext()
with(config,
setAzureContext(asc, tenantID=tenantID, clientID=clientID, authKey=authKey)
)
azureAuthenticate(asc)
# create a new resource group.
resourceGroup_name <- paste0("AzureDSVMtest_", timestamp)
location <- "southeastasia"
res <- azureCreateResourceGroup(asc,
location=location,
resourceGroup=resourceGroup_name)
dsvm_size <- "Standard_D1_v2"
dsvm_name <- paste0("dsvm",
paste(sample(letters, 3), collapse=""))
dsvm_password <- "AzureDSVM_test123"
dsvm_username <- "dsvmuser"
message("Remote execution is via SSH which relies on public key cryptography.
        The test presumes that there is a private key in the user's /home/.ssh/
        directory. A public key is derived from that private key and used by SSH
        for authentication purposes.")
# pubkey key extraction.
dsvm_pubkey <- pubkey_gen()
# code to execute.
code <- "x <- seq(1, 500); y <- x * rnorm(length(x), 0, 0.1); print(y)"
temp_script <- tempfile("AzureDSVM_test_execute_", fileext=".R")
temp_script <- gsub("\\\\", "/", temp_script)
file.create(temp_script)
writeLines(code, temp_script)
context("- Remote execution on a single Linux DSVM.")
test_that("remote execution on a single Linux DSVM", {
deployDSVM(asc,
resource.group=resourceGroup_name,
location=location,
hostname=dsvm_name,
username=dsvm_username,
size=dsvm_size,
authen="Key",
pubkey=dsvm_pubkey,
mode="Sync")
res <- executeScript(asc,
resource.group=resourceGroup_name,
hostname=dsvm_name,
remote=paste(dsvm_name,
location,
"cloudapp.azure.com",
sep="."),
username=dsvm_username,
script=temp_script,
compute.context="localSequential")
expect_true(res)
res <- executeScript(asc,
resource.group=resourceGroup_name,
hostname=dsvm_name,
remote=paste(dsvm_name,
location,
"cloudapp.azure.com",
sep="."),
username=dsvm_username,
script=temp_script,
compute.context="localParallel")
expect_true(res)
operateDSVM(asc,
resource.group=resourceGroup_name,
hostname=dsvm_name,
operation="Delete")
})
context("- Remote execution on a cluster of Linux DSVMs.")
test_that("remote execution on a cluster of Linux DSVMs", {
  message("Remote execution is via SSH which relies on public key cryptography.
        The test presumes that there is a private key in the user's /home/.ssh/
        directory. A public key is derived from that private key and used by SSH
        for authentication purposes.")
deployDSVMCluster(asc,
resource.group=resourceGroup_name,
location=location,
hostname=dsvm_name,
username=dsvm_username,
size=dsvm_size,
authen="Key",
pubkey=dsvm_pubkey,
count=3)
dsvms <- azureListVM(asc,
resourceGroup=resourceGroup_name,
location=location)
dsvm_names <- dsvms$name
dsvm_fqdns <- paste(dsvm_names,
location,
"cloudapp.azure.com",
sep=".")
res <- executeScript(asc,
resource.group=resourceGroup_name,
hostname=dsvm_names,
remote=dsvm_fqdns[1],
master=dsvm_fqdns[1],
slaves=dsvm_fqdns[-1],
username=dsvm_username,
script=temp_script,
compute.context="clusterParallel")
expect_true(res)
})
azureDeleteResourceGroup(asc, resourceGroup = resourceGroup_name)
|
eb661256ca930ee65b532b3018d090f0533c665c
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/jeroen/rgdal/sp_gdal.R
|
6023bb3916c746d58eac8e8a5c58dd4df98d858a
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 21,199 |
r
|
sp_gdal.R
|
GDALinfo <- function(fname, silent=FALSE, returnRAT=FALSE, returnCategoryNames=FALSE, returnStats=TRUE, returnColorTable=FALSE, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=NULL, returnScaleOffset=TRUE, allowedDrivers=NULL, options=NULL) {
if (nchar(fname) == 0) stop("empty file name")
x <- GDAL.open(fname, silent=silent,
allowedDrivers=allowedDrivers, options=options)
d <- dim(x)[1:2]
dr <- getDriverName(getDriver(x))
# p4s <- .Call("RGDAL_GetProjectionRef", x, PACKAGE="rgdal")
p4s <- getProjectionRef(x, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=OVERRIDE_PROJ_DATUM_WITH_TOWGS84)
if (nchar(p4s) == 0) p4s <- as.character(NA)
gt <- .Call('RGDAL_GetGeoTransform', x, PACKAGE="rgdal")
if (attr(gt, "CE_Failure") && !silent)
warning("GeoTransform values not available")
nbands <- .Call('RGDAL_GetRasterCount', x, PACKAGE="rgdal")
mdata <- .Call('RGDAL_GetMetadata', x, NULL, PACKAGE="rgdal")
subdsmdata <- .Call('RGDAL_GetMetadata', x, "SUBDATASETS",
PACKAGE="rgdal")
if (nbands < 1) {
# warning("no bands in dataset")
df <- NULL
} else {
band <- 1:nbands
GDType <- character(nbands)
hasNoDataValues <- logical(nbands)
NoDataValues <- numeric(nbands)
blockSize1 <- integer(nbands)
blockSize2 <- integer(nbands)
if (returnStats) {
Bmin <- rep(as.numeric(NA), nbands)
Bmax <- rep(as.numeric(NA), nbands)
Bmn <- rep(as.numeric(NA), nbands)
Bsd <- rep(as.numeric(NA), nbands)
}
# Pix <- character(nbands)
if (returnRAT) RATlist <- vector(mode="list", length=nbands)
if (returnCategoryNames)
CATlist <- vector(mode="list", length=nbands)
if (returnColorTable)
colTabs <- vector(mode="list", length=nbands)
#RH 4feb2013
if (returnScaleOffset) {
scaleOffset <- matrix(0, ncol=2, nrow=nbands)
colnames(scaleOffset) <- c('scale', 'offset')
}
for (i in seq(along = band)) {
raster <- getRasterBand(x, band[i])
GDType[i] <- .GDALDataTypes[(.Call("RGDAL_GetBandType",
raster, PACKAGE="rgdal"))+1]
bs <- getRasterBlockSize(raster)
blockSize1[i] <- bs[1]
blockSize2[i] <- bs[2]
if (returnStats) {
statsi <- .Call("RGDAL_GetBandStatistics", raster, silent,
PACKAGE="rgdal")
if (is.null(statsi)) {
Bmin[i] <- .Call("RGDAL_GetBandMinimum", raster,
PACKAGE="rgdal")
Bmax[i] <- .Call("RGDAL_GetBandMaximum", raster,
PACKAGE="rgdal")
} else {
Bmin[i] <- statsi[1]
Bmax[i] <- statsi[2]
Bmn[i] <- statsi[3]
Bsd[i] <- statsi[4]
}
}
if (returnRAT) {
RATi <- .Call("RGDAL_GetRAT", raster, PACKAGE="rgdal")
if (!is.null(RATi)) RATlist[[i]] <- RATi
}
if (returnCategoryNames) {
CATi <- .Call("RGDAL_GetCategoryNames", raster,
PACKAGE="rgdal")
if (!is.null(CATi)) CATlist[[i]] <- CATi
}
if (returnColorTable) {
colTabs[[i]] <- getBandColorTable(raster)
}
#RH 4feb2013
if (returnScaleOffset) {
scaleOffset[i,1] <- .Call('RGDAL_GetScale', raster,
PACKAGE="rgdal")
scaleOffset[i,2] <- .Call('RGDAL_GetOffset', raster,
PACKAGE="rgdal")
}
NDV <- .Call("RGDAL_GetBandNoDataValue", raster,
PACKAGE="rgdal")
if (is.null(NDV)) {
hasNoDataValues[i] <- FALSE
} else {
hasNoDataValues[i] <- TRUE
NoDataValues[i] <- NDV[1]
}
# Pix[i] <- .Call("RGDAL_GetBandMetadataItem",
# raster, "PIXELTYPE", "IMAGE_STRUCTURE", PACKAGE="rgdal")
}
df <- data.frame(GDType=GDType, hasNoDataValue=hasNoDataValues,
NoDataValue=NoDataValues, blockSize1=blockSize1,
blockSize2=blockSize2)
if (returnStats) df <- cbind(df, data.frame(Bmin=Bmin,
Bmax=Bmax, Bmean=Bmn, Bsd=Bsd))
}
GDAL.close(x)
# res <- c(rows=d[1], columns=d[2], bands=nbands, ll.x=gt[1], ll.y=gt[4],
# res.x=abs(gt[2]), res.y=abs(gt[6]), oblique.x=abs(gt[3]),
# oblique.y=abs(gt[5]))
### Modified: MDSumner 22 November 2008
cellsize = abs(c(gt[2], gt[6]))
ysign <- sign(gt[6])
offset.y <- ifelse(ysign < 0, gt[4] + ysign * d[1] * abs(cellsize[2]),
gt[4] + abs(cellsize[2]))
res <- c(rows = d[1], columns = d[2], bands = nbands, ll.x = gt[1],
ll.y = offset.y, res.x = abs(gt[2]), res.y = abs(gt[6]),
oblique.x = abs(gt[3]), oblique.y = abs(gt[5]))
#### end modification
attr(res, "ysign") <- ysign
attr(res, "driver") <- dr
attr(res, "projection") <- p4s
attr(res, "file") <- fname
attr(res, "df") <- df
attr(res, "sdf") <- returnStats
attr(res, "mdata") <- mdata
attr(res, "subdsmdata") <- subdsmdata
if (returnRAT) attr(res, "RATlist") <- RATlist
if (returnCategoryNames) attr(res, "CATlist") <- CATlist
if (returnColorTable) attr(res, "ColorTables") <- colTabs
#RH 4feb2013
if (returnScaleOffset) attr(res, "ScaleOffset") <- scaleOffset
class(res) <- "GDALobj"
res
}
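# Usage sketch (illustrative only; "my_raster.tif" is a placeholder file name).
if (FALSE) {
  info <- GDALinfo("my_raster.tif", silent = TRUE)
  print(info)   # dispatches to print.GDALobj() defined below
}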
print.GDALobj <- function(x, ...) {
cat("rows ", x[1], "\n")
cat("columns ", x[2], "\n")
cat("bands ", x[3], "\n")
cat("lower left origin.x ", x[4], "\n")
cat("lower left origin.y ", x[5], "\n")
cat("res.x ", x[6], "\n")
cat("res.y ", x[7], "\n")
cat("ysign ", attr(x, "ysign"), "\n")
cat("oblique.x ", x[8], "\n")
cat("oblique.y ", x[9], "\n")
cat("driver ", attr(x, "driver"), "\n")
cat("projection ", paste(strwrap(attr(x, "projection")),
collapse="\n"), "\n")
cat("file ", attr(x, "file"), "\n")
if (!is.null(attr(x, "df"))) {
cat("apparent band summary:\n")
print(attr(x, "df")[,1:5])
}
if (attr(x, "sdf")) {
cat("apparent band statistics:\n")
print(attr(x, "df")[,6:9])
}
if (!is.null(attr(x, "ScaleOffset"))) {
somat <- attr(x, "ScaleOffset")
rws <- which(somat[,1] != 1 | somat[,2] != 0)
if (any(rws)) {
cat("ScaleOffset:\n")
rownames(somat) <- paste("band", 1:nrow(somat), sep="")
print(somat[rws,])
}
}
if (!is.null(attr(x, "mdata"))) {
cat("Metadata:\n")
cv <- attr(x, "mdata")
for (i in 1:length(cv)) cat(cv[i], "\n")
}
if (!is.null(attr(x, "subdsmdata"))) {
cat("Subdatasets:\n")
cv <- attr(x, "subdsmdata")
for (i in 1:length(cv)) cat(cv[i], "\n")
}
if (!is.null(attr(x, "RATlist"))) {
RATs <- attr(x, "RATlist")
nRAT <- length(RATs)
if (nRAT == 1 ) cat("Raster attribute table:\n")
else cat("Raster attribute tables (", nRAT, "):\n", sep="")
for (i in 1:nRAT) {
if (i > 1) cat("----------------------\n")
RAT <- RATs[[i]]
print(as.data.frame(RAT))
cat(paste(" types:", paste(attr(RAT, "GFT_type"),
collapse=", ")), "\n")
cat(paste(" usages:", paste(attr(RAT, "GFT_usage"),
collapse=", ")), "\n")
}
}
if (!is.null(attr(x, "CATlist"))) {
CATs <- attr(x, "CATlist")
nCAT <- length(CATs)
cat("Category names:\n")
print(CATs)
}
if (!is.null(attr(x, "ColorTables"))
&& length(attr(x, "ColorTables")) > 0)
cat("Colour tables returned for bands:",
paste(which(sapply(attr(x, "ColorTables"),
function(x) !is.null(x))), collapse=" "), "\n")
invisible(x)
}
asGDALROD_SGDF <- function(from) {
x <- from
d = dim(x)
half.cell <- c(0.5,0.5)
offset <- c(0,0)
output.dim <- d[1:2]
# p4s <- .Call("RGDAL_GetProjectionRef", x, PACKAGE="rgdal")
p4s <- getProjectionRef(x, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=NULL)
if (nchar(p4s) == 0) p4s <- as.character(NA)
gt = .Call('RGDAL_GetGeoTransform', x, PACKAGE="rgdal")
if (attr(gt, "CE_Failure")) warning("GeoTransform values not available")
if (any(gt[c(3,5)] != 0.0)) stop("Diagonal grid not permitted")
data = getRasterData(x, list_out=TRUE)
cellsize = abs(c(gt[2],gt[6]))
ysign <- sign(gt[6])
co.x <- gt[1] + (offset[2] + half.cell[2]) * cellsize[1]
co.y <- ifelse(ysign < 0, gt[4] + (ysign*((output.dim[1] +
offset[1]) + (ysign*half.cell[1]))) * abs(cellsize[2]),
gt[4] + (ysign*((offset[1]) + (ysign*half.cell[1]))) *
abs(cellsize[2]))
cellcentre.offset <- c(x=co.x, y=co.y)
grid = GridTopology(cellcentre.offset, cellsize, rev(output.dim))
# if (length(d) == 2L)
# df = list(band1 = as.vector(data))
# else {
# df <- vector(mode="list", length=d[3])
# df[[1]] <- as.vector(data[,,1, drop = FALSE])
# for (band in 2:d[3])
# df[[band]] <- as.vector(data[,,band, drop = FALSE])
# names(df) = paste("band", 1:d[3], sep="")
# }
return(SpatialGridDataFrame(grid = grid,
data = as.data.frame(data), proj4string=CRS(p4s)))
# data = data.frame(df), proj4string=CRS(p4s)))
}
setAs("GDALReadOnlyDataset", "SpatialGridDataFrame", asGDALROD_SGDF)
asSGDF_GROD <- function(x, offset, region.dim, output.dim, p4s=NULL, ..., half.cell=c(0.5,0.5), OVERRIDE_PROJ_DATUM_WITH_TOWGS84=NULL) {
if (!extends(class(x), "GDALReadOnlyDataset"))
stop("x must be or extend a GDALReadOnlyDataset")
d = dim(x)
if (missing(offset)) offset <- c(0,0)
if (missing(region.dim)) region.dim <- dim(x)[1:2]
odim_flag <- NULL
if (!missing(output.dim)) odim_flag <- TRUE
else {
output.dim <- region.dim
odim_flag <- FALSE
}
# suggestion by Paul Hiemstra 070817
if (is.null(p4s))
# p4s <- .Call("RGDAL_GetProjectionRef", x, PACKAGE="rgdal")
p4s <- getProjectionRef(x, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=OVERRIDE_PROJ_DATUM_WITH_TOWGS84)
if (nchar(p4s) == 0) p4s <- as.character(NA)
gt = .Call('RGDAL_GetGeoTransform', x, PACKAGE="rgdal")
if (attr(gt, "CE_Failure")) warning("GeoTransform values not available")
if (any(gt[c(3,5)] != 0.0)) stop("Diagonal grid not permitted")
data = getRasterData(x, offset=offset,
region.dim=region.dim, output.dim=output.dim, ..., list_out=TRUE)
if (!odim_flag) cellsize = abs(c(gt[2],gt[6]))
else {
icellsize = abs(c(gt[2],gt[6]))
span <- icellsize * rev(d)
cellsize <- span / rev(output.dim)
}
ysign <- sign(gt[6])
co.x <- gt[1] + (offset[2] + half.cell[2]) * cellsize[1]
co.y <- ifelse(ysign < 0, gt[4] + (ysign*((output.dim[1] +
offset[1]) + (ysign*half.cell[1]))) * abs(cellsize[2]),
gt[4] + (ysign*((offset[1]) + (ysign*half.cell[1]))) *
abs(cellsize[2]))
cellcentre.offset <- c(x=co.x, y=co.y)
grid = GridTopology(cellcentre.offset, cellsize, rev(output.dim))
# if (length(d) == 2L)
# df = list(band1 = as.vector(data))
# else {
# df <- vector(mode="list", length=d[3])
# df[[1]] <- as.vector(data[,,1, drop = FALSE])
# for (band in 2:d[3])
# df[[band]] <- as.vector(data[,,band, drop = FALSE])
# names(df) = paste("band", 1:d[3], sep="")
# }
# df1 <- data.frame(df)
df1 <- as.data.frame(data)
data = SpatialGridDataFrame(grid = grid,
data = df1, proj4string=CRS(p4s))
return(data)
}
readGDAL = function(fname, offset, region.dim, output.dim, band, p4s=NULL, ..., half.cell=c(0.5,0.5), silent = FALSE, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=NULL, allowedDrivers=NULL, options=NULL) {
if (nchar(fname) == 0) stop("empty file name")
x = GDAL.open(fname, silent=silent,
allowedDrivers=allowedDrivers, options=options)
d = dim(x)
if (missing(offset)) offset <- c(0,0)
if (missing(region.dim)) region.dim <- dim(x)[1:2] # rows=nx, cols=ny
# else d <- region.dim
odim_flag <- NULL
if (missing(band)) band <- NULL
else {
if (length(band) > 1L) d[3] <- length(band)
else d <- d[1:2]
}
# bug report Mike Sumner 070522
if (!missing(output.dim)) odim_flag <- TRUE
else {
output.dim <- region.dim
odim_flag <- FALSE
}
if (!silent) {
cat(paste(fname, "has GDAL driver", getDriverName(getDriver(x)),"\n"))
cat(paste("and has", d[1], "rows and", d[2], "columns\n"))
}
# suggestion by Paul Hiemstra 070817
if (is.null(p4s))
# p4s <- .Call("RGDAL_GetProjectionRef", x, PACKAGE="rgdal")
p4s <- getProjectionRef(x, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=OVERRIDE_PROJ_DATUM_WITH_TOWGS84)
if (nchar(p4s) == 0) p4s <- as.character(NA)
gt = .Call('RGDAL_GetGeoTransform', x, PACKAGE="rgdal")
if (attr(gt, "CE_Failure")) warning("GeoTransform values not available")
# [1] 178400 40 0 334000 0 -40
opSilent <- get("silent", envir=.RGDAL_CACHE)
assign("silent", silent, envir=.RGDAL_CACHE)
if (any(gt[c(3,5)] != 0.0)) {
data = getRasterTable(x, band=band, offset=offset,
region.dim=region.dim, ...)
GDAL.close(x)
coordinates(data) = c(1,2)
proj4string(data) = CRS(p4s)
} else {
# cellsize = abs(c(gt[2],gt[6]))
if (!odim_flag) cellsize = abs(c(gt[2],gt[6]))
else {
icellsize = abs(c(gt[2],gt[6]))
# bug report Jose M. Blanco Moreno 091004
span <- icellsize * rev(region.dim)
# bug report Mike Sumner 070215
cellsize <- span / rev(output.dim)
}
ysign <- sign(gt[6])
if (ysign > 0)
warning("Y axis resolution positive, examine data for flipping")
# cells.dim = c(d[1], d[2]) # c(d[2],d[1])
# bug report Jose M. Blanco Moreno 091004
co.x <- gt[1] + ((offset[2]/(cellsize[1]/abs(gt[2]))) +
half.cell[2]) * cellsize[1]
co.y <- ifelse(ysign < 0, gt[4] + (ysign*((output.dim[1] +
# bug report Jose M. Blanco Moreno 091004
(offset[1]/(cellsize[2]/abs(gt[6]))) +
(ysign*half.cell[1])))) * abs(cellsize[2]),
gt[4] + (ysign*((offset[1]) + (ysign*half.cell[1]))) *
abs(cellsize[2]))
cellcentre.offset <- c(x=co.x, y=co.y)
# cellcentre.offset = c(x = gt[1] + 0.5 * cellsize[1],
# y = gt[4] - (d[2] - 0.5) * abs(cellsize[2]))
grid = GridTopology(cellcentre.offset, cellsize,
rev(output.dim))
# rev(region.dim))
data = getRasterData(x, band=band, offset=offset,
region.dim=region.dim, output.dim=output.dim, ..., list_out=TRUE)
GDAL.close(x)
# if (length(d) == 2L)
# df = list(band1 = as.vector(data))
# else {
# df <- vector(mode="list", length=d[3])
# df[[1]] <- as.vector(data[,,1, drop = FALSE])
# for (band in 2:d[3])
# df[[band]] <- as.vector(data[,,band, drop = FALSE])
# df = as.data.frame(df)
# names(df) = paste("band", 1:d[3], sep="")
# }
data = SpatialGridDataFrame(grid = grid,
data = as.data.frame(data), proj4string=CRS(p4s))
}
assign("silent", opSilent, envir=.RGDAL_CACHE)
return(data)
}
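# Usage sketch (illustrative only; file names are placeholders): read a raster
# into a SpatialGridDataFrame and write a copy back out via writeGDAL() below.
if (FALSE) {
  sgdf <- readGDAL("my_raster.tif")
  writeGDAL(sgdf, "my_raster_copy.tif", drivername = "GTiff", type = "Float32", mvFlag = -9999)
}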
writeGDAL = function(dataset, fname, drivername = "GTiff", type = "Float32",
mvFlag = NA, options=NULL, copy_drivername = "GTiff",
setStatistics=FALSE, colorTables=NULL, catNames=NULL)
{
if (nchar(fname) == 0) stop("empty file name")
x <- gdalDrivers()
copy_only <- as.character(x[!x$create & x$copy, 1])
if (drivername %in% copy_only) {
tds.create <- create2GDAL(dataset=dataset,
drivername=copy_drivername, type=type,
mvFlag=mvFlag, fname=NULL, setStatistics=setStatistics,
colorTables=colorTables, catNames=catNames)
tds.copy <- copyDataset(tds.create, driver=drivername, fname=fname)
GDAL.close(tds.create)
saveDataset(tds.copy, fname, options=options)
# RSB 120921
GDAL.close(tds.copy)
} else {
tds.out <- create2GDAL(dataset=dataset, drivername=drivername,
type=type, mvFlag=mvFlag, options=options, fname=fname,
setStatistics=setStatistics, colorTables=colorTables,
catNames=catNames)
saveDataset(tds.out, fname, options=options)
# RSB 120921
GDAL.close(tds.out)
}
# RSB 081030 GDAL.close cleanup
# tmp.obj <- saveDataset(tds.out, fname, options=options)
# GDAL.close(tmp.obj)
invisible(fname)
}
create2GDAL = function(dataset, drivername = "GTiff", type = "Float32", mvFlag = NA, options=NULL, fname=NULL, setStatistics=FALSE, colorTables=NULL, catNames=NULL)
{
stopifnot(gridded(dataset))
fullgrid(dataset) = TRUE
if (is.na(match(type, .GDALDataTypes)))
stop(paste("Invalid type:", type, "not in:",
paste(.GDALDataTypes, collapse="\n")))
# mvFlag issues Robert Hijmans 101109
if (is.na(mvFlag)) {
if (type %in% c('Byte', 'UInt16', 'Int16'))
warning(paste("mvFlag=NA unsuitable for type", type))
}
# d.dim = dim(as.matrix(dataset[1])) RSB 081106
gp = gridparameters(dataset)
cellsize = gp$cellsize
offset = gp$cellcentre.offset
dims = gp$cells.dim
d.drv = new("GDALDriver", drivername)
nbands = length(names(slot(dataset, "data")))
if (!is.null(options) && !is.character(options))
stop("options not character")
tds.out = new("GDALTransientDataset", driver = d.drv,
rows = dims[2], cols = dims[1],
bands = nbands, type = type, options = options, fname = fname,
handle = NULL)
gt = c(offset[1] - 0.5 * cellsize[1], cellsize[1], 0.0,
offset[2] + (dims[2] -0.5) * cellsize[2], 0.0, -cellsize[2])
.Call("RGDAL_SetGeoTransform", tds.out, gt, PACKAGE = "rgdal")
p4s <- proj4string(dataset)
if (!is.na(p4s) && nchar(p4s) > 0) {
.Call("RGDAL_SetProject", tds.out, p4s, PACKAGE = "rgdal")
} else {
if (getDriverName(getDriver(tds.out)) == "RST")
stop("RST files must have a valid coordinate reference system")
}
if (!is.null(colorTables)) {
stopifnot(is.list(colorTables))
stopifnot(length(colorTables) == nbands)
if (type != "Byte") {
# colorTables <- NULL
warning("colorTables valid for Byte type only in some drivers")
}
}
if (!is.null(catNames)) {
stopifnot(is.list(catNames))
stopifnot(length(catNames) == nbands)
}
for (i in 1:nbands) {
band = as.matrix(dataset[i])
if (!is.numeric(band)) stop("Numeric bands required")
if (setStatistics) {
statistics <- range(c(band), na.rm=TRUE)
statistics <- c(statistics, mean(c(band), na.rm=TRUE))
statistics <- c(statistics, sd(c(band), na.rm=TRUE))
}
if (!is.na(mvFlag))
band[is.na(band)] = mvFlag
putRasterData(tds.out, band, i)
tds.out_b <- getRasterBand(dataset=tds.out, band=i)
if (!is.na(mvFlag)) {
.Call("RGDAL_SetNoDataValue", tds.out_b, as.double(mvFlag),
PACKAGE = "rgdal")
}
if (setStatistics) {
.gd_SetStatistics(tds.out_b, as.double(statistics))
}
if (!is.null(colorTables)) {
icT <- colorTables[[i]]
if (!is.null(icT)) {
.gd_SetRasterColorTable(tds.out_b, icT)
}
}
if (!is.null(catNames)) {
icN <- catNames[[i]]
if (!is.null(icN)) {
.gd_SetCategoryNames(tds.out_b, icN)
}
}
}
tds.out
}
gdalDrivers <- function() getGDALDriverNames()
toSigned <- function(x, base) {
if (any(x < 0)) stop("already signed")
if (storage.mode(x) != "integer") stop("band not integer")
b_2 <- (2^(base-1)-1)
b <- 2^base
x[x > b_2] <- x[x > b_2] - b
as.integer(x)
}
toUnSigned <- function(x, base) {
if (all(x >= 0)) stop("already unsigned")
if (storage.mode(x) != "integer") stop("band not integer")
b <- 2^base
x[x < 0] <- x[x < 0] + b
as.integer(x)
}
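# Quick example (illustrative): round-tripping 8-bit values between unsigned and
# signed interpretations.
if (FALSE) {
  toSigned(as.integer(c(1, 127, 128, 255)), 8)   # 1 127 -128 -1
  toUnSigned(as.integer(c(-1, -128)), 8)         # 255 128
}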
"GDALSpatialRef" <- function(fname, silent=FALSE, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=NULL, allowedDrivers=NULL, options=NULL) {
if (nchar(fname) == 0) stop("empty file name")
x <- GDAL.open(fname, silent=silent,
allowedDrivers=allowedDrivers, options=options)
# p4s <- .Call("RGDAL_GetProjectionRef", x, PACKAGE="rgdal")
p4s <- getProjectionRef(x, OVERRIDE_PROJ_DATUM_WITH_TOWGS84=OVERRIDE_PROJ_DATUM_WITH_TOWGS84)
GDAL.close(x)
p4s
}
|
a03e53f1d3f7152397d0d1ada1deda17800253d8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pracma/examples/flipdim.Rd.R
|
cdfc1a6f53df2b5dafb42b88473ab4b8d99d3f07
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 280 |
r
|
flipdim.Rd.R
|
library(pracma)
### Name: flipdim
### Title: Matrix Flipping (Matlab Style)
### Aliases: flipdim flipud fliplr circshift
### Keywords: manip
### ** Examples
a <- matrix(1:12, nrow=3, ncol=4, byrow=TRUE)
flipud(a)
fliplr(a)
circshift(a, c(1, -1))
v <- 1:10
circshift(v, 5)
|
28627916f3878743df6fb3e175e80f9b172dd09a
|
14553970249fcf633c25e13d84259ad608220233
|
/man/logregtree.Rd
|
6c57bc5e64e5662520565175ef3ccf16a1ad09f2
|
[] |
no_license
|
cran/LogicReg
|
c1d0cbd785af6cf95a73a796f561d60fdd872a73
|
73bb7739987b1884d21f976d7e534cef6cebb8e4
|
refs/heads/master
| 2023-08-30T21:07:25.420747 | 2023-08-08T23:10:10 | 2023-08-09T01:42:24 | 17,691,856 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 4,444 |
rd
|
logregtree.Rd
|
\name{logregtree}
\alias{logregtree}
\title{Format of class logregtree}
\description{This help file contains a description of the format of
class logregtree. }
\usage{logregtree()}
\value{
An object of class logregtree is typically a substructure of an object
of the class \code{logregmodel}. It will typically be the result of
using the fitting function \code{logreg}. An object of class
logregtree has the following components:
\item{whichtree}{the sequence number of the current tree within the
model.}
\item{coef}{the coefficients of this tree.}
\item{trees}{a matrix (data.frame) with five columns; see below for
the format.}}
\details{
When storing trees, we number the location of the nodes using the
following scheme (this is an example for a tree with at most 8
\emph{terminal} nodes, but the generalization should be obvious):
\tabular{ccccccccccccccc}{
\tab \tab \tab \tab \tab \tab \tab 1\tab \tab \tab \tab \tab \tab \tab \cr
\tab \tab \tab 2\tab \tab \tab \tab \tab \tab \tab \tab 3\tab \tab \tab \cr
\tab 4\tab \tab \tab \tab 5\tab \tab \tab \tab 6\tab \tab \tab \tab 7\tab \cr
8\tab \tab 9\tab \tab 10\tab \tab 11\tab \tab
12\tab \tab 13\tab \tab 14\tab \tab 15\cr
}
Each node may or may not be present in the current tree. If it is
present, it can contain an operator (``and'' or ``or''), in which case
it has two child nodes, or it can contain a variable, in which case the
node is a terminal node. It is also possible that the node does not
exist (as the user only specifies the maximum tree size, not the tree
size that is actually fitted).
Output files have one line for each node. Each line contains 5
numbers:
\enumerate{
\item
the node number.
\item
does this node contain an ``and'' (1), an ``or'' (2), a variable (3),
or is the node empty (0).
\item
if the node contains a variable, which one is it; e.g. if this number
is 3 the node contains X3.
\item
if the node contains a variable, does it contain the regular variable
(0) or its complement (1)
\item
is the node empty (0) or not (1) (this information is redundant with
the second number)}
\bold{Example}
\tabular{ccccccccccccccc}{
\tab \tab \tab \tab \tab \tab \tab AND\tab \tab \tab \tab \tab \tab \tab \cr
\tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \cr
\tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \cr
\tab \tab \tab OR\tab \tab \tab \tab \tab \tab \tab OR\tab \tab \tab \tab \cr
\tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \cr
\tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \cr
\tab OR\tab \tab \tab \tab OR\tab \tab \tab \tab X20\tab \tab \tab \tab OR\tab \cr
\tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \tab \cr
X17\tab \tab X12\tab \tab X3\tab \tab
X13c\tab \tab \tab \tab \tab \tab X2\tab \tab X1\cr
}
is represented as
\tabular{rrrrr}{
1 \tab 1 \tab 0 \tab 0 \tab 1\cr
2 \tab 2 \tab 0 \tab 0 \tab 1\cr
3 \tab 2 \tab 0 \tab 0 \tab 1\cr
4 \tab 2 \tab 0 \tab 0 \tab 1\cr
5 \tab 2 \tab 0 \tab 0 \tab 1\cr
6 \tab 3 \tab 20 \tab 0 \tab 1\cr
7 \tab 2 \tab 0 \tab 0 \tab 1\cr
8 \tab 3 \tab 17 \tab 0 \tab 1\cr
9 \tab 3 \tab 12 \tab 0 \tab 1\cr
10 \tab 3 \tab 3 \tab 0 \tab 1\cr
11 \tab 3 \tab 13 \tab 1 \tab 1\cr
12 \tab 0 \tab 0 \tab 0 \tab 0\cr
13 \tab 0 \tab 0 \tab 0 \tab 0\cr
14 \tab 3 \tab 2 \tab 0 \tab 1\cr
15 \tab 3 \tab 1 \tab 0 \tab 1\cr
}
}
\references{
Ruczinski I, Kooperberg C, LeBlanc ML (2003). Logic Regression,
\emph{Journal of Computational and Graphical Statistics}, \bold{12}, 475-511.
Ruczinski I, Kooperberg C, LeBlanc ML (2002). Logic Regression -
methods and software. \emph{Proceedings of the MSRI workshop on
Nonlinear Estimation and Classification} (Eds: D. Denison, M. Hansen,
C. Holmes, B. Mallick, B. Yu), Springer: New York, 333-344.
Selected chapters from the dissertation of Ingo Ruczinski, available from
\url{https://research.fredhutch.org/content/dam/stripe/kooperberg/ingophd-logic.pdf}}
\author{
Ingo Ruczinski \email{[email protected]} and
Charles Kooperberg \email{[email protected]}.
}
\seealso{
\code{\link{logreg}},
\code{\link{plot.logregtree}},
\code{\link{print.logregtree}},
\code{\link{logregmodel}}
}
\examples{
logregtree() # displays this help file
help(logregtree) # equivalent
}
\keyword{logic}
\keyword{methods}
\keyword{nonparametric}
\keyword{tree}
|
ae79ba080d7bbdcd43364754d4f0461901c080e1
|
5f82d1bc22e4ef72a63c58852a2d035e124f1a37
|
/tests/testthat/test_last_n.R
|
5675633bcb39362f933bce31771597594992b26d
|
[] |
no_license
|
cran/bupaR
|
75608804ef045f678821740aaff123991d5d36b5
|
ef020af22301e7aa8c82d62e4d01dd5aebaea99e
|
refs/heads/master
| 2023-04-20T17:49:49.645967 | 2023-04-02T21:00:06 | 2023-04-02T21:00:06 | 86,215,725 | 0 | 3 | null | null | null | null |
UTF-8
|
R
| false | false | 2,932 |
r
|
test_last_n.R
|
#### eventlog ####
test_that("test last_n on eventlog", {
load("./testdata/patients.rda")
last <- patients %>%
last_n(n = 2)
instances <- patients %>%
filter(!!activity_instance_id_(.) %in% c("11", "12")) %>%
nrow()
expect_s3_class(last, "eventlog")
expect_equal(dim(last), c(instances, ncol(patients)))
expect_equal(colnames(last), colnames(patients))
# `last` should contain last 2 activity instances
expect_equal(last[[activity_instance_id(last)]], c("12", "11"))
# Ensure that last 2 activity instances are completely present in `last`
expect_equal(instances, 2)
})
test_that("test last_n on grouped_eventlog", {
load("./testdata/patients_grouped.rda")
last <- patients_grouped %>%
last_n(n = 2)
instances <- patients_grouped %>%
filter(!!activity_instance_id_(.) %in% c("5", "6", "10", "11", "12")) %>%
nrow()
expect_s3_class(last, "grouped_eventlog")
# Events: 3 (John Doe) + 3 (Jane Doe) + 1 (George Doe)
expect_equal(dim(last), c(instances, ncol(patients_grouped)))
expect_equal(colnames(last), colnames(patients_grouped))
# `last` should contain last 2 activity instances, per group (patient)
expect_equal(last[[activity_instance_id(last)]], c("12","10", "10", "11", "5","5","6"))
# Ensure that last 2 activity instances per group (patient) are completely present in `last`
expect_equal(instances, 7)
})
#### activitylog ####
test_that("test last_n on activitylog", {
load("./testdata/patients_act.rda")
last <- patients_act %>%
last_n(n = 3)
# complete is always present and last event per activity instance, so this works too
ordered <- patients_act %>%
arrange(.data[["complete"]]) %>%
tail(n = 3)
expect_s3_class(last, "activitylog")
expect_equal(dim(last), c(3, ncol(patients_act)))
expect_equal(colnames(last), colnames(patients_act))
# `last` should equal to the last 3 rows of `patients_act`, except for the 7th column (.order)
expect_equal(last[, -7], ordered[, -7])
})
test_that("test last_n on grouped_activitylog", {
load("./testdata/patients_act_grouped.rda")
skip("TODO: rewrite ordered fails")
last <- patients_act_grouped %>%
last_n(n = 3)
# complete is always present and last event per activity instance, so this works too
ordered <- patients_act_grouped %>%
slice_max(order_by = .data[["complete"]], n = 3) %>%
arrange(.data[["complete"]])
expect_s3_class(last, "grouped_activitylog")
# Activities: 3 (John Doe) + 3 (Jane Doe) + 1 (George Doe)
expect_equal(dim(last), c(7, ncol(patients_act_grouped)))
expect_equal(colnames(last), colnames(patients_act_grouped))
# `last` should equal to the last 3 rows per group of `patients_act_grouped`, except for the 7th column (.order)
expect_equal(tibble::as_tibble(last[, -7]), tibble::as_tibble(ordered[, -7]))
})
|
6826dc0135ec978abb9fe3f1c40872739b97c7fa
|
646f4be0623653e8e9cf4d701a15fa318eda9824
|
/tests/testthat/test-recently-added.R
|
13dab52667c5345fd069898086ee6a5463aca9e3
|
[
"MIT"
] |
permissive
|
jemus42/tauturri
|
dc24b37136cd88c97ad79babf38c71b2043b84a0
|
2f23895985d962f18b1d9ea3977fefdfbca714f0
|
refs/heads/master
| 2022-09-24T09:02:36.203045 | 2022-09-18T16:44:40 | 2022-09-18T16:44:40 | 121,064,812 | 1 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 259 |
r
|
test-recently-added.R
|
context("test-recently-added.R")
test_that("get_recently_added works", {
count <- 5
res <- get_recently_added(count = count)
expect_is(res, "tbl")
expect_length(res, 42)
expect_equal(nrow(res), count)
expect_error(get_recently_added("", ""))
})
|
4880cc79284c1f24f2477ad6f06e8217e45b5325
|
d741d22e89b3c036276cc75378f25ab4d5df2f67
|
/code/exploration.R
|
7370d1b69f1b4610fc54020d6fef86b7b2c90211
|
[] |
no_license
|
andyhoegh/NCAA
|
f58e98602d434ec98c7304f69d224ceddf88863c
|
4d110fdd45fa029cea3edf59a53521293f92bb34
|
refs/heads/master
| 2021-01-01T16:49:43.200349 | 2015-01-07T18:04:43 | 2015-01-07T18:04:43 | 16,755,168 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,822 |
r
|
exploration.R
|
source("scoring_code.R")
# This gets the win rate for all teams over all given seasons
id = 501:856
nteams = length(id)
regular_season_results = read.csv("~/regular_season_results.csv")
nwins = numeric(length(id))
nlosses = numeric(length(id))
for(i in 1:nteams){
nwins[i] = sum(regular_season_results$wteam == id[i])
nlosses[i] = sum(regular_season_results$lteam == id[i])
}
propwins = nwins / (nwins + nlosses)
propwins[is.nan(propwins)] = .5
id_propwins = data.frame(id, propwins)
# This is a naive predictor. The probability a team wins a matchup is its win % divided by its win % + its opponent's
predict.years <-c('N','O','P','Q','R')
tourney_results <- read.csv('C:\\Users\\Ian\\Desktop\\Research\\NCAA\\data\\tourney_results.csv')
tourney_results.tmp <- tourney_results[!(tourney_results$daynum %in% c(134,135)),] # exclude play in
tourney_results_predyears <- tourney_results[tourney_results$season %in% predict.years,]
all_matchups <- NULL
for (k in 1:length(predict.years)){
active.year <- tourney_results_predyears[tourney_results_predyears$season == predict.years[k] ,]
teams <- sort(unique(c(active.year$wteam,active.year$lteam)))
for (i in 1:(length(teams)-1)){
for (j in (i+1):length(teams)){
all_matchups <- c(all_matchups, paste(predict.years[k],teams[i],teams[j],sep='_'))
}
}
}
pred = numeric(length(all_matchups))
for(i in 1:length(all_matchups)){
team1 = as.numeric(strsplit(all_matchups[i], "_")[[1]][2])
team2 = as.numeric(strsplit(all_matchups[i], "_")[[1]][3])
prop1 = id_propwins[id_propwins$id == team1, 2]
prop2 = id_propwins[id_propwins$id == team2, 2]
  pred[i] = prop1 / (prop1 + prop2)  # probability that team1 (the first/lower id) beats team2
}
sample.submission <- data.frame(all_matchups,pred)
colnames(sample.submission) = c('id','pred')
Score.NCAA(tourney_results,predict.years,sample.submission)
|
79066c87346372b53ae19826e9ba73275d8d8d94
|
528f00fe5ccc8d13132caf0e38cfe362b0ae20e4
|
/R function/pml.R
|
9bacdbe691b13c3f1cdf718d8f2cadb13a2bc861
|
[] |
no_license
|
MingchenInSZ/practicalmachinelearning
|
72c805e6d7e90e60699c988cc380e65a14985da4
|
a14227e5fc060ceae7b227545adacb44c8116a6c
|
refs/heads/master
| 2016-09-06T10:35:19.836091 | 2014-06-22T15:46:39 | 2014-06-22T15:46:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,262 |
r
|
pml.R
|
pml<-function()
{
library(caret)
curdir<-getwd()
trainfile<-paste(curdir,"/pml-training.csv",sep="")
testfile<-paste(curdir,"/pml-testing.csv",sep="")
training <-read.table(trainfile,header=T,sep=",")
testing <- read.table(testfile,header=T,sep=",")
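# The two loops below count, for each column, how many entries are empty strings
# (rs) and how many are NA (rsc); columns where more than 70% of the 19622 rows
# are empty or NA are collected in lost_ind and dropped before model fitting.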
rs<-c()
rsc<-c()
for(name in names(training))
{
r<-c()
for(var in as.list(training[name]))
{
r<-append(r,as.numeric(nchar(as.character(var))))
}
rs<-append(rs,sum(r==0))
rsc<-append(rsc,sum(is.na(training[name])))
}
inds<-c()
for(i in 1:length(rs))
{
if(rs[i]>19622*0.7)
{
inds<-append(inds,i)
}
}
indsc<-c()
for(i in 1:length(rsc))
{
if(rsc[i]>19622*0.7)
{
indsc<-append(indsc,i)
}
}
lost_ind<-union(inds,indsc)# the most lost record column index
lost_ind<-append(lost_ind,c(1:3))
training<-subset(training,select=-lost_ind)
testing<-subset(testing,select=-lost_ind)
c_v<-createDataPartition(training$classe,p=0.3,list=F)
traindata<-training[-c_v,]
cross_valid<-training[c_v,]
fit<-train(classe~.,data=traindata,method="gbm")
pred<-predict(fit,newdata=cross_valid)
print(sum(pred==cross_valid$classe)/dim(cross_valid)[1])
p_test<-predict(fit,newdata=testing)
p_test
}
|
0527d29e945830bcabc7cf8d5cf473d82d2f2fb4
|
2b66528ea70115d88464fb90179365542e7313be
|
/auc_functions.R
|
a55280e7538767abcfad27cc6ce21fbaede9e4f8
|
[] |
no_license
|
pavanjuturu/Numerai
|
bd92f5a82fd60a75e8b1629f9089eb78672429af
|
0b66770d83838b78767376c23703eab4c0f76ad5
|
refs/heads/master
| 2020-05-22T19:04:50.872296 | 2016-02-10T20:50:53 | 2016-02-10T20:50:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 737 |
r
|
auc_functions.R
|
auc <- function(outcome, proba){
outcome <- as.vector(outcome)
proba <- as.vector(proba)
N <- length(proba)
N_pos <- sum(outcome)
df <- data.frame(out = outcome, prob = proba)
df <- df[order(-df$prob),]
df$above <- (1:N) - cumsum(df$out)
return( 1- sum( df$above * df$out ) / (N_pos * (N-N_pos) ) )
}
## AUC in the xgboost custom evaluation metric (feval) format: the first
## argument is the prediction vector and the labels are read from dtrain
auc.gbm <- function(preds, dtrain) {
outcome <- as.vector(getinfo(dtrain, "label"))
preds <- as.vector(preds)
N <- length(preds)
N_pos <- sum(outcome)
df <- data.frame(out = outcome, prob = preds)
df <- df[order(-df$prob),]
df$above <- (1:N) - cumsum(df$out)
auc <- ( 1- sum( df$above * df$out ) / (N_pos * (N-N_pos) ) )
return(list(metric = "AUC", value = auc))
}
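# Quick sanity check of auc() on toy data (illustrative values only):
#   y <- c(1, 1, 0, 0)
#   p <- c(0.9, 0.6, 0.4, 0.1)
#   auc(y, p)  # the ranking is perfect, so this returns 1
# auc.gbm() is intended to be passed to xgboost as a custom evaluation metric
# (feval) rather than called directly.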
|
905ef0f1d9699be896f50e88046c9530a11bae8e
|
b0670f8484d05498938b7a9770318857e2de5527
|
/plot2.R
|
1aaa317cf318b485874ea4930fdc1ec9a6d6cef5
|
[] |
no_license
|
akolchin/ExData_Plotting1
|
eeaa36befc6a7ffda584e0e38a16eaa90608beb8
|
b0a24c30d0834533ca576ca3d288331621444e73
|
refs/heads/master
| 2021-01-18T02:52:33.055350 | 2014-05-10T12:37:59 | 2014-05-10T12:37:59 | 19,465,353 | 0 | 1 | null | null | null | null |
UTF-8
|
R
| false | false | 948 |
r
|
plot2.R
|
plot2 <- function() {
## load and unzip
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile="data.zip")
unzip("data.zip")
## load and prepare datas
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", dec = ".", na.strings="?", stringsAsFactors=FALSE)
## subset to just two target days in February, 2007
data <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]
## add datetime column
data$datetime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %T")
## open PNG and plot
png(filename="plot2.png", width=480, height=480, units="px")
plot(x=data$datetime, y=data$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
}
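## Usage: plot2() downloads and unzips the data, subsets the two February 2007
## days, and writes plot2.png to the working directory.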
|
cb172ae89877e4ae97a8ea122391f1e9a651ae3c
|
3d1ec18944e584c2f00e2b9902dcaaccb79c8c41
|
/R/qmosaic.R
|
41bbf2660192c9bfa8dd6d89177e75a28e74a107
|
[] |
no_license
|
tudou2015/cranvas
|
a84ffebf61fac235959cefb8acbd4c7bdb0d7d58
|
af082a2a1cb09d42ca95c0021f8046df5054240f
|
refs/heads/master
| 2020-06-10T18:52:07.441557 | 2015-03-13T17:03:09 | 2015-03-13T17:03:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 20,862 |
r
|
qmosaic.R
|
constructCondition <- function (hdata) {
library(reshape2)
library(plyr)
hdata$ID <- 1:nrow(hdata)
res.melt <- melt(hdata,id.var="ID")
res.melt$cond <- with(res.melt, sprintf("(%s == '%s')", variable, value))
NAs <- which(is.na(res.melt$value))
res.melt$cond[NAs] <- sprintf("is.na(%s)", res.melt$variable[NAs])
condis <- ddply(res.melt, .(ID), summarize, condi = paste("(", paste(cond, collapse=" & "), ")"))
paste(condis$condi, collapse="|")
}
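# Example of the string constructCondition() builds (hypothetical columns sex and
# happy, one selected row with sex = "Male" and happy = NA):
#   "( (sex == 'Male') & is.na(happy) )"
# Conditions from several selected rows are joined with "|", and the result is
# later eval()'d against the data to recover the selected observations.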
paste_formula <- function(form) {
# form has pieces wt, marg and cond
# output is character - needs to be converted to formula afterwards
wtStr <- ""
if (length(form$wt) > 0)
wtStr <- form$wt[1]
margStr <- "1"
if (length(form$marg) > 0)
margStr <- paste(form$marg,collapse="+")
condStr <- ""
if (length(form$cond) > 0)
condStr <- paste("|", paste(form$cond, collapse="+"))
formstring <- paste(wtStr,"~", margStr, condStr)
return(formstring)
}
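# Example (hypothetical pieces, in the shape returned by parse_product_formula()):
#   paste_formula(list(wt = "wt", marg = c("cyl", "gear"), cond = "am"))
# gives the string "wt ~ cyl+gear | am", ready for as.formula().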
find_x_label <- function(form, divider) {
parsed <- productplots::parse_product_formula(form)
vars <- c(parsed$marg, parsed$cond)
xlabs <- rev(vars[grep("h",divider)])
paste(xlabs,"", collapse="+ ")
}
find_y_label <- function(form, divider) {
parsed <- productplots::parse_product_formula(form)
vars <- c(parsed$marg, parsed$cond)
ylabs <- rev(vars[grep("v",divider)])
paste(ylabs,"", collapse="+ ")
}
settitle <- function(form) {
# browser()
if (!is.null(form))
paste_formula(productplots::parse_product_formula(form))
}
extractVars <- function(form) {
setdiff(unlist(parse_product_formula(form)), "1")
}
##' Mosaic plot.
##' Create a mosaicplot using a formula (as described in prodplot)
##'
##' Interactive elements for mosaic plots are arrow keys for navigating through the mosaic hierarchy:
##' arrow up reduces complexity of the mosaic by one variable, arrow down increases the complexity by one, if possible.
##' Arrow left and right rotate a previously included variable into the last split position.
##' Conditioning/Unconditioning is done with keys 'C' and 'U'
##' Keys 'B' and 'S" switch to bar and spine representation, respectively
##' Key 'R' rotates the last split variable between horizontal and vertical display.
##'
##' @param data a mutaframe which is typically built upon a data frame
##' along with several row attributes
##' @param formula a formula to describe order in which variables go into the mosaicplot. The first variables are the ones visually most important, i.e. Y ~ X1 + X2 + X3 first splits according to X3, then according to X2, then according to X1
##' @param divider structure of the split in each direction. Choices are "hbar", "vbar" for horizontal/vertical barcharts, "hspine", "vspine" for horizontal/vertical spineplots.
##' @param cascade parameter for prodplot in package productplots
##' @param scale_max parameter for prodplot in package productplots
##' @param na.rm handling of missing values, defaults to FALSE
##' @param subset parameter for prodplot -
##' @param colour fill colour of rectangles - only used if colour is not used in the data
##' @param main parameter for prodplot
##' @param ... further arguments (currently unused)
##' @return NULL
##' @author Heike Hofmann
##' @export
##' @example inst/examples/qmosaic-ex.R
qmosaic <- function(data, formula, divider = productplots::mosaic(), cascade = 0, scale_max = TRUE, na.rm = FALSE, subset=NULL, colour="grey30", main=NULL, ...) {
data = check_data(data)
b = brush(data)
b$select.only = TRUE; b$draw.brush = FALSE # a selection brush
z = as.list(match.call()[-1])
s = attr(data, 'Scales')
var = extractVars(z$formula)
redoHiliting <- FALSE
redoColor <- FALSE
meta =
Mosaic.meta$new(var=var, form = as.formula(z$formula), origForm=as.formula(z$formula),
xlim=c(0,1), ylim=c(0,1), alpha = 1,
inactiveVar=NULL, inactiveDivider=NULL,
active = TRUE, main = if (is.null(main)) settitle(z$formula) else main, ylab="", xlab="")
if(is.null(divider)) divider = mosaic()
if (!is.character(divider)) {
form = parse_product_formula(z$formula)
splits = c(form$marg, form$cond)
divider = divider(length(splits))
}
meta$divider = divider
meta$origDivider = meta$divider
recalcColor = function() {
redoColor <<-FALSE
idx = visible(data)
if (sum(idx) > 0) {
df <- data.frame(data[idx,])
form <- parse_product_formula(meta$form)
df$wt <- 1
if (length(form$wt) == 1) df$wt <- df[,form$wt]
var <- unlist(c(form$marg, form$cond))
cols <- ddply(df, var, function(x) {
dc <- xtabs(wt~.color, data=x, exclude=NULL)/sum(x$wt)
dc[is.nan(dc)] <- 0
dc
})
require(reshape2)
cm <- melt(cols, id.var=var)
names(cm) <- gsub("variable", ".color",names(cm))
names(cm) <- gsub("value", "cval",names(cm))
colID <- grep(".color", names(meta$mdata))
if (length(colID) >0) meta$mdata <- meta$mdata[-colID]
meta$cdata <- merge(meta$mdata, cm, by=var)
## set order of the colors here
# browser()
meta$cdata$cid <- as.numeric(meta$cdata$.color)
meta$cdata <- ddply(meta$cdata, var, transform,
cval=cumsum(cval[order(cid)]),
.color=.color[order(cid)])
} else {
meta$cdata <- meta$mdata
meta$cdata$cid <- as.numeric(meta$cdata$.color)
meta$cdata$cval <- 0
}
split <- meta$divider[1]
if (length(grep("v", split))>0) split <- "hspine"
else split <- "vspine"
if (split =="vspine") {
meta$cdata$t = with(meta$cdata, b + (t-b)*cval)
meta$cdata <- ddply(meta$cdata, var, transform,
b = c(b[1], t[-length(t)]))
} else {
meta$cdata$r = with(meta$cdata, l + (r-l)*cval)
meta$cdata <- ddply(meta$cdata, var, transform,
l = c(l[1], r[-length(r)]))
}
}
recalcHiliting = function() {
redoHiliting <<-FALSE
idx = visible(data)
if (sum(idx) > 0) {
df <- data.frame(data[idx,])
form <- parse_product_formula(meta$form)
df$wt <- 1
if (length(form$wt) == 1) df$wt <- df[,form$wt]
var <- unlist(c(form$marg, form$cond))
hils <- ddply(df, var, summarize, hilited = sum(wt[.brushed])/sum(wt))
hils$hilited[is.nan(hils$hilited)] <- 0
hilID <- grep("hilited", names(meta$mdata))
if (length(hilID) >0) meta$mdata <- meta$mdata[-hilID]
meta$hdata <- merge(meta$mdata, hils, by=var)
} else {
meta$hdata <- meta$mdata
meta$hdata$hilited <- 0
}
split <- meta$divider[1]
if (length(grep("v", split))>0) split <- "hspine"
else split <- "vspine"
if (split =="vspine") {
meta$hdata$t = with(meta$hdata, b + (t-b)*hilited)
} else
meta$hdata$r = with(meta$hdata, l + (r-l)*hilited)
}
setylab <- function() {
parsed <- parse_product_formula(meta$form)
vars <- c(parsed$marg, parsed$cond)
yvars <- rev(vars[grep("v",meta$divider)])
meta$yat = seq(0,1, length=5)
meta$ylabels = round(seq(0,1, length=5),2)
if (length(yvars)>=1) {
yvar <- yvars[1]
df <- subset(meta$mdata, l==0)
at <- ddply(df, yvar, summarize, yat=(min(b)+max(t))/2)
meta$yat = at$yat
meta$ylabels = at[,1]
}
}
setxlab <- function() {
parsed <- parse_product_formula(meta$form)
vars <- c(parsed$marg, parsed$cond)
xvars <- rev(vars[grep("h",meta$divider)])
meta$xat = seq(0,1, length=5)
meta$xlabels = round(seq(0,1, length=5), 2)
if (length(xvars)>=1) {
xvar <- xvars[1]
# browser()
df <- subset(meta$mdata, b==0)
at <- ddply(df, xvar, summarize, xat=(min(l)+max(r))/2)
meta$xat = at$xat
meta$xlabels = at[,1]
}
}
recalc = function() {
idx = visible(data)
df <- data.frame(data[idx,])
mdata <- prodcalc(df, meta$form, meta$divider, cascade, scale_max, na.rm = na.rm)
meta$mdata <- subset(mdata, level==max(mdata$level), drop=FALSE)
meta$xlab <- find_x_label(meta$form, meta$divider)
meta$ylab <- find_y_label(meta$form, meta$divider)
setxlab()
setylab()
recalcColor()
recalcHiliting()
}
compute_coords = function() {
meta$limits = extend_ranges(cbind(meta$xlim, meta$ylim))
meta$minor = "xy"
recalc()
}
compute_coords()
recalcColor()
removeSplit = function() {
form = parse_product_formula(meta$form)
if (length(form$marg) > 1) {
meta$inactiveVar <- c(form$marg[1], meta$inactiveVar)
meta$inactiveDivider <- c(meta$divider[1], meta$inactiveDivider)
form$marg <- form$marg[-1]
meta$divider <- meta$divider[-1]
}
else return()
# if (length(form$marg) == 1) {
# if (form$marg[1] == "1") return()
# else {
# meta$inactiveVar <- c(form$marg[1], meta$inactiveVar)
# form$marg[1] = "1"
# }
#
# }
meta$form <- as.formula(paste_formula(form))
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
addSplit = function() {
form = parse_product_formula(meta$form)
if(length(meta$inactiveVar) < 1) return()
if ((length(form$marg) == 0) | (form$marg[1] == "1")) {
form$marg[1] <- meta$inactiveVar[1]
meta$inactiveVar <- meta$inactiveVar[-1]
} else {
form$marg <- c( meta$inactiveVar[1], form$marg)
meta$inactiveVar <- meta$inactiveVar[-1]
lastSplit <- length(form$marg)
meta$divider <- c(meta$inactiveDivider[1], meta$divider)
meta$inactiveDivider = meta$inactiveDivider[-1]
}
meta$form <- as.formula(paste_formula(form))
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
rotateLeft = function() {
form = parse_product_formula(meta$form)
if(length(meta$inactiveVar) < 1) return()
if ((length(form$marg) == 0) | (form$marg[1] == "1")) {
form$marg[1] <- meta$inactiveVar[1]
meta$inactiveVar <- meta$inactiveVar[-1]
} else {
save <- form$marg[1]
form$marg[1] <- meta$inactiveVar[1]
meta$inactiveVar <- c(meta$inactiveVar[-1], save)
}
meta$form <- as.formula(paste_formula(form))
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
rotateRight = function() {
form = parse_product_formula(meta$form)
if(length(meta$inactiveVar) < 1) return()
if ((length(form$marg) == 0) | (form$marg[1] == "1")) {
form$marg[1] <- meta$inactiveVar[1]
meta$inactiveVar <- meta$inactiveVar[-1]
} else {
save <- form$marg[1]
lastInactive <- length(meta$inactiveVar)
form$marg[1] <- meta$inactiveVar[lastInactive]
meta$inactiveVar <- c(save, meta$inactiveVar[-lastInactive])
}
meta$form <- as.formula(paste_formula(form))
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
rotateSplit = function() {
if (length(grep("v", meta$divider[1])) > 0)
meta$divider[1] <- gsub("v", "h", meta$divider[1])
else
meta$divider[1] <- gsub("h", "v", meta$divider[1])
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
unconditionVar = function() {
form = parse_product_formula(meta$form)
if (length(form$cond) < 1) return()
# take last conditioning variable and move in as first split
form$marg <- c(form$marg, form$cond[1])
form$cond <- form$cond[-1]
meta$form <- as.formula(paste_formula(form))
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
conditionVar = function() {
form = parse_product_formula(meta$form)
if (length(form$marg) < 1) return()
# take fist split and condition on it
firstSplit <- length(form$marg)
form$cond <- c(form$marg[firstSplit], form$cond)
form$marg <- form$marg[-firstSplit]
meta$form <- as.formula(paste_formula(form))
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
meta$brush.size = c(1, -1) * apply(meta$limits, 2, diff) / 15
main_draw = function(layer, painter) {
if (redoColor) recalcColor()
color <- "grey30"
with(meta$mdata, qdrawRect(painter,l,b,r,t, fill="white", stroke=color))
with(meta$cdata, qdrawRect(painter,l,b,r,t, fill=as.character(.color), stroke=as.character(.color)))
zeroes <- subset(meta$mdata, .wt==0, drop=FALSE)
if (nrow(zeroes) > 0) {
qdrawCircle(painter, zeroes$l, zeroes$b, r = 3,
stroke = color, fill = "white")
}
}
brush_draw = function(layer, painter) {
if (redoHiliting) recalcHiliting()
color <- b$color
with(meta$hdata, qdrawRect(painter,l,b,r,t, fill=color, stroke=color))
draw_brush(layer, painter, data, meta)
}
brush_mouse_press = function(layer, event) {
common_mouse_press(layer, event, data, meta)
}
brush_mouse_move = function(layer, event) {
rect = qrect(update_brush_size(meta, event))
hits = layer$locate(rect)
hits <- hits[hits < nrow(meta$mdata)]
if (length(hits)) {
## rectangles are drawn in the same order as in mdata
# print(hits)
form <- parse_product_formula(meta$form)
var <- unlist(c(form$marg, form$cond))
selected <- meta$mdata[hits+1, var, drop=FALSE]
condstr = constructCondition(selected)
hits = with(data.frame(data), which(eval(parse(text=condstr))))
}
selected(data) = mode_selection(selected(data), hits, mode = b$mode)
common_mouse_move(layer, event, data, meta)
}
brush_mouse_release = function(layer, event) {
brush_mouse_move(layer, event)
common_mouse_release(layer, event, data, meta)
}
key_press = function(layer, event) {
common_key_press(layer, event, data, meta)
key <- event$key()
if (key == Qt$Qt$Key_Up) { # arrow up
removeSplit()
}
if (key == Qt$Qt$Key_Down) { # arrow down
addSplit()
}
if (key == Qt$Qt$Key_Right) { # arrow right
rotateRight()
}
if (key == Qt$Qt$Key_Left) { # arrow left
rotateLeft()
}
if (key == Qt$Qt$Key_R) { # 'r' or 'R' for 'rotate'
rotateSplit()
}
if (key == Qt$Qt$Key_U) { # 'u' or 'U' for 'uncondition'
unconditionVar()
}
if (key == Qt$Qt$Key_C) { # 'c' or 'C' for 'condition'
conditionVar()
}
if (key == Qt$Qt$Key_B) { # 'b' or 'B' for 'spine to Bar'
firstletter <- substr(meta$divider[1],1,1)
meta$divider[1] <- sprintf("%sbar", firstletter)
recalc()
layer.main$invalidateIndex()
qupdate(layer.main)
}
if (key == Qt$Qt$Key_S) { # 's' or 'S' for 'bar to Spine'
firstletter <- substr(meta$divider[1],1,1)
meta$divider[1] <- sprintf("%sspine", firstletter)
recalc()
layer.main$invalidateIndex(); qupdate(layer.main)
}
}
key_release = function(layer, event) {
common_key_release(layer, event, data, meta)
}
identify_hover = function(layer, event) {
if (!b$identify) return()
b$cursor = 2L
meta$pos = as.numeric(event$pos())
meta$identified = layer$locate(identify_rect(meta))
qupdate(layer.identify)
}
identify_draw = function(layer, painter) {
if (!b$identify || !length(idx <- meta$identified)) return()
idx <- idx[idx <= nrow(meta$mdata)]
if (length(idx) == 0) return()
idx = idx + 1
form <- parse_product_formula(meta$form)
var <- rev(unlist(c(form$marg, form$cond)))
for(i in var)
meta$mdata[,i] <- as.character(meta$mdata[,i])
id <- paste(var, meta$mdata[idx, var], sep=": ", collapse="\n")
sumwt <- sum(meta$mdata[, ".wt"])
ivals <- paste(sprintf("\ncount: %s\nproportion: %.2f%%",
meta$mdata[idx, ".wt"],
meta$mdata[idx, ".wt"]/sumwt*100), collapse="\n")
meta$identify.labels = paste(id, ivals, collapse="")
draw_identify(layer, painter, data, meta)
qdrawRect(painter, meta$mdata$l[idx], meta$mdata$b[idx], meta$mdata$r[idx],
meta$mdata$t[idx], stroke = b$color, fill = NA)
}
scene = qscene()
layer.root = qlayer(scene)
layer.main =
qlayer(paintFun = main_draw,
mousePressFun = brush_mouse_press, mouseReleaseFun = brush_mouse_release,
mouseMoveFun = brush_mouse_move, hoverMoveFun = identify_hover,
keyPressFun = key_press, keyReleaseFun = key_release,
focusInFun = function(layer, event) {
common_focus_in(layer, event, data, meta)
}, focusOutFun = function(layer, event) {
common_focus_out(layer, event, data, meta)
},
limits = qrect(meta$limits))
layer.brush = qlayer(paintFun = brush_draw, limits = qrect(meta$limits))
layer.identify = qlayer(paintFun = identify_draw, limits = qrect(meta$limits))
layer.title = qmtext(meta = meta, side = 3)
layer.xlab = qmtext(meta = meta, side = 1)
layer.ylab = qmtext(meta = meta, side = 2)
layer.xaxis = qaxis(meta = meta, side = 1)
layer.yaxis = qaxis(meta = meta, side = 2)
layer.grid = qgrid(meta = meta)
layer.keys = key_layer(meta)
layer.root[0, 2] = layer.title
layer.root[2, 2] = layer.xaxis
layer.root[3, 2] = layer.xlab
layer.root[1, 1] = layer.yaxis
layer.root[1, 0] = layer.ylab
layer.root[1, 2] = layer.grid
layer.root[1, 2] = layer.main
layer.root[1, 2] = layer.brush
layer.root[1, 2] = layer.keys
layer.root[1, 2] = layer.identify
layer.root[1, 3] = qlayer()
set_layout = function() {
fix_dimension(layer.root,
row = list(id = c(0, 2, 3), value = c(prefer_height(meta$main),
prefer_height(meta$xlabels),
prefer_height(meta$xlab))),
column = list(id = c(1, 0, 3), value = c(prefer_width(meta$ylabels),
prefer_width(meta$ylab, FALSE),
10)))
}
set_layout()
meta$mainChanged$connect(set_layout)
meta$xlabChanged$connect(set_layout); meta$ylabChanged$connect(set_layout)
meta$xlabelsChanged$connect(set_layout); meta$ylabelsChanged$connect(set_layout)
view = qplotView(scene = scene)
view$setWindowTitle(sprintf('Mosaic plot: %s', meta$main))
meta$xlabChanged$connect(setxlab)
meta$ylabChanged$connect(setylab)
meta$formChanged$connect(function() {
meta$main = settitle(meta$form)
view$setWindowTitle(sprintf('Mosaic plot: %s', meta$main))
})
d.idx = add_listener(data, function(i, j) {
switch(j, .brushed = { redoHiliting <<-TRUE; qupdate(layer.main)},
.color = {
redoColor <<-TRUE;
qupdate(layer.main)
}, {
compute_coords(); redoHiliting<<- TRUE
redoColor <<- TRUE; flip_coords()
layer.main$invalidateIndex()
qupdate(layer.grid); qupdate(layer.xaxis); qupdate(layer.yaxis)
qupdate(layer.main)
})
})
qconnect(layer.main, 'destroyed', function(x) {
## b$colorChanged$disconnect(b.idx)
remove_listener(data, d.idx)
})
b$cursorChanged$connect(function() {
set_cursor(view, b$cursor)
})
sync_limits(meta, layer.main, layer.brush, layer.identify)
meta$manual.brush = function(pos) {
brush_mouse_move(layer = layer.main, event = list(pos = function() pos))
}
attr(view, 'meta') = meta
view
}
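## A minimal usage sketch (assumes the 'happy' data from productplots and
## cranvas::qdata() for building the mutaframe; inst/examples/qmosaic-ex.R holds
## the maintained example):
##   qhappy <- qdata(happy)
##   qmosaic(qhappy, ~ happy + sex, c("vspine", "hspine"))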
Mosaic.meta =
setRefClass("Mosaic_meta", contains = "CommonMeta",
fields = properties(list(
var = 'character',
form='formula',
divider='character',
origForm='formula',
origDivider='character',
inactiveVar='character',
inactiveDivider='character',
mdata='data.frame',
hdata='data.frame',
cdata='data.frame'
)))
|
8eb13bbe65be0e8944964e1a88e4e0c8fda85d73
|
a6253060e42e9bb8393f2dae6a0ecf395c873c19
|
/R_scripts_and_data/fig3.R
|
96ce676783b77b7f435458089e2eaef80e8bddcf
|
[] |
no_license
|
idopen/asymmetry_and_ageing
|
25af63061553b484a3167c523cda443c08fb1026
|
9e1001366a0f638f5d1cd899a79e931d80a1e13f
|
refs/heads/master
| 2022-11-27T10:22:59.807279 | 2020-08-03T12:02:11 | 2020-08-03T12:02:11 | 284,663,122 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 12,231 |
r
|
fig3.R
|
## figure 2:
## panel of plots
## A: dynamics r=1
## B: dynamics r=0
## C: damage r=0,1
library(tidyverse)
library(cowplot)
library(mgcv)
## fig2A: dynamics for m0=1
## read data (dynamics)
dA <- read.table("evol_m0_1.txt",header = F)
names(dA) <- c("generation","popsize","age","rep0","rep1","rpr0",
"rpr1","har0","har1","tra0","tra1","dam0","dam1","res0","res1",
paste0("v0_",0:15),paste0("v1_",0:15))
dA <- mutate(dA, for0 = 1-rep0-rpr0,
for1 = 1-rep1-rpr1)
dA2 <- with(dA, data.frame(y = c(for0,for1,rpr0,rpr1,rep0,rep1)))
dA2$cycle <- rep(dA$generation,6)
dA2$trait <- factor(rep(c("forage","repair","repro"), each = 2*nrow(dA)))
dA2$cell <- factor(rep(c(2,1),each=nrow(dA),times=3))
my_font_size <- 16
fig2A <- ggplot(dA2) + theme_cowplot(my_font_size) +
geom_line(aes(x=cycle, y=y, lty=cell, color=trait),
size = 1.5) +
scale_x_continuous(expand = c(0,0),
limits = c(0,10000),
breaks = seq(0,10000,2500)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,1),
breaks = seq(0,1,0.5)) +
scale_color_manual(values = c("darkgreen","brown","orange")) +
scale_linetype_manual(values = c(1,2)) +
labs(x = "\nTime", y = "Allocation",
lty = "Cell type:", color = "Trait:") +
background_grid(major = "xy", minor = "y") +
#theme(legend.position = "top") +
theme(axis.text.x = element_blank()) +
theme(axis.title.x = element_blank()) +
ggtitle(" r = 1")
fig2A
## fig2B: m0 = 0
## read data (dynamics)
dB <- read.table("evol_m0_0.txt",header = F)
names(dB) <- c("generation","popsize","age","rep0","rep1","rpr0",
"rpr1","har0","har1","tra0","tra1","dam0","dam1","res0","res1",
paste0("v0_",0:15),paste0("v1_",0:15))
dB <- mutate(dB, for0 = 1-rep0-rpr0,
for1 = 1-rep1-rpr1)
dB2 <- with(dB, data.frame(y = c(for0,for1,rpr0,rpr1,rep0,rep1)))
dB2$cycle <- rep(dB$generation,6)
dB2$trait <- factor(rep(c("forage","repair","repro"), each = 2*nrow(dB)))
dB2$cell <- factor(rep(c(1,2),each=nrow(dB),times=3))
fig2B <- ggplot(dB2) + theme_cowplot(my_font_size) +
geom_line(aes(x=cycle, y=y, lty=cell, color=trait),
size = 1.5) +
scale_x_continuous(expand = c(0,0),
limits = c(0,10000),
breaks = seq(0,10000,2500)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,1),
breaks = seq(0,1,0.5)) +
scale_color_manual(values = c("darkgreen","brown","orange")) +
scale_linetype_manual(values = c(1,2)) +
labs(x = "\nTime", y = "Allocation",
lty = "Cell type:", color = "Trait:") +
background_grid(major = "xy", minor = "y") +
# theme(legend.position = "top") +
theme(axis.text.x = element_blank()) +
theme(axis.text.y = element_blank()) +
theme(axis.title.y = element_blank()) +
theme(axis.title.x = element_blank()) +
ggtitle(" r = 0")
fig2B
## fig2C: resources + damage
## damage multiplier:
dm <- 10
damage_color <- "#d96125"
dC <- with(dA, data.frame(y = c(res0,res1,dm*dam0,dm*dam1)))
dC$cycle <- rep(dA$generation,4)
dC$trait <- factor(rep(c("resources","damage"), each = 2*nrow(dA)))
dC$cell <- factor(rep(c(2,1),each=nrow(dA),times=2))
fig2C <- ggplot(dC) + theme_cowplot(my_font_size) +
geom_line(aes(x=cycle, y=y, lty=cell, color=trait),
size = 1.5) +
scale_x_continuous(expand = c(0,0),
limits = c(0,10000),
breaks = seq(0,10000,2500)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,8),
breaks = seq(0,8,2)) +
scale_color_manual(values = c(damage_color,"blue")) +
scale_linetype_manual(values = c(1,2)) +
labs(x = "Time", y = "Amount",
lty = "Cell type:", color = "Trait:") +
background_grid(major = "xy", minor = "y") +
theme(axis.text.x = element_blank()) +
theme(axis.title.y = element_text(margin = margin(t=0, r=15, b=0, l=0))) +
guides(linetype = FALSE)
fig2C
## fig2C: resources + damage
## damage multiplier:
dD <- with(dB, data.frame(y = c(res0,res1,dm*dam0,dm*dam1)))
dD$cycle <- rep(dB$generation,4)
dD$trait <- factor(rep(c("resources","damage"), each = 2*nrow(dB)))
dD$cell <- factor(rep(c(1,2),each=nrow(dB),times=2))
fig2D <- ggplot(dD) + theme_cowplot(my_font_size) +
geom_line(aes(x=cycle, y=y, lty=cell, color=trait),
size = 1.5) +
scale_x_continuous(expand = c(0,0),
limits = c(0,10000),
breaks = seq(0,10000,2500)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,8),
breaks = seq(0,8,2)) +
scale_color_manual(values = c(damage_color,"blue")) +
scale_linetype_manual(values = c(1,2)) +
labs(x = "Time",
lty = "Cell type:", color = "Trait:") +
background_grid(major = "xy", minor = "y") +
theme(axis.text.x = element_blank()) +
theme(axis.title.y = element_blank()) +
theme(axis.text.y = element_blank()) +
guides(linetype = FALSE)
fig2D
## Grid:
prow1 <- plot_grid(
fig2A + theme(legend.position="none"),
fig2B + theme(legend.position="none"),
align = 'vh',
labels = c("A", "B"),
label_size = 18,
hjust = -1,
nrow = 1
)
legend1 <- get_legend(
# create some space to the left of the legend
fig2A + theme(legend.box.margin = margin(0, 0, 0, 12)) +
theme(legend.key.width=unit(1.5,"cm"))
)
prow2 <- plot_grid(
fig2C + theme(legend.position="none"),
fig2D + theme(legend.position="none"),
align = 'vh',
labels = c("C", "D"),
label_size = 18,
hjust = -1,
nrow = 1
)
legend2 <- get_legend(
# create some space to the left of the legend
fig2C + theme(legend.box.margin = margin(0, 0, 0, 12)) +
theme(legend.key.width=unit(1.5,"cm"))
)
plot_grid(prow1, legend1,
prow2, legend2,
nrow = 2,
axis = "l",
align = "v",
rel_widths = c(3, .6, 3, .6))
##################
## fig1A: no tern because can't combine with cowplot :-(
d_time <- d3 %>% filter(cycle <= 1000)
fig1A <- ggplot(d_time) + theme_cowplot(my_font_size) +
geom_line(aes(x=cycle, y=y, lty=cell, color=trait),
size = 1.5) +
scale_x_continuous(expand = c(0,0),
limits = c(0,1000),
breaks = seq(0,1000,250)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,1),
breaks = seq(0,1,0.5)) +
scale_color_manual(values = c("darkgreen","brown","orange")) +
scale_linetype_manual(values = c(1,3)) +
labs(x = "\nTime", y = "Allocation") +
background_grid(major = "xy", minor = "y") +
theme(legend.position = "right") +
theme(axis.text.x = element_blank())
fig1A
## fig 1B: age distribution
age_fill <- "#a887ab"
fig1B <- ggplot(d,aes(age)) + theme_cowplot(my_font_size) +
geom_histogram(aes(y=..density..),
binwidth = 1,
fill = age_fill,
color = "black") +
scale_x_continuous(expand = c(0,0),
limits = c(0,15),
breaks = seq(1,13,2)) +
labs(x = "Age", y = "Distribution\n") +
background_grid(major = "xy", minor = "y") +
theme(axis.text.y = element_blank())
fig1B
## fig1C: damage distribution
damage_fill <- "#d96125"
fig1C <- ggplot(d,aes(dam0)) + theme_cowplot(my_font_size) +
geom_histogram(aes(y=..density..),
binwidth = 0.2,
fill = damage_fill,
color = "black") +
scale_x_continuous(expand = c(0,0),
limits = c(0,6),
breaks = 0:6) +
labs(x = "Damage", y = "Distribution\n") +
background_grid(major = "xy", minor = "y") +
theme(axis.text.y = element_blank())
fig1C
## fig1D: freq deleterious alleles vs. damage
V <- d[,14:29]
V_means <- apply(V,2,mean)
damage_vals <- seq(0.5*6/15,6+0.5*6/15,length.out = 16)
d_1D <- data.frame(damage_int=1:16,freq=V_means)
my_del_col <- "#d92567"
fig1D <- ggplot(d_1D,aes(damage_int,freq)) + theme_cowplot(my_font_size) +
geom_point(size = 3.5, color = my_del_col) +
scale_x_continuous(expand = c(0,0),
limits = c(0.5,16.5),
breaks = seq(1,15,2)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,1),
breaks = seq(0,1,0.5)) +
labs(x = "Damage interval", y = "Mutant frequency") +
background_grid(major = "xy", minor = "y")
fig1D
## fig1E
d_dam <- d %>% group_by(age) %>% summarise(y = median(dam0),
ymin = quantile(dam0,0.2),
ymax = quantile(dam0,0.8))
d_dam <- d_dam %>% filter(age <= 15)
fig1E <- ggplot(d_dam,aes(age,y)) + theme_cowplot(my_font_size) +
geom_point(size = 3.5, color = damage_fill) +
geom_errorbar(aes(ymin=ymin,ymax=ymax),
width=0,
color = damage_fill) +
scale_x_continuous(expand = c(0,0),
limits = c(0.5,15.5),
breaks = seq(1,15,2)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,4),
breaks = seq(0,4,1)) +
labs(x = "Age", y = "Damage") +
background_grid(major = "xy", minor = "y")
fig1E
## fig1F: mortality vs. age
d_mort <- d %>% group_by(age) %>% summarise(y = mean(dead))
d_mort <- d_mort %>% filter(age <= 15)
d_gam <- d %>% filter(age <= 15)
m1 <- gam(dead ~ s(age), family = binomial, data = d_gam)
gam.check(m1)
d.pred <- data.frame(age=seq(1,15,0.1))
fitted <- predict(m1,newdata = d.pred, type = "response", se.fit = T)
d.pred <- d.pred %>% mutate(y = fitted$fit,
ymin = y-1.96*fitted$se.fit,
ymax = y+1.96*fitted$se.fit)
fig1F <- ggplot(d_mort,aes(age,y)) + theme_cowplot(my_font_size) +
geom_point(size = 3.5, color = my_del_col) +
geom_line(data = d.pred, color = my_del_col, size = 1.3) +
geom_ribbon(data = d.pred, aes(ymin=ymin,ymax=ymax),
alpha = 0.3) +
scale_x_continuous(expand = c(0,0),
limits = c(0.5,15.5),
breaks = seq(1,15,2)) +
scale_y_continuous(expand = c(0,0),
limits = c(0,0.06),
breaks = seq(0,0.06,0.02)) +
labs(x = "Age", y = "Mortality") +
background_grid(major = "xy", minor = "y")
fig1F
## Grid:
plot_grid(fig1A, fig1B, fig1C, fig1D, fig1E, fig1F,
labels = c('A','B','C','D','E','F'),
label_size = 18,
ncol = 3)
##########################################################
## fig1A: ternary
library(ggtern)
d2 <- read.table("evol_sym.txt",header = F)
names(d2) <- c("generation","popsize","age","rep0","rep1","rpr0",
"rpr1","har0","har1","tra0","tra1","dam0","dam1","res0","res1",
paste0("v0_",0:15),paste0("v1_",0:15))
d3 <- with(d2, data.frame(generation=rep(generation,2),
reproduction=c(rep0,rep1),
repair=c(rpr0,rpr1),
foraging=c(1-rep0-rpr0,1-rep1-rpr1),
cell=as.factor(rep(c(1,2),each=nrow(d2)))))
## smooth curve
d4 <- d3 %>% filter(generation < 2000)
dT <- data.frame(foraging=c(0.333,0.0),repair=c(0.333,0.333),reproduction=c(0.333,0.666),
cell=as.factor(c(1,1)))
dR <- data.frame(foraging=c(0.333,0.666),repair=c(0.333,0.0),reproduction=c(0.333,0.333),
cell=as.factor(c(1,1)))
dL <- data.frame(foraging=c(0.333,0.333),repair=c(0.333,0.666),reproduction=c(0.333,0.0),
cell=as.factor(c(1,1)))
fig1A <- ggtern(d4,aes(x=foraging,y=repair,z=reproduction,color=cell)) +
geom_line(data=dT,color="black",lty=2) +
geom_line(data=dR,color="black",lty=2) +
geom_line(data=dL,color="black",lty=2) +
geom_point(size=0.7,alpha=0.4) +
#geom_line(alpha=0.4,lwd=0.7) +
labs(x="",xarrow="Foraging %",
y="",yarrow="Repair %",
z="",zarrow="Reproduction %") +
theme_bw(base_size = 20) +
theme_showsecondary() +
theme_showarrows() +
theme(legend.position=c(0.0,0.7), legend.justification=c(-0.1,1))
fig1A
######################################
|
5150f285b87a634e8568c5070aeadd4f726fe1d3
|
4a2f9a190e08ce4f60156c5b56094ab48c5fa295
|
/Simple Linear Regression/emp_data.R
|
05d0b361158b5c9426b4842d8cc14899d0100afd
|
[] |
no_license
|
Tusharbagul/Machine-Learning-With-R
|
2b5faa76b195895d084f8452dc76b428d6d9cd58
|
99c935e63d12463cc2861d4417fb0cb9a5eeb0bf
|
refs/heads/master
| 2022-12-01T15:02:13.666455 | 2020-08-13T12:27:52 | 2020-08-13T12:27:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,025 |
r
|
emp_data.R
|
#Read Data
emp <- read.csv('C:/Users/Tushar Bagul/Desktop/Data_Science/Assignments/simpleLinearregression/emp_data.csv')
colnames(emp)
#correlation matrix
cor(emp)
#Regression model and summary
model1 <- lm(Churn_out_rate~Salary_hike, data = emp)
summary(model1)
#New Data Frame With New Data
churn_rate = data.frame(Salary_hike=c(1600))
#Predict For The New Data
churn = predict(model1, churn_rate)
churn
#Predict For Weight Variable From Historical Data
pred <- predict(model1)
pred
#Prepare A New Data Frame With Pred And Error
newdata<-data.frame(emp,pred,"Error"= emp$Churn_out_rate - pred)
newdata
#Transforming input using square function
model2 <- lm(Churn_out_rate~Salary_hike + I(Salary_hike^2), data=emp)
summary(model2)
#predicting using model2
pred2 <-predict(model2)
pred2
#Prepare new df with pred2 and error
newdata2 <- data.frame(emp, pred2, "Error"=emp$Churn_out_rate - pred2)
newdata2
#plots
plot(emp, pch=16, col="blue")
plot(model2, pch=16, col="blue")
|
8bc19fe3f8a99bd83f9be71a0bd14c6e48f415b0
|
185eb75246acc598d15d43a6a487ef2ee0b3d231
|
/R/mousebrain.org/preprocess-loom.R
|
8948b3747b4f6d508965a62fabea083da09d213c
|
[] |
no_license
|
suzannejin/SCT-MoA
|
4cd295da2252475d482905bbdfffa48aa9ca4c2d
|
bfd455479d94db92d30153b763d06f5732879606
|
refs/heads/master
| 2023-05-30T01:18:39.043455 | 2019-02-25T18:20:10 | 2019-02-25T18:20:10 | 362,417,400 | 0 | 0 | null | 2021-04-28T09:50:38 | 2021-04-28T09:50:37 | null |
UTF-8
|
R
| false | false | 1,505 |
r
|
preprocess-loom.R
|
# Preprocess loom files.
setwd("~/git/SCT-MoA")
options(stringsAsFactors = F)
# usage: preprocess-loom.R <input_dir> <output_dir>
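# example invocation (hypothetical paths):
#   Rscript preprocess-loom.R data/loom_csv data/loom_filtered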
args = commandArgs(trailingOnly = T)
if (length(args) < 2)
stop("must provide input and output directories")
input_dir = args[1]
output_dir = args[2]
if (!dir.exists(input_dir))
stop("input directory does not exist")
if (!dir.exists(output_dir))
dir.create(output_dir, recursive = T)
# read protein-coding genes
coding = read.delim("data/ensembl/protein_coding_genes.txt.gz")
# process files one at a time
files = list.files(input_dir, pattern = "*.csv.gz", full.names = T)
for (file in files) {
message("processing ", basename(file), " ...")
# read data
dat = read.csv(file, check.names = F)
# filter empty genes
genes = dat[[1]]
expr = t(dat[, -1])
colnames(expr) = genes
expr = expr[rowSums(expr) > 0, colSums(expr) > 0]
# filter to protein-coding genes
expr = expr[, colnames(expr) %in% coding$gene]
# filter to the top 5,000 genes
present = colSums(expr > 0)
ranks = rank(present, ties.method = 'random')
keep = ranks > ncol(expr) - 5e3
expr = expr[, keep]
# write
clean_name = chartr(' /', '__', gsub("\\.gz", "", basename(file)))
output_file = file.path(output_dir, clean_name)
write.table(expr, output_file, quote = F, sep = "\t", row.names = T)
system(paste("gzip --force", output_file))
message(" wrote file ", basename(file), " with ", nrow(expr), " cells and ",
ncol(expr), " genes")
}
|
934464cf4438e8090390a3798e889665d6f17072
|
7f141116154eed50968bddd35c9a47b7194e9b88
|
/R/richness_objective_bayes.R
|
b5e3ebfe2915c215c8a600a43b8b9fae583bc869
|
[] |
no_license
|
adw96/breakaway
|
36a9d2416db21172f7623c1810d2c6c7271785ed
|
d81b1799f9b224113a58026199a849c2ec147524
|
refs/heads/main
| 2022-12-22T06:20:56.466849 | 2022-11-22T22:35:57 | 2022-11-22T22:35:57 | 62,469,870 | 65 | 22 | null | 2022-11-22T22:35:58 | 2016-07-02T21:10:56 |
R
|
UTF-8
|
R
| false | false | 36,028 |
r
|
richness_objective_bayes.R
|
#' Objective Bayes species richness estimate with the Negative Binomial model
#'
#' @param data TODO(Kathryn)
#' @param output TODO(Kathryn)
#' @param plot TODO(Kathryn)
#' @param answers TODO(Kathryn)
#' @param tau TODO(Kathryn)
#' @param burn.in TODO(Kathryn)
#' @param iterations TODO(Kathryn)
#' @param Metropolis.stdev.N TODO(Kathryn)
#' @param Metropolis.start.T1 TODO(Kathryn)
#' @param Metropolis.stdev.T1 TODO(Kathryn)
#' @param Metropolis.start.T2 TODO(Kathryn)
#' @param Metropolis.stdev.T2 TODO(Kathryn)
#' @param bars TODO(Kathryn)
#'
#' @return A list of results, including \item{est}{the median of estimates of N}, \item{ci}{a confidence interval for N},
#' \item{mean}{the mean of estimates of N}, \item{semeanest}{the standard error of mean estimates},
#' \item{dic}{the DIC of the model}, \item{fits}{fitted values}, and \item{diagnostics}{model diagnostics}.
#'
#' @importFrom stats acf
#' @importFrom graphics hist par plot
#'
#' @export
objective_bayes_negbin <- function(data,
output=TRUE,
plot=TRUE,
answers=FALSE,
tau=10,
burn.in=1000,
iterations=5000,
Metropolis.stdev.N=100,
Metropolis.start.T1=-0.8,
Metropolis.stdev.T1=0.01,
Metropolis.start.T2=0.8,
Metropolis.stdev.T2=0.01,
bars=5) {
data <- check_format(data)
fullfreqdata <- data
if (tau > max(data[,1])) {
tau <- max(data[,1])
}
# calculate summary statistics on full data
w<-sum(fullfreqdata[,2])
n<-sum(fullfreqdata[,1]*fullfreqdata[,2])
# subset data up to tau
freqdata<-fullfreqdata[1:tau,]
# calculate summary statistics on data up to tau
w.tau<-sum(freqdata[,2])
n.tau<-sum(freqdata[,1]*freqdata[,2])
# calculate NP estimate of n0
NP.est.n0<-w.tau/(1-freqdata[1,2]/n.tau)-w.tau
### Step 3: calculate posterior
## initialization
iterations<-iterations+burn.in
N<-rep(0,iterations)
T1T2<-matrix(rep(c(Metropolis.start.T1,Metropolis.start.T2),each=iterations),ncol=2)
# to track acceptance rate of T1T2
a1<-0
# to track acceptance rate of N
a2<-0
# starting value based on nonparametric estimate of n0
N[1]<-ceiling(NP.est.n0)+w.tau
# storage for deviance replicates
D.post<-rep(0,iterations)
for (i in 2:iterations){
# print every 500th iteration number
if (i %in% seq(0,iterations-burn.in,by=500)) {message(paste("starting iteration ",i," of ",iterations,sep=""))}
## sample from p(T1T2|N,x)
## propose value T1T2 from a bivariate normal dist.; make sure T1T2.new > {-1,0}
repeat {
T1T2.new <- rmvnorm(1, c(T1T2[i-1,1],T1T2[i-1,2]),
matrix(c(Metropolis.stdev.T1,0,0,Metropolis.stdev.T2),nrow=2))
if(T1T2.new[1]>(-1) & T1T2.new[2]>0)
break
}
# calculate log of acceptance ratio
logr1<-(-1)*log(T1T2.new[1]^2+2*T1T2.new[1]+2)-log(1+T1T2.new[2]^2)+n.tau*log(T1T2.new[2])-
(N[i-1]*(1+T1T2.new[1])+n.tau)*log(1+T1T2.new[1]+T1T2.new[2])+
N[i-1]*(1+T1T2.new[1])*log(1+T1T2.new[1])+
sum(freqdata[,2]*lgamma(1+T1T2.new[1]+freqdata[,1]))-
w.tau*lgamma(1+T1T2.new[1])+log(T1T2[i-1,1]^2+2*T1T2[i-1,1]+2)+
log(1+T1T2[i-1,2]^2)-n.tau*log(T1T2[i-1,2])+
(N[i-1]*(1+T1T2[i-1,1])+n.tau)*log(1+T1T2[i-1,1]+T1T2[i-1,2])-
N[i-1]*(1+T1T2[i-1,1])*log(1+T1T2[i-1,1])-
sum(freqdata[,2]*lgamma(1+T1T2[i-1,1]+freqdata[,1]))+
w.tau*lgamma(1+T1T2[i-1,1])
# calculate acceptance ratio
r1<-exp(logr1)
# accept or reject proposed value
if (runif(1) < min(r1,1)) {
T1T2[i,]<-T1T2.new
a1<-a1+1
} else {
T1T2[i,]<-T1T2[i-1,]
}
## sample from p(N|A,G,x)
## make sure N.new >=w.tau
repeat {
N.new<-rnbinom(1,mu=N[i-1],size=Metropolis.stdev.N)
if(N.new>w.tau-1)
break
}
## calculate log(N.new!/(N.new-w.tau)!)
N3.new<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.new[j+1]<-log(N.new-j)
}
N2.new<-sum(N3.new)
## calculate log(N[i-1]!/(N[i-1]-w.tau)!)
N3<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3[j+1]<-log(N[i-1]-j)
}
N2<-sum(N3)
# calculate log of acceptance ratio
logr2<-(-1/2)*log(N.new)+N2.new+N.new*(1+T1T2[i,1])*log((1+T1T2[i,1])/(1+T1T2[i,1]+T1T2[i,2]))+
log(dnbinom(N[i-1],mu=N.new,size=Metropolis.stdev.N))+
(1/2)*log(N[i-1])-N2-
N[i-1]*(1+T1T2[i,1])*log((1+T1T2[i,1])/(1+T1T2[i,1]+T1T2[i,2]))-
log(dnbinom(N.new,mu=N[i-1],size=Metropolis.stdev.N))
# calculate acceptance ratio
r2<-exp(logr2)
# accept or reject proposed value
if (runif(1)<min(r2,1)) {N[i]<-N.new ; a2<-a2+1} else {N[i]<-N[i-1]}
## calculate deviance from current sample
# calculate log(N[i]!/(N[i]-w.tau)!)
N3.curr<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.curr[j+1]<-log(N[i]-j)
}
N2.curr<-sum(N3.curr)
# calculate deviance
D.post[i]<-(-2)*(N2.curr+n.tau*log(T1T2[i,2])-(N[i]*(1+T1T2[i,1])+n.tau)*log(1+T1T2[i,1]+T1T2[i,2])+N[i]*(1+T1T2[i,1])*log(1+T1T2[i,1])+sum(freqdata[,2]*lgamma(1+T1T2[i,1]+freqdata[,1]))-w.tau*lgamma(1+T1T2[i,1])-sum(lfactorial(freqdata[,2]))-sum(freqdata[,2]*lgamma(freqdata[,1]+1)))
}
### Step 4: model diagnostics
## 1) deviance at posterior mean
mean.T1<-mean(T1T2[(burn.in+1):iterations,1])
mean.T2<-mean(T1T2[(burn.in+1):iterations,2])
mean.N<-mean(N[(burn.in+1):iterations])
## calculate log(mean.N!/(mean.N-w.tau)!)
N3.mean<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.mean[j+1]<-log(mean.N-j)
}
N2.mean<-sum(N3.mean)
loglik.post.mean<-N2.mean+n.tau*log(mean.T2)-(mean.N*(1+mean.T1)+n.tau)*log(1+mean.T1+mean.T2)+mean.N*(1+mean.T1)*log(1+mean.T1)+sum(freqdata[,2]*lgamma(1+mean.T1+freqdata[,1]))-w.tau*lgamma(1+mean.T1)-sum(lfactorial(freqdata[,2]))-sum(freqdata[,2]*lgamma(freqdata[,1]+1))
D.mean<-(-2)*loglik.post.mean
## 2) posterior mean and median deviances
mean.D<-mean(D.post[(burn.in+1):iterations])
median.D<-quantile(D.post[(burn.in+1):iterations],probs=.5,names=F)
## 3) model complexity
p.D<-mean.D-D.mean
## 4) Deviance information criterion
DIC<-2*p.D+D.mean
### Step 5: fitted values based on medians of the marginal posteriors
median.T1<-quantile(T1T2[(burn.in+1):iterations,1],probs=.5,names=F)
median.T2<-quantile(T1T2[(burn.in+1):iterations,2],probs=.5,names=F)
median.N<-quantile(N[(burn.in+1):iterations],probs=.5,names=F)
fits<-rep(0,tau)
for (k in 1:tau){
fits[k]<-(median.N)*dnbinom(k,size=median.T1+1,prob=(median.T1+1)/(median.T1+median.T2+1))
}
fitted.values<-data.frame(cbind(j=seq(1,tau),fits,count=freqdata[,2]))
### Step 6: estimate thinning to reduce correlated posterior samples
lags<-acf(N[(burn.in+1):iterations],type="correlation",main="Autocorr plot",ylab="ACF",xlab="Lag", plot=F)
lag.thin<-suppressWarnings(min(which(lags$acf<0.1)))
if (lag.thin==Inf) {lag.thin<-paste(">",length(lags$lag),sep="")
}
### Step 7: results
hist.points<-hist(N[(burn.in+1):iterations]+w-w.tau,breaks=seq(w,max(N)+w-w.tau+1)-0.5, plot = FALSE)
results<-data.frame(w=w,
n=n,
NP.est.N=NP.est.n0+w,
tau=tau,
w.tau=w.tau,
n.tau=n.tau,
iterations=iterations-burn.in,
burn.in=burn.in,
acceptance.rate.T1T2=a1/iterations,
acceptance.rate.N=a2/iterations,
lag=lag.thin,
mode.N=hist.points$mids[which.max(hist.points$density)],
mean.N=mean(N[(burn.in+1):iterations])+w-w.tau,
median.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.5,names=F),
LCI.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.025,names=F),
UCI.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.975,names=F),
stddev.N=sd((N[(burn.in+1):iterations]+w-w.tau)),
mean.D=mean.D,
median.D=median.D,
DIC
)
final_results <- list()
final_results$est <- results$median.N
final_results$ci <- c("lower 95%"=results$LCI.N, "upper 95%"=results$UCI.N)
final_results$mean <- results$mean.N
final_results$semeanest <- results$stddev.N
final_results$dic <- DIC
final_results$fits <- fitted.values
final_results$diagnostics<-c("acceptance rate N"=results$acceptance.rate.N,
"acceptance rate T1T2"=results$acceptance.rate.T1T2,
"lag"=results$lag)
if (output) {
# output results and fitted values
print(final_results)
}
if (plot) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1,2))
## posterior histogram
hist(N[(burn.in+1):iterations]+w-w.tau,
main="Posterior distribution",xlab="Total Number of Species",
col='purple',freq=F,ylab="Density")
# make trace plot
plot((burn.in+1):iterations,N[(burn.in+1):iterations]+w-w.tau,type="l",xlab="Iteration Number",ylab="Total Number of Species", main="Trace plot")
}
if (answers) {
return(final_results)
}
}
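# A minimal usage sketch (illustrative frequency-count table: column 1 is the
# frequency index j, column 2 the number of species seen exactly j times):
#   ft <- data.frame(index = 1:10, freq = c(55, 30, 20, 12, 8, 6, 4, 3, 2, 1))
#   fit <- objective_bayes_negbin(ft, plot = FALSE, answers = TRUE)
#   fit$est; fit$ci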
#' Objective Bayes species richness estimate with the Poisson model
#'
#' @param data TODO(Kathryn)
#' @param output TODO(Kathryn)
#' @param plot TODO(Kathryn)
#' @param answers TODO(Kathryn)
#' @param tau TODO(Kathryn)
#' @param burn.in TODO(Kathryn)
#' @param iterations TODO(Kathryn)
#' @param Metropolis.stdev.N TODO(Kathryn)
#' @param Metropolis.start.lambda TODO(Kathryn)
#' @param Metropolis.stdev.lambda TODO(Kathryn)
#' @param bars TODO(Kathryn)
#'
#' @return A list of results, including \item{est}{the median of estimates of N}, \item{ci}{a confidence interval for N},
#' \item{mean}{the mean of estimates of N}, \item{semeanest}{the standard error of mean estimates},
#' \item{dic}{the DIC of the model}, \item{fits}{fitted values}, and \item{diagnostics}{model diagnostics}.
#'
#' @importFrom graphics hist par plot
#'
#' @export
objective_bayes_poisson <- function(data,
output=TRUE,
plot=TRUE,
answers=FALSE,
tau=10, burn.in=100,
iterations=2500,
Metropolis.stdev.N=75,
Metropolis.start.lambda=1,
Metropolis.stdev.lambda=0.3,
bars=5) {
data <- check_format(data)
fullfreqdata <- data
if (tau > max(data[,1])) {
tau <- max(data[,1])
}
# calculate summary statistics on full data
w<-sum(fullfreqdata[,2])
n<-sum(fullfreqdata[,1]*fullfreqdata[,2])
# subset data up to tau
freqdata<-fullfreqdata[1:tau,]
# calculate summary statistics on data up to tau
w.tau<-sum(freqdata[,2])
n.tau<-sum(freqdata[,1]*freqdata[,2])
# calculate NP estimate of n0
NP.est.n0<-w.tau/(1-freqdata[1,2]/n.tau)-w.tau
### Step 3: calculate posterior
## initialization
iterations<-iterations+burn.in
N<-rep(0,iterations)
L<-c(Metropolis.start.lambda,rep(1,iterations-1))
# to track acceptance rate of lambda
a1<-0
# to track acceptance rate of N
a2<-0
# starting value based on nonparametric estimate of n0
N[1]<-ceiling(NP.est.n0)+w.tau
# storage for deviance replicates
D.post<-rep(0,iterations)
for (i in 2:iterations){
# print every 500th iteration number
if (i %in% seq(0,iterations-burn.in,by=500)) {message(paste("starting iteration ",i," of ",iterations,sep=""))}
## sample from p(lambda|x,C)
# propose value for lambda
L.new<-abs(rnorm(1,mean=L[i-1],sd=Metropolis.stdev.lambda))
# calculate log of acceptance ratio
logr1<-(n.tau-1/2)*log(L.new)-L.new*N[i-1]-(n.tau-1/2)*log(L[i-1])+L[i-1]*N[i-1]
# calculate acceptance ratio
r1<-exp(logr1)
# accept or reject proposed value
if (runif(1)<min(r1,1)) {L[i]<-L.new ; a1<-a1+1} else {L[i]<-L[i-1]}
## sample from p(N|lambda,x)
## make sure N.new >=w.tau
repeat {
N.new<-rnbinom(1,mu=N[i-1],size=Metropolis.stdev.N)
if(N.new>w.tau-1)
break
}
## calculate log(N.new!/(N.new-w.tau)!)
N3.new<-rep(0,w.tau)
N3.new[1:w.tau]<-log(N.new-0:(w.tau-1))
N2.new<-sum(N3.new)
## calculate log(N[i-1]!/(N[i-1]-w.tau)!)
N3<-rep(0,w.tau)
N3[1:w.tau]<-log(N[i-1]- 0:(w.tau-1))
N2<-sum(N3)
# calculate log of acceptance ratio
logr2<-(N2.new-(1/2)*log(N.new)-N.new*L[i])-(N2-(1/2)*log(N[i-1])-N[i-1]*L[i])+(log(dnbinom(N[i-1],mu=N.new,size=Metropolis.stdev.N)))-(log(dnbinom(N.new,mu=N[i-1],size=Metropolis.stdev.N)))
# calculate acceptance ratio
r2<-exp(logr2)
# accept or reject proposed value
if (runif(1)<min(r2,1)) {N[i]<-N.new ; a2<-a2+1} else {N[i]<-N[i-1]}
## calculate deviance from current sample
# calculate log(N[i]!/(N[i]-w.tau)!)
N3.curr<-rep(0,w.tau)
N3.curr[1:w.tau]<-log(N[i]-0:(w.tau-1))
N2.curr<-sum(N3.curr)
# calculate deviance
D.post[i]<-(-2)*(N2.curr-sum(lfactorial(freqdata[,2]))-L[i]*(N[i])-sum(freqdata[,2]*log(factorial(freqdata[,1])))+n.tau*log(L[i]))
}
### Step 4: model diagnostics
## 1) deviance at posterior mean
mean.L<-mean(L[(burn.in+1):iterations])
mean.N<-mean(N[(burn.in+1):iterations])
## calculate log(mean.N!/(mean.N-w.tau)!)
N3.mean<-rep(0,w.tau)
N3.mean[1:w.tau]<-log(mean.N-0:(w.tau-1))
N2.mean<-sum(N3.mean)
loglik.post.mean<-N2.mean-sum(lfactorial(freqdata[,2]))-mean.L*mean.N+n.tau*log(mean.L)-sum(freqdata[,2]*lfactorial(freqdata[,1]))
D.mean<-(-2)*loglik.post.mean
## 2) posterior mean and median deviances
mean.D<-mean(D.post[(burn.in+1):iterations])
median.D<-quantile(D.post[(burn.in+1):iterations],probs=.5,
names=F)
## 3) model complexity
p.D<-mean.D-D.mean
## 4) Deviance information criterion
DIC<-2*p.D+D.mean
### Step 5: fitted values based on medians of the marginal posteriors
median.L<-quantile(L[(burn.in+1):iterations],probs=.5,names=F)
median.N<-quantile(N[(burn.in+1):iterations],probs=.5,names=F)
fits<-rep(0,tau)
fits[1:tau]<-(median.N)*dpois(1:tau,median.L)
fitted.values<-data.frame(cbind(j=seq(1,tau),fits,count=freqdata[,2]))
### Step 6: estimate thinning to reduce correlated posterior samples
lags<-acf(N[(burn.in+1):iterations],type="correlation",main="Autocorr plot",ylab="ACF",xlab="Lag", plot=F)
lag.thin<-suppressWarnings(min(which(lags$acf<0.1)))
if (lag.thin==Inf) {lag.thin<-paste(">",length(lags$lag),sep="")
}
### Step 7: results
hist.points<-hist(N[(burn.in+1):iterations]+w-w.tau,breaks=seq(w,max(N)+w-w.tau+1)-0.5, plot = FALSE)
results<-data.frame(w=w,
n=n,
NP.est.N=NP.est.n0+w,
tau=tau,
w.tau=w.tau,
n.tau=n.tau,
iterations=iterations-burn.in,
burn.in=burn.in,
acceptance.rate.lambda=a1/iterations,
acceptance.rate.N=a2/iterations,
lag=lag.thin,
mode.N=hist.points$mids[which.max(hist.points$density)],
mean.N=mean(N[(burn.in+1):iterations])+w-w.tau,
median.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.5,names=F),
LCI.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.025,names=F),
UCI.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.975,names=F),
stddev.N=sd((N[(burn.in+1):iterations]+w-w.tau)),
mean.D=mean.D,
median.D=median.D,
DIC)
final_results <- list()
final_results$est <- results$median.N
final_results$ci <- c("lower 95%"=results$LCI.N, "upper 95%"=results$UCI.N)
final_results$mean <- results$mean.N
final_results$semeanest <- results$stddev.N
final_results$dic <- DIC
final_results$fits <- fitted.values
final_results$diagnostics<-c("acceptance rate N"=results$acceptance.rate.N,
"acceptance rate lambda"=results$acceptance.rate.lambda,
"lag"=results$lag)
if (output) {
# output results and fitted values
print(final_results)
}
if (plot) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1,2))
## posterior histogram
hist(N[(burn.in+1):iterations]+w-w.tau,
main="Posterior distribution",xlab="Total Number of Species",
col='purple',freq=F,ylab="Density")
# make trace plot
plot((burn.in+1):iterations,N[(burn.in+1):iterations]+w-w.tau,type="l",xlab="Iteration Number",ylab="Total Number of Species", main="Trace plot")
}
if (answers) {
return(final_results)
}
}
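# The Poisson model is called the same way as the negative binomial one, e.g.
# objective_bayes_poisson(ft, plot = FALSE, answers = TRUE) on a two-column
# frequency-count table; the returned DIC values can be compared across models.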
#' Objective Bayes species richness estimate with the mixed-geometric model
#'
#' @param data TODO(Kathryn)
#' @param output TODO(Kathryn)
#' @param plot TODO(Kathryn)
#' @param answers TODO(Kathryn)
#' @param tau TODO(Kathryn)
#' @param burn.in TODO(Kathryn)
#' @param iterations TODO(Kathryn)
#' @param Metropolis.stdev.N TODO(Kathryn)
#' @param Metropolis.start.T1 TODO(Kathryn)
#' @param Metropolis.stdev.T1 TODO(Kathryn)
#' @param Metropolis.start.T2 TODO(Kathryn)
#' @param Metropolis.stdev.T2 TODO(Kathryn)
#' @param bars TODO(Kathryn)
#'
#' @return A list of results, including \item{est}{the median of estimates of N}, \item{ci}{a confidence interval for N},
#' \item{mean}{the mean of estimates of N}, \item{semeanest}{the standard error of mean estimates},
#' \item{dic}{the DIC of the model}, \item{fits}{fitted values}, and \item{diagnostics}{model diagnostics}.
#'
#' @importFrom graphics hist par plot
#'
#' @export
objective_bayes_mixedgeo <- function(data, output=TRUE, plot=TRUE, answers=FALSE,
tau=10, burn.in=100, iterations=2500, Metropolis.stdev.N=100,
Metropolis.start.T1=1, Metropolis.stdev.T1=2,
Metropolis.start.T2=3, Metropolis.stdev.T2=2, bars=3) {
data <- check_format(data)
fullfreqdata <- data
if (tau > max(data[,1])) {
tau <- max(data[,1])
}
# calculate summary statistics on full data
w<-sum(fullfreqdata[,2])
n<-sum(fullfreqdata[,1]*fullfreqdata[,2])
# subset data up to tau
freqdata<-fullfreqdata[1:tau,]
# calculate summary statistics on data up to tau
w.tau<-sum(freqdata[,2])
n.tau<-sum(freqdata[,1]*freqdata[,2])
# calculate NP estimate of n0
NP.est.n0<-w.tau/(1-freqdata[1,2]/n.tau)-w.tau
### Step 3: calculate posterior
## initialization
iterations<-iterations+burn.in
A<-rep(0,iterations)
T1<-rep(0,iterations)
T2<-rep(0,iterations)
N<-rep(0,iterations)
# to track acceptance rate of N
a1<-0
# starting value, nonparametric estimate of n0
N[1]<-ceiling(NP.est.n0)+w.tau
# starting value, MLE of T1
T1[1]<-Metropolis.start.T1
# starting value, MLE of T2
T2[1]<-Metropolis.start.T2
A[1]<-0.5
# storage for deviance replicates
D.post<-rep(0,iterations)
for (i in 2:iterations){
# print every 500th iteration number
if (i %in% seq(0,iterations-burn.in,by=500)) {message(paste("starting iteration ",i," of ",iterations,sep=""))}
## sample from p(Z|A,T1,T2,X,N)
## create a new vector of length N[i-1]
Z<-rep(0,length=N[i-1])
## create a full data vector
X<-c(rep(0,N[i-1]-w.tau),rep(freqdata[,1],times=freqdata[,2]))
## sample random bernoulli with appropriate success prob for each Z[k]; do not allow for Z all zeros or ones
for (k in 1:N[i-1]){
Z[k]<-rbinom(1,1,prob=A[i-1]*(1/(1+T1[i-1]))*(T1[i-1]/(1+T1[i-1]))^X[k]/((A[i-1]*(1/(1+T1[i-1]))*(T1[i-1]/(1+T1[i-1]))^X[k])+((1-A[i-1])
*(1/(1+T2[i-1]))*(T2[i-1]/(1+T2[i-1]))^X[k])))
}
## sample from p(A|Z,T1,T2,X,N)
## sample from beta dist
A[i]<-rbeta(1,shape1=sum(Z)+1,shape2=N[i-1]-sum(Z)+1)
## sample from p(T1|A,Z,T2,X,N) and p(T2|A,Z,T1,X,N)
repeat{
## sample T1/(1+T1) and T2/(1+T2) from beta dists
T1.trans<-rbeta(1,shape1=sum(X*Z)+0.5,shape2=sum(Z)+0.5)
T2.trans<-rbeta(1,shape1=sum(X)-sum(X*Z)+0.5,shape2=N[i-1]-sum(Z)+0.5)
## back transform to T1 and T2
T1[i]<-T1.trans/(1-T1.trans)
T2[i]<-T2.trans/(1-T2.trans)
## keep T1<T2
if(T1[i]<T2[i])
break
}
## sample from p(N|A,T1,T2,Z,X)
## make sure N.new >=w.tau
repeat {
N.new<-rnbinom(1,mu=N[i-1],size=Metropolis.stdev.N)
if(N.new>w.tau-1)
break
}
## calculate log(N.new!/(N.new-w.tau)!)
N3.new<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.new[j+1]<-log(N.new-j)
}
N2.new<-sum(N3.new)
## calculate log(N[i-1]!/(N[i-1]-w.tau)!)
N3<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3[j+1]<-log(N[i-1]-j)
}
N2<-sum(N3)
## calculate log of acceptance ratio
logr1<-(-1/2)*log(N.new)+N2.new+N.new*log(A[i]*(1/(1+T1[i]))+(1-A[i])*(1/(1+T2[i])))+(1/2)*log(N[i-1])-N2-N[i-1]*log(A[i]*(1/(1+T1[i]))+(1-A[i])*(1/(1+T2[i])))+log(dnbinom(N[i-1],mu=N.new,size=Metropolis.stdev.N))-log(dnbinom(N.new,mu=N[i-1],size=Metropolis.stdev.N))
## calculate acceptance ratio
r1<-exp(logr1)
## accept or reject the proposed value
if (runif(1)<min(r1,1)) {N[i]<-N.new ; a1<-a1+1} else {N[i]<-N[i-1]}
## calculate deviance from current sample
## calculate log(N[i]!/(N[i]-w.tau)!)
N3.curr<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.curr[j+1]<-log(N[i]-j)
}
N2.curr<-sum(N3.curr)
# calculate deviance
D.post[i]<-(-2)*(N2.curr+(N[i]-w.tau)*log(A[i]*(1/(1+T1[i]))+(1-A[i])*(1/(1+T2[i])))+sum(freqdata[,2]*log(A[i]*(1/(1+T1[i]))*(T1[i]/(1+T1[i]))^freqdata[,1]+(1-A[i])*(1/(1+T2[i]))*(T2[i]/(1+T2[i]))^freqdata[,1]))-sum(lfactorial(freqdata[,2])))
}
### Step 4: model diagnostics
## 1) deviance at posterior mean
mean.A<-mean(A[(burn.in+1):iterations])
mean.T1<-mean(T1[(burn.in+1):iterations])
mean.T2<-mean(T2[(burn.in+1):iterations])
mean.N<-mean(N[(burn.in+1):iterations])
## calculate log(mean.N!/(mean.N-w.tau)!)
N3.mean<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.mean[j+1]<-log(mean.N-j)
}
N2.mean<-sum(N3.mean)
loglik.post.mean<-N2.mean+(mean.N-w.tau)*log(mean.A*(1/(1+mean.T1))+(1-mean.A)*(1/(1+mean.T2)))+sum(freqdata[,2]*log(mean.A*(1/(1+mean.T1))*(mean.T1/(1+mean.T1))^freqdata[,1]+(1-mean.A)*(1/(1+mean.T2))*(mean.T2/(1+mean.T2))^freqdata[,1]))-sum(lfactorial(freqdata[,2]))
D.mean<-(-2)*loglik.post.mean
## 2) posterior mean and median deviances
mean.D<-mean(D.post[(burn.in+1):iterations])
median.D<-quantile(D.post[(burn.in+1):iterations],probs=.5,names=F)
## 3) model complexity
p.D<-mean.D-D.mean
## 4) Deviance information criterion
DIC<-2*p.D+D.mean
### Step 5: fitted values based on medians of the marginal posteriors
median.A<-quantile(A[(burn.in+1):iterations],probs=.5,names=F)
median.T1<-quantile(T1[(burn.in+1):iterations],probs=.5,names=F)
median.T2<-quantile(T2[(burn.in+1):iterations],probs=.5,names=F)
median.N<-quantile(N[(burn.in+1):iterations],probs=.5,names=F)
fits<-rep(0,tau)
for (k in 1:tau){
fits[k]<-(median.N)*(median.A*dgeom(k,prob=1/(1+median.T1))+(1-median.A)*dgeom(k,prob=1/(1+median.T2)))
}
fitted.values<-data.frame(cbind(j=seq(1,tau),fits,count=freqdata[,2]))
### Step 6: estimate thinning to reduce correlated posterior samples
lags<-acf(N[(burn.in+1):iterations],type="correlation",main="Autocorr plot",ylab="ACF",xlab="Lag", plot=F)
lag.thin<-suppressWarnings(min(which(lags$acf<0.1)))
if (lag.thin==Inf) {lag.thin<-paste(">",length(lags$lag),sep="")
}
### Step 7: results
hist.points<-hist(N[(burn.in+1):iterations]+w-w.tau,breaks=seq(w,max(N)+w-w.tau+1)-0.5, plot = FALSE)
results<-data.frame(w=w,
n=n,
NP.est.N=NP.est.n0+w,
tau=tau,
w.tau=w.tau,
n.tau=n.tau,
iterations=iterations-burn.in,
burn.in=burn.in,
acceptance.rate.T1T2=1,
acceptance.rate.N=a1/iterations,
lag=lag.thin,
mode.N=hist.points$mids[which.max(hist.points$density)],
mean.N=mean(N[(burn.in+1):iterations])+w-w.tau,
median.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.5,names=F),
LCI.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.025,names=F),
UCI.N=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.975,names=F),
stddev.N=sd((N[(burn.in+1):iterations]+w-w.tau)),
mean.D=mean.D,
median.D=median.D,
DIC
)
final_results <- list()
final_results$est <- results$median.N
final_results$ci <- c("lower 95%"=results$LCI.N, "upper 95%"=results$UCI.N)
final_results$mean <- results$mean.N
final_results$semeanest <- results$stddev.N
final_results$dic <- DIC
final_results$fits <- fitted.values
final_results$diagnostics<-c("acceptance rate N"=results$acceptance.rate.N,
"acceptance rate T1T2"=results$acceptance.rate.T1T2,
"lag"=results$lag)
if (output) {
# output results and fitted values
print(final_results)
}
if (plot) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1,2))
## posterior histogram
hist(N[(burn.in+1):iterations]+w-w.tau,
main="Posterior distribution",xlab="Total Number of Species",
col='purple',freq=F,ylab="Density")
# make trace plot
plot((burn.in+1):iterations,N[(burn.in+1):iterations]+w-w.tau,type="l",xlab="Iteration Number",ylab="Total Number of Species", main="Trace plot")
}
if (answers) {
return(final_results)
}
}
#' Estimate species richness with an objective Bayes method using a geometric model
#'
#' @param data TODO(Kathryn)
#' @param output TODO(Kathryn)
#' @param plot TODO(Kathryn)
#' @param answers TODO(Kathryn)
#' @param tau TODO(Kathryn)
#' @param burn.in TODO(Kathryn)
#' @param iterations TODO(Kathryn)
#' @param Metropolis.stdev.N TODO(Kathryn)
#' @param Metropolis.start.theta TODO(Kathryn)
#' @param Metropolis.stdev.theta TODO(Kathryn)
#'
#' @return A list of results, including \item{est}{the median of estimates of N}, \item{ci}{a confidence interval for N},
#' \item{mean}{the mean of estimates of N}, \item{semeanest}{the standard error of mean estimates},
#' \item{dic}{the DIC of the model}, \item{fits}{fitted values}, and \item{diagnostics}{model diagnostics}.
#'
#' @importFrom graphics hist par plot
#'
#' @export
objective_bayes_geometric <- function(data,
output=TRUE,
plot=TRUE, answers=FALSE,
tau=10, burn.in=100,
iterations=2500,
Metropolis.stdev.N=75,
Metropolis.start.theta=1,
Metropolis.stdev.theta=0.3) {
data <- check_format(data)
if (tau > max(data[,1])) {
tau <- max(data[,1])
}
fullfreqdata <- data
# calculate NP estimate of n0
w<-sum(fullfreqdata[,2])
n<-sum(fullfreqdata[,1]*fullfreqdata[,2])
# subset data below tau
freqdata<-fullfreqdata[1:tau,]
# calculate summary statistics and MLE estimate of n0 and C
w.tau<-sum(freqdata[,2])
n.tau<-sum(freqdata[,1]*freqdata[,2])
R.hat<-(n.tau/w.tau-1)
MLE.est.n0<-w.tau/R.hat
MLE.est.N<-MLE.est.n0+w
### Step 3: calculate posterior
## initialization
iterations <- iterations+burn.in
N<-rep(0,iterations)
R<-c(Metropolis.start.theta,rep(1,iterations-1))
# to track acceptance rate of theta
a1<-0
# to track acceptance rate of N
a2<-0
# starting value based on MLE of C
N[1]<-ceiling(MLE.est.N)
D.post<-rep(0,iterations)
for (i in 2:iterations){
## sample from p(theta|C,x)
# propose value for theta
R.new <- abs(rnorm(1, mean=R[i-1],
sd=Metropolis.stdev.theta))
# calculate log of acceptance ratio
logr1 <- (-N[i-1]-n.tau-1/2) * log(1+R.new) +
(n.tau-1/2)*log(R.new) -
(-N[i-1]-n.tau-1/2) * log(1+R[i-1]) -
(n.tau-1/2)*log(R[i-1])
# calculate acceptance ratio
r1<-exp(logr1)
# accept or reject proposed value
if (runif(1)<min(r1,1)) {
R[i]<-R.new ; a1<-a1+1
}
else {
R[i]<-R[i-1]
}
## sample from p(C|theta,x)
## make sure N.new >=w.tau
repeat {
N.new<-rnbinom(1,mu=N[i-1],size=Metropolis.stdev.N)
if(N.new>w.tau-1)
break
}
## calculate log(N.new!/(N.new-w.tau)!)
N3.new<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.new[j+1]<-log(N.new-j)
}
N2.new<-sum(N3.new)
## calculate log(N[i-1]!/(N[i-1]-w.tau)!)
N3<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3[j+1]<-log(N[i-1]-j)
}
N2<-sum(N3)
# calculate log of acceptance ratio
logr2<-(N2.new-(1/2)*log(N.new)-N.new*log(1+R[i]))-(N2-(1/2)*log(N[i-1])-N[i-1]*log(1+R[i]))+(log(dnbinom(N[i-1],mu=N.new,size=Metropolis.stdev.N)))-(log(dnbinom(N.new,mu=N[i-1],size=Metropolis.stdev.N)))
# calculate acceptance ratio
r2<-exp(logr2)
# accept or reject proposed value
if (runif(1)<min(r2,1)) {
N[i]<-N.new ; a2<-a2+1
}
else {
N[i]<-N[i-1]
}
## calculate deviance from current sample
# calculate log(N[i]!/(N[i]-w.tau)!)
N3.curr<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.curr[j+1]<-log(N[i]-j)
}
N2.curr<-sum(N3.curr)
# calculate deviance
D.post[i]<-(-2)*(N2.curr-sum(lfactorial(freqdata[,2]))+n.tau*log(R[i])+(-N[i]-n.tau)*log(1+R[i]))
}
### Step 4: model diagnostics
## 1) deviance at posterior mean
mean.R<-mean(R[(burn.in+1):iterations])
mean.N<-mean(N[(burn.in+1):iterations])
## calculate log(mean.N!/(mean.N-w.tau)!)
N3.mean<-rep(0,w.tau)
for (j in 0:(w.tau-1)){
N3.mean[j+1]<-log(mean.N-j)
}
N2.mean<-sum(N3.mean)
loglik.post.mean<-N2.mean-sum(lfactorial(freqdata[,2]))+n.tau*log(mean.R)-(mean.N+n.tau)*log(1+mean.R)
D.mean<-(-2)*loglik.post.mean
## 2) posterior mean and median deviances
mean.D<-mean(D.post[(burn.in+1):iterations])
median.D<-quantile(D.post[(burn.in+1):iterations],probs=.5,names=F)
## 3) model complexity
p.D<-mean.D-D.mean
## 4) Deviance information criterion
DIC<-2*p.D+D.mean
### Step 5: fitted values based on medians of the marginal posteriors
median.R<-quantile(R[(burn.in+1):iterations],probs=.5,names=F)
median.N<-quantile(N[(burn.in+1):iterations],probs=.5,names=F)
fits<-rep(0,tau)
for (k in 1:tau){
fits[k]<-(median.N)*dexp(k,1/(1+median.R))
}
fitted.values<-data.frame(cbind(j=seq(1,tau),fits,count=freqdata[,2]))
### Step 6: results
hist.points<-hist(N[(burn.in+1):iterations]+w-w.tau,breaks=seq(w,max(N)+w-w.tau+1)-0.5, plot = plot)
results<-data.frame(w=w,
n=n,
MLE.est.C=MLE.est.N,
tau=tau,
w.tau=w.tau,
n.tau=n.tau,
iterations=iterations,
burn.in=burn.in,
acceptance.rate.theta=a1/iterations,
acceptance.rate.N=a2/iterations,
mode.C=hist.points$mids[which.max(hist.points$density)],
mean.C=mean(N[(burn.in+1):iterations])+w-w.tau,
median.C=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.5,names=F),
LCI.C=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.025,names=F),
UCI.C=quantile(N[(burn.in+1):iterations]+w-w.tau,probs=.975,names=F),
stddev.C=sqrt(var((N[(burn.in+1):iterations]+w-w.tau))),
mean.D=mean.D,
median.D=median.D,
DIC
)
final_results <- list()
final_results$results <- t(results)
final_results$fits <- fitted.values
if (output) {
# output results and fitted values
print(final_results)
}
if (plot) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(2,2))
# trace plot for C
# first thin values of C if there are more than 10,000 iterations
# must be a divisor of (iterations-burn.in)
iterations.trace<-min(10000,iterations-burn.in)
N.thin<-rep(0,iterations.trace)
for (k in 1:iterations.trace){
N.thin[k]<-N[k*((iterations-burn.in)/iterations.trace)]
}
# make trace plot
plot(1:iterations.trace,N.thin,xlab="Iteration Number",ylab="Total Number of Species", main="Trace plot")
# autocorrelation plot for C
acf(N[(burn.in+1):iterations],type="correlation",main="Autocorr plot",ylab="ACF",xlab="Lag")
# histogram of C with a bar for each discrete value
hist(N[(burn.in+1):iterations]+w-w.tau,breaks=seq(w,max(N[
(burn.in+1):iterations])+w-w.tau+1)-0.5,main="Posterior distribution",xlab="Total Number of Species",
col='purple',freq=F,ylab="Density")
}
if (answers) {
return(final_results)
}
}
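## Illustrative usage sketch (added for clarity; not part of the original
## source). As the code above expects, `data` is assumed to be a two-column
## frequency count table: column 1 the frequency index j, column 2 the number
## of species observed exactly j times. The toy counts below are made up.
# toy_counts <- data.frame(index = 1:10,
#                          count = c(120, 55, 30, 18, 12, 8, 6, 4, 3, 2))
# fit <- objective_bayes_geometric(toy_counts, output = FALSE, plot = FALSE,
#                                  answers = TRUE)
# fit$results   # posterior summaries of total species richness
# fit$fits      # fitted expected counts for j = 1, ..., tau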
rmvnorm <- function (n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)),
method = c("eigen", "svd", "chol"), pre0.9_9994 = FALSE) {
if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps),
check.attributes = FALSE)) {
stop("sigma must be a symmetric matrix")
}
if (length(mean) != nrow(sigma))
stop("mean and sigma have non-conforming size")
method <- match.arg(method)
R <- if (method == "eigen") {
ev <- eigen(sigma, symmetric = TRUE)
if (!all(ev$values >= -sqrt(.Machine$double.eps) * abs(ev$values[1]))) {
warning("sigma is numerically not positive semidefinite")
}
t(ev$vectors %*% (t(ev$vectors) * sqrt(pmax(ev$values,
0))))
}
else if (method == "svd") {
s. <- svd(sigma)
if (!all(s.$d >= -sqrt(.Machine$double.eps) * abs(s.$d[1]))) {
warning("sigma is numerically not positive semidefinite")
}
t(s.$v %*% (t(s.$u) * sqrt(pmax(s.$d, 0))))
}
else if (method == "chol") {
R <- chol(sigma, pivot = TRUE)
R[, order(attr(R, "pivot"))]
}
retval <- matrix(rnorm(n * ncol(sigma)), nrow = n, byrow = !pre0.9_9994) %*%
R
retval <- sweep(retval, 2, mean, "+")
colnames(retval) <- names(mean)
retval
}
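## Minimal sanity check for the rmvnorm() helper above (illustrative only):
## five draws from a bivariate normal with unit variances and correlation 0.5.
# sigma <- matrix(c(1, 0.5, 0.5, 1), nrow = 2)
# rmvnorm(5, mean = c(0, 0), sigma = sigma)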
|
a75795d0f97520cf6dddd622b5beff2aadbf2110
|
1b7a6d3cb7abe17ffbee71262cf3771f98a1323c
|
/MoreConcepts.R
|
859f2cefc587cabb42e3419551f4e89dde3f417f
|
[] |
no_license
|
johnpilbeam/r-datasci-work
|
33057f9d9d9987011b150619306093dcb86e9d42
|
9ee0fe2a10ecc58f6079752a8f80c2cd1cc3c9fb
|
refs/heads/master
| 2020-04-26T04:06:06.302690 | 2019-03-28T16:20:06 | 2019-03-28T16:20:06 | 173,290,055 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 3,239 |
r
|
MoreConcepts.R
|
# Download each of the data sets for 2006, 2007, 2008
df1 <- read.csv("~/Documents/r-datasci/2006.csv")
df2 <- read.csv("~/Documents/r-datasci/2007.csv")
df3 <- read.csv("~/Documents/r-datasci/2008.csv")
myDF <- rbind(df1,df2,df3)
dim(myDF)
rm(df1,df2,df3)
head(myDF)
tail(myDF)
unique(myDF$Year)
# Quiz #17 - Answer: 686993
sum(myDF$Origin == "LAX")
# 4.3: Efficiently Storing Origin-to-Destination Flight Paths
myTable <- table(list(myDF$Origin, myDF$Dest))
head(myTable)
# My Table has 315 rows and 321 columns
dim(myTable)
# How many entries are zeros? 94849!
sum(myTable == 0)
# How many entries are not zeros? 6266!
sum(myTable != 0)
myNewTable <- table(paste(myDF$Origin, myDF$Dest))
length(myNewTable)
# sum(myDF$Origin == "IND", myDF$Origin == "ORD")
# Q2Table <- table(list(myDF$Origin == "BOS", myDF$Origin == "DEN"))
plot(myNewTable)
dotchart(myNewTable)
dotchart(sort(myNewTable))
plot(myTable["IND",])
dotchart(myTable["IND",])
# Save flight data into a vector
v <- myTable["IND",]
# 4.5: Visualizing Flight Paths
# Dot chart plot of flights from IND to airports that have at least one flight
dotchart(sort(v[v != 0]))
# IND destinations with at least 4000 flights
dotchart(sort(v[v > 4000]))
MyV <- myTable["JFK",]
dotchart(sort(MyV[MyV > 5000]))
# 4.6 Incorporating Auxiliary Data about Airports
# Importing data about the airports
airportsDF <- read.csv("~/Documents/r-datasci/airports.csv")
dim(airportsDF)
head(airportsDF$iata)
airportsDF[airportsDF$iata == "IND",]
# 4.7: Incorporating Auxiliary Data about Airports
# Store airport, city and state of Airports in a vector
w <- paste(airportsDF$airport, airportsDF$city, airportsDF$state, sep=", ")
head(w)
tail(w)
names(w) <- airportsDF$iata
w[c("IND", "ORD", "MDW")]
w["CMH"]
w["Chicago"]
# Quiz #19
sum(airportsDF$city == "Chicago", na.rm = T)
# 4.9 Revising Visualizations of Flight Paths
v[v > 4000]
names(v[v > 4000])
w["ORD"]
myVec <- v[v > 4000]
names(myVec) <- w[names(v[v > 4000])]
myVec
dotchart(myVec)
dotchart(sort(myVec))
# 4.10 Identifying Airports with Commercial Flights
head(airportsDF)
table(airportsDF$state)
subset(airportsDF, state == "IN")
indyairports <- subset(airportsDF, state == "IN")
# we can make a table that shows all of the flight counts
# (as origins) for all airports in the full data set
# from 2006 to 2008 (not just Indiana airports)
table(myDF$Origin)
table(myDF$Origin)["IND"]
table(myDF$Origin)["ORD"]
# These are the 3-letter airport codes for the airports in Indiana
as.character(indyairports$iata)
table(myDF$Origin)[as.character(indyairports$iata)]
v <- table(myDF$Origin)[as.character(indyairports$iata)]
v[!is.na(v)]
names(v[!is.na(v)])
subset(airportsDF, iata %in% names(v[!is.na(v)]))
# 4.12 Creating and Applying Functions Built by the Learner
mystate <- "IN"
myairports <- subset(airportsDF, state == mystate)
myairports
table(myDF$Origin)[as.character(myairports$iata)]
activeairports <- function(mystate) {
myairports <- subset(airportsDF, state == mystate)
v <- table(myDF$Origin)[as.character(myairports$iata)]
subset(airportsDF, iata %in% names(v[!is.na(v)]))
}
activeairports("IN")
activeairports("IL")
activeairports("CA")
sapply(state.abb,function(x)
dim(activeairports(x))[1])
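# Illustrative extension (not part of the original notes): reuse the per-state
# counts computed above to see which states have the most airports with
# commercial flights, using the same dotchart approach as earlier.
# stateCounts <- sapply(state.abb, function(x) dim(activeairports(x))[1])
# dotchart(sort(stateCounts))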
|
e5de1c105a78728c5ac16947c204cfad7a42bc22
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/banter/examples/addBanterDetector.Rd.R
|
d2119eca247fcc64fd1760276faf4fa7a9011f0a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 552 |
r
|
addBanterDetector.Rd.R
|
library(banter)
### Name: addBanterDetector
### Title: Add a BANTER Detector Model
### Aliases: addBanterDetector removeBanterDetector
### ** Examples
data(train.data)
# initialize BANTER model with event data
bant.mdl <- initBanterModel(train.data$events)
# add the 'bp' (burst pulse) detector model
bant.mdl <- addBanterDetector(
x = bant.mdl,
data = train.data$detectors$bp,
name = "bp",
ntree = 50, sampsize = 1, num.cores = 1
)
bant.mdl
# remove the 'bp' detector model
bant.mdl <- removeBanterDetector(bant.mdl, "bp")
bant.mdl
|
bde4c56c8368ffab5ba2b7bc32778a18d58a1093
|
6f56fdd53e87575377b95b95280f21fe215cab0d
|
/man/create_empty_rtweet_tbl.Rd
|
8e20cc93919bbc71ee125b8725783d4e63bbfe2d
|
[
"MIT"
] |
permissive
|
urswilke/rtweettree
|
ab9603adb1801cf622789d3eef820b83e199dbbf
|
cfabadf5b38d2946f917b71fa6d499cf3d70108b
|
refs/heads/master
| 2023-08-11T14:22:24.597334 | 2021-10-07T23:58:12 | 2021-10-07T23:58:12 | 284,338,760 | 6 | 0 |
NOASSERTION
| 2021-10-02T19:14:58 | 2020-08-01T21:06:18 |
R
|
UTF-8
|
R
| false | true | 446 |
rd
|
create_empty_rtweet_tbl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{create_empty_rtweet_tbl}
\alias{create_empty_rtweet_tbl}
\title{Create an empty rtweet tibble}
\usage{
create_empty_rtweet_tbl()
}
\value{
An empty tibble with columns of the type that, e.g. rtweet::lookup_statuses() produces
}
\description{
Create an empty rtweet tibble
}
\examples{
df <- rtweettree:::create_empty_rtweet_tbl()
df
}
\keyword{internal}
|
9086f2b3fc56092ae632f5718e503c6905654f52
|
af31c9e40581eb197adc156f5524a0d2bdc22b78
|
/plot3.R
|
33de91bde7b181ad5116bfb3a6261fd5c5070a22
|
[] |
no_license
|
sammarten/ExData_Plotting1
|
fbd27570684e3d7f23b74e5e64627793e9631691
|
75fbed27ea33bf982638a348ed9fd9df843205e8
|
refs/heads/master
| 2021-01-21T00:48:06.177217 | 2014-08-08T23:06:06 | 2014-08-08T23:06:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,213 |
r
|
plot3.R
|
# Assumption that "household_power_consumption.txt" is in working directory
data <- read.csv("household_power_consumption.txt",
sep=";",
stringsAsFactors=FALSE,
colClasses=c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"),
na.strings=c("?"))
# Pull out values for Feb 1-2, 2007
data <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# Concatenate Date and Time column with a space in-between
# Convert Date column to be of type POSIXlt
data$Date <- strptime(paste(data$Date, data$Time, sep=" "), format="%d/%m/%Y %T")
# Create png file and plot stair step graph
png(filename="plot3.png", bg="transparent")
plot(data$Date, data$Sub_metering_1,
type="s", xlab="", ylab="Energy sub metering")
# Add additional data to the plot
points(data$Date, data$Sub_metering_2, type="s", col="red")
points(data$Date, data$Sub_metering_3, type="s", col="blue")
# Add legend
legend("topright", col=c("black", "red", "blue"),
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1))
dev.off()
|
e1e1e64bf5b07d9f0c5e9e01ec8b8b0dfb955d4e
|
332eb3e452b905363ffa89745d772f43e658a1cd
|
/R/msgfParTda.R
|
ed21d434e03e8ddbac4cef0dfc70a4641f610958
|
[] |
no_license
|
thomasp85/MSGFplus-release
|
cff4dc347d3b260a16ee9236bd3397b909078f1f
|
f3dd2a93ff0cc05aabb7dc0cdacdf12a1e3d5d59
|
refs/heads/master
| 2016-08-04T20:24:37.233950 | 2015-02-03T08:52:12 | 2015-02-03T08:52:12 | 25,248,232 | 0 | 1 | null | null | null | null |
UTF-8
|
R
| false | false | 1,747 |
r
|
msgfParTda.R
|
#' A class handling use of target-decoy approach for FDR estimation
#'
#' This class defines whether to use target-decoy approach and provides methods
#' to get correct system call parameters.
#'
#' @slot tda A boolean defining whether to use tda or not
#'
#' @examples
#' tda <- msgfParTda(TRUE)
#'
#' @family msgfParClasses
#'
setClass(
Class='msgfParTda',
representation=representation(
tda='logical'
),
validity=function(object){
if(length(object@tda) == 1){
return(TRUE)
} else {
return('tda can only be of length 1')
}
},
prototype=prototype(
tda=as.logical(NA)
)
)
#' @describeIn msgfParTda Short summary of msgfParTda object
#'
#' @param object An msgfParTda object
#'
setMethod(
'show', 'msgfParTda',
function(object){
if(length(object) == 0){
cat('An empty msgfParTda object\n')
} else {
cat(object@tda, '\n')
}
}
)
#' @describeIn msgfParTda Report the length of an msgfParTda object
#'
#' @param x An msgfParTda object
#'
#' @return For length() An integer.
#'
setMethod(
'length', 'msgfParTda',
function(x){
if(is.na(x@tda)){
0
} else {
1
}
}
)
#' @describeIn msgfParTda Get \code{\link[base]{system}} compliant function call
#'
#' @return For getMSGFpar() A string.
#'
setMethod(
'getMSGFpar', 'msgfParTda',
function(object){
if(length(object) == 0){
''
} else if(object@tda){
'-tda 1'
} else {
'-tda 0'
}
}
)
#' @rdname msgfParTda-class
#'
#' @param value A boolean defining whether to use tda or not
#'
#' @return For msgfParTda() An msgfParTda object.
#'
#' @export
#'
msgfParTda <- function(value){
if(missing(value)){
new(Class='msgfParTda')
} else {
new(Class='msgfParTda', tda=value)
}
}
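## Illustrative usage of the constructor together with the methods defined
## above (added for clarity; not part of the original file):
# tda <- msgfParTda(TRUE)
# getMSGFpar(tda)       # returns "-tda 1"
# length(msgfParTda())  # returns 0 for an empty object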
|
c7e2ea0e6c1948bba0f812a0dd39cd2890224fe7
|
caad99a2eb7e431beefb3a04c2a31350e64951c7
|
/decision tree.R
|
5c46fdcc14e2fc696ae8540687d446f7277c667b
|
[] |
no_license
|
tonyk7440/kaggle_titanic_dataset
|
48ec388eb0cc036576350b209565b710bad522d8
|
4c5c72635cfc4362c067ce2d89cf8d3688d29ea5
|
refs/heads/master
| 2021-01-10T01:55:52.196467 | 2016-02-01T21:11:43 | 2016-02-01T21:11:43 | 50,435,308 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,070 |
r
|
decision tree.R
|
#Decision Trees
# train and test set are still loaded in
str(train)
str(test)
library(rpart)
# Build the decision tree
my_tree_two <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
data = train,
method ="class")
# Visualize the decision tree using plot() and text()
plot(my_tree_two)
text(my_tree_two)
# Load in the packages to create a fancified version of your tree
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# Time to plot your fancy tree
fancyRpartPlot(my_tree_two)
#Predict & submit to kaggle
# Make your prediction using the test set
my_prediction <- predict(my_tree_two, test, type = "class")
# Create a data frame with two columns: PassengerId & Survived. Survived contains your predictions
my_solution <- data.frame(PassengerId = test$PassengerId, Survived = my_prediction)
# Check that your data frame has 418 entries
nrow(my_solution)
# Write your solution to a csv file with the name my_solution.csv
write.csv(my_solution, file="my_solution.csv" , row.names=FALSE)
|
e1e0b08f7f9afde79b56b450ac1ef5ef29278a7f
|
4c699cae4a32824d90d3363302838c5e4db101c9
|
/06_Regressao_com_R/03-FeatureSelection.R
|
896ca026789c8762a25ab4417019cd9bd9c397c9
|
[
"MIT"
] |
permissive
|
janes/BigData_Analytics_com_R
|
470fa6d758351a5fc6006933eb5f4e3f05c0a187
|
431c76b326e155715c60ae6bd8ffe7f248cd558a
|
refs/heads/master
| 2020-04-27T19:39:10.436271 | 2019-02-06T11:29:36 | 2019-02-06T11:29:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,181 |
r
|
03-FeatureSelection.R
|
# Feature Selection
# ... continuation of script 02
dim(bikes)
any(is.na(bikes))
# Building a model to identify the attributes with the greatest importance for the predictive model
require(randomForest)
# Assessing the importance of all variables
modelo <- randomForest(cnt ~ .,
data = bikes,
ntree = 100,
nodesize = 10,
importance = TRUE)
# Removing collinear variables
modelo <- randomForest(cnt ~ . - count
- mnth
- hr
- workingday
- isWorking
- dayWeek
- xforHr
- workTime
- holiday
- windspeed
- monthCount
- weathersit,
data = bikes,
ntree = 100,
nodesize = 10,
importance = TRUE)
# Plotting the variables by degree of importance
varImpPlot(modelo)
# Saving the result
df_saida <- bikes[, c("cnt", rownames(modelo$importance))]
df_saida
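# Illustrative follow-up (an assumption, not in the original script): inspect
# the numeric importance scores behind varImpPlot() directly.
# round(randomForest::importance(modelo), 2)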
|
046245c4e30e7d8d803206a1035eaaeed725b1f7
|
ea1c371421755474c644854cfec37962d9be468e
|
/scripts/code1.r
|
e6d638d0484d6126dd424ed969f5b2b753ef5711
|
[] |
no_license
|
verm0nter21/testgit
|
f835b8ee1a4d2478f07c5684ddfbd273e3a0a2db
|
f9babcb53fa81ec5c5ca335fb0ed3ef9793c8083
|
refs/heads/master
| 2020-03-21T23:07:27.675802 | 2018-06-29T15:56:06 | 2018-06-29T15:56:06 | 139,167,636 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 85 |
r
|
code1.r
|
# this is not really an R script but it is a comment
# this is version 1, the master
|
f53feba589776c8a79ade80fdeefcbb9c6412117
|
e9b8841424aff6f0a47f61d3a7f64796c8b1e4b4
|
/Rscripts/getESCCfitGenes.R
|
7ff74b054404445221691849ae285d1c64b132ec
|
[] |
no_license
|
2waybene/MustARD
|
e0b3361c59d1262b331ff06d09f244bb732f2b8a
|
e856eff2945f873a32ba09c8e666caeef8d1e769
|
refs/heads/master
| 2021-07-15T17:24:20.772322 | 2021-02-22T21:29:43 | 2021-02-22T21:29:43 | 240,051,670 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 662 |
r
|
getESCCfitGenes.R
|
setwd("X:/project2020/MustARD/learningFromBigWigs/Behan_nature_CRISPR-Cas9/")
ESCC.cell.lines <- read.csv("esophagus_CellLines.csv")
fit.genes <- read.csv("fitness_genes_all.csv")
dim(fit.genes)
ESCC <- ESCC.cell.lines$CMP_id[ ESCC.cell.lines$CancerType %in% "Esophageal Squamous Cell Carcinoma"]
length(which(colnames (fit.genes) %in% ESCC))
#19, missing "SIDM00249"
ESCC[-which(ESCC %in% colnames (fit.genes))]
#[1] SIDM00249
ESCC.fit.genes <- fit.genes[, (which(colnames (fit.genes) %in% ESCC))]
dim(ESCC.fit.genes)
apply(ESCC.fit.genes[,-1], 1, sum)  # per-gene totals across the ESCC cell lines
ESCC.genes <- ESCC.fit.genes[,1][which(rowSums (ESCC.fit.genes[,-1]) == 18)]
|
5522b004505b1317d03309e91b74af5c2eb96a06
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mason/examples/polish.Rd.R
|
1e31e7a68a00cbb0f72ae46920b961e08c683ad0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 698 |
r
|
polish.Rd.R
|
library(mason)
### Name: polish
### Title: Do some final polishing of the scrubbed mason analysis data.
### Aliases: polish polish_renaming polish_filter
### polish_transform_estimates polish_adjust_pvalue
### ** Examples
library(magrittr)
ds <- swiss %>%
design('glm') %>%
add_settings() %>%
add_variables('yvar', c('Fertility', 'Education')) %>%
add_variables('xvar', c('Agriculture', 'Catholic')) %>%
add_variables('covariates', 'Examination') %>%
construct() %>%
scrub()
polish_renaming(ds, function(x) gsub('Education', 'Schooling', x))
polish_filter(ds, 'Xterm', 'term')
polish_adjust_pvalue(ds)[c('p.value', 'adj.p.value')]
polish_transform_estimates(ds, function(x) exp(x))
|
47023a1e30559af42bee9cd4e02e95454dc07dd6
|
9f727ce9fded2d1082f8473bf9c353c4dc524eca
|
/partsm/man/acf.ext1.Rd
|
52336da6db3b445faadc96a13fa7155f19ec50ec
|
[] |
no_license
|
MatthieuStigler/partsm
|
7043eabf882eecda2cef25ef3de92af4d06f7d02
|
f342e1966083b6f5d7ce520af0395eab1d4a6a54
|
refs/heads/master
| 2021-05-16T02:57:35.563197 | 2020-11-25T03:48:30 | 2020-11-25T03:48:30 | 18,262,949 | 3 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 2,738 |
rd
|
acf.ext1.Rd
|
\name{acf.ext1}
\alias{acf.ext1}
\title{Autocorrelation function for several transformations of the original data}
\description{
This function is based on the \link[stats]{acf} function and extends it by allowing for some transformations of the data before computing the autocovariance or autocorrelation function.
}
\usage{
acf.ext1 (wts, transf.type, perdiff.coeffs, type, lag.max, showcat, plot)
}
\arguments{
\item{wts}{a univariate time series object.
}
  \item{transf.type}{a character string indicating what transformation should be applied to the data. Allowed values are "orig", "fdiff", "sdiff", "fsdiff", "fdiffsd", "perdiff", and "perdiffsd". See details.
}
  \item{perdiff.coeffs}{a vector with the estimated coefficients for the periodic difference filter. This argument is only required when the periodic difference transformation must be applied to the data. See details.
}
\item{type}{a character string giving the type of acf to be computed. Allowed values are "correlation",
"covariance" or "partial".
}
\item{lag.max}{maximum number of lags at which to calculate the acf.
}
\item{showcat}{a logical. If TRUE, the results are printed in detail. If FALSE, the results are stored as a list object.
}
\item{plot}{a logical. If TRUE, a plot of the acf is showed.
}
}
\details{The implemented transformations are the following:
\itemize{
\item "orig": Original series.
\item "fdiff": First differences of the original series.
\item "sdiff": Seasonal differences of the original series.
\item "fsdiff": Fisrt and seasonal differences of the original series.
\item "fdiffsd": Residuals of the first differences on four seasonal dummy variables.
\item "perdiff": Periodic differences of the original series.
\item "perdiffsd": Residuals of the periodic differences on four seasonal dummy variables.
}
}
\seealso{
\code{\link[stats]{acf}}.
}
\value{
Lags at which the acf is computed, estimates of the acf, and p-values for the significance of the acf at each lag.
}
\author{Javier Lopez-de-Lacalle \email{[email protected]}.}
\examples{
## Logarithms of the Real GNP in Germany
data("gergnp")
lgergnp <- log(gergnp, base=exp(1))
out <- acf.ext1(wts=lgergnp, transf.type="orig",
type="correlation", lag.max=12, showcat=TRUE, plot=FALSE)
out <- acf.ext1(wts=lgergnp, transf.type="perdiffsd",
perdiff.coeff = c(1.004, 0.981, 1.047, 0.969),
type="correlation", lag.max=12, showcat=TRUE, plot=FALSE)
}
\keyword{misc}
|
503e746356e1b743942863575fe1ea12ea40f51e
|
aaf8222e2e7c1ca3480092387472ed539e79985a
|
/man/SplitAuthor.Rd
|
56620539822c62ac42c710a2fd55a2f0e93db4c9
|
[] |
no_license
|
M3SOulu/MozillaApacheDataset-Rpackage
|
57e7028f2d2ee9a6a672a9775f20bf40af9e4f4a
|
3644dbd266325309be4bfdf1ac926ae8859ebd19
|
refs/heads/master
| 2022-06-23T11:56:58.580415 | 2022-06-20T11:03:39 | 2022-06-20T11:03:39 | 238,914,906 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 339 |
rd
|
SplitAuthor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/identities.R
\name{SplitAuthor}
\alias{SplitAuthor}
\title{Split author}
\usage{
SplitAuthor(author.key)
}
\arguments{
\item{author.key}{The author fields.}
}
\value{
A list of splitted authors.
}
\description{
Split Git author fields based on various regex.
}
|
8d981cf091b46aeca9032202901c856e5fdbf1d7
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlesheetsv4.auto/man/BatchUpdateValuesRequest.Rd
|
09788ea44a689dcc9ff123dd8e9ac34a6448b3cc
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 1,052 |
rd
|
BatchUpdateValuesRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{BatchUpdateValuesRequest}
\alias{BatchUpdateValuesRequest}
\title{BatchUpdateValuesRequest Object}
\usage{
BatchUpdateValuesRequest(valueInputOption = NULL, data = NULL,
responseDateTimeRenderOption = NULL, responseValueRenderOption = NULL,
includeValuesInResponse = NULL)
}
\arguments{
\item{valueInputOption}{How the input data should be interpreted}
\item{data}{The new values to apply to the spreadsheet}
\item{responseDateTimeRenderOption}{Determines how dates, times, and durations in the response should be}
\item{responseValueRenderOption}{Determines how values in the response should be rendered}
\item{includeValuesInResponse}{Determines if the update response should include the values}
}
\value{
BatchUpdateValuesRequest object
}
\description{
BatchUpdateValuesRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The request for updating more than one range of values in a spreadsheet.
}
|
2d99b0143c6c337080dd742409052f4446ec03df
|
c6683226b4317a677e43475b31a743b7ad273410
|
/LML_Tidy_Helpers.R
|
7c5385159399d155bc2a3ec6cfc406c9f13b420f
|
[] |
no_license
|
henwood-dev/logmylife
|
a3f754ad41f61c27d9fc977cb86eb018198c356b
|
425f75b78c2bc3a69f1aff1320aec3373f30184b
|
refs/heads/master
| 2020-03-29T23:28:50.285911 | 2020-03-21T01:32:15 | 2020-03-21T01:32:15 | 125,543,473 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 13,487 |
r
|
LML_Tidy_Helpers.R
|
library(sjlabelled)
library(splitstackshape)
library(data.table)
library(doParallel)
library(parallel)
library(iterators)
library(foreach)
library(haven)
library(tidyverse)
library(bit64)
select <- dplyr::select
write_prompt_responses <- function(data_dirname, wockets_dirname, manual_dirname = NULL, skip_manual = TRUE){
#ids <- read_delim(paste(data_dirname,"file_ids.txt",sep = "/"), delim = ",", col_names = "id")
prompt_response_files <- read_file_list(data_dirname,wockets_dirname,"surveys","lml_com$","Prompts.csv$", hour_filter = FALSE)
raw_prompts <- lapply(prompt_response_files,read_ema, data_dirname = data_dirname, suffix_dirname = wockets_dirname)
if(!skip_manual){
manual_prompt_response_files <- read_file_list(data_dirname,manual_dirname,"surveys","lml_com$","Prompts.csv$", hour_filter = FALSE)
manual_raw_prompts <- lapply(manual_prompt_response_files,read_ema, data_dirname = data_dirname, suffix_dirname = manual_dirname)
}
prompt_responses <- raw_prompts[[1]]
for(i in 2:length(raw_prompts)){
prompt_responses <- bind_rows(prompt_responses,raw_prompts[[i]])
}
if(!skip_manual){
manual_prompt_responses <- manual_raw_prompts[[1]]
for(i in 2:length(manual_raw_prompts)){
manual_prompt_responses <- bind_rows(manual_prompt_responses,manual_raw_prompts[[i]])
}
merge_responses <- anti_join(manual_prompt_responses,prompt_responses, by = c("system_file","TimeStampPrompted"))
pre_filtered_prompts <- bind_rows(prompt_responses,merge_responses)
} else {
pre_filtered_prompts <- prompt_responses
}
write_csv(pre_filtered_prompts,paste(data_dirname,"prompt_responses.csv", sep = "/"))
  return(pre_filtered_prompts)
}
pipe_print <- function(data_to_pass, string_to_print){
print(string_to_print)
return(data_to_pass)
}
stata_varcheck_setnames <- function(dataset){
longvars <- c("longvars")
for(i in 1:ncol(dataset)){
if(str_length(names(dataset[i])) > 32) {
longvars <- append(longvars,names(dataset[i]))
}
}
longvars[1] <- NA
return(longvars)
}
simplify_sni_id <- function(sni_id){
remove_spaces <- gsub(" ","",sni_id)
remove_periods <- gsub("\\.","",remove_spaces)
remove_quote <- gsub("'","",remove_periods)
make_lower <- tolower(remove_quote)
return(make_lower)
}
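# Illustrative example of the normalisation above (not from the original
# source): simplify_sni_id("J. O'Brien") returns "jobrien".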
write_master_logs <- function(data_dirname, wockets_dirname, manual_dirname = NULL, skip_manual = TRUE,
master_filter = "master"){
masterlog_files <- read_file_list(data_dirname,wockets_dirname,"logs","lml_com$",paste0(master_filter,".log.csv$"))
pre_master_log <- rbindlist(lapply(masterlog_files,skip_fread, data_dirname = data_dirname, suffix_dirname = wockets_dirname), fill = TRUE)
if(!skip_manual){
manual_masterlog_files <- read_file_list(data_dirname,manual_dirname,"logs","lml_com$","master.log.csv$")
manual_master_log <- rbindlist(lapply(manual_masterlog_files,skip_fread, data_dirname = data_dirname, suffix_dirname = manual_dirname), fill = TRUE)
# Switching to Data.Table Paradigm for Fast Master Processing
merge_manual <- anti_join(manual_master_log,pre_master_log, by = c("file_id","V1"))
raw_master_log <- bind_rows(pre_master_log,merge_manual)
} else {
raw_master_log <- as.data.table(pre_master_log)
}
raw_master_log[, V2:= NULL]
write_csv(raw_master_log,paste0(data_dirname,"/",master_filter,"_logs.csv"))
return(raw_master_log)
}
write_gps_logs <- function(data_dirname, wockets_dirname, manual_dirname = NULL, skip_manual = TRUE){
gps_files <- read_file_list(data_dirname,wockets_dirname,"data/","lml_com$","GPS.csv$")
pre_raw_gps_log <- rbindlist(lapply(gps_files,skip_fread, data_dirname = data_dirname, suffix_dirname = wockets_dirname), fill = TRUE)
if(!skip_manual){
manual_gps_files <- read_file_list(data_dirname,manual_dirname,"data","lml_com$","GPS.csv$")
manual_raw_gps_log <- rbindlist(lapply(manual_gps_files,skip_fread, data_dirname = data_dirname, suffix_dirname = manual_dirname), fill = TRUE)
# Switching to Data.Table Paradigm for Fast GPS Processing
merge_manual <- anti_join(manual_raw_gps_log,pre_raw_gps_log, by = c("file_id","V1"))
raw_gps_log <- bind_rows(pre_raw_gps_log,merge_manual)
} else {
raw_gps_log <- pre_raw_gps_log
}
write_csv(raw_gps_log,paste(data_dirname,"gps_logs.csv", sep = "/"))
return(raw_gps_log)
}
write_daily_responses <- function(data_dirname, wockets_dirname, manual_dirname = NULL, skip_manual = TRUE){
dailylog_response_files <- read_file_list(data_dirname,wockets_dirname,"surveys","lml_com$","PromptResponses_Dailylog.csv$", hour_filter = FALSE)
raw_dailylog <- lapply(dailylog_response_files,read_ema, data_dirname = data_dirname, suffix_dirname = wockets_dirname)
if(!skip_manual){
manual_dailylog_response_files <- read_file_list(data_dirname,manual_dirname,"surveys","lml_com$","PromptResponses_Dailylog.csv$", hour_filter = FALSE)
manual_raw_dailylog <- lapply(manual_dailylog_response_files,read_ema, data_dirname = data_dirname, suffix_dirname = manual_dirname)
}
dailylog_responses <- raw_dailylog[[1]]
for(i in 2:length(raw_dailylog)){
dailylog_responses <- bind_rows(dailylog_responses,raw_dailylog[[i]])
}
if(!skip_manual){
manual_dailylog_responses <- manual_raw_dailylog[[1]]
for(i in 2:length(manual_raw_dailylog)){
manual_dailylog_responses <- bind_rows(manual_dailylog_responses,manual_raw_dailylog[[i]])
}
merge_responses <- anti_join(manual_dailylog_responses,dailylog_responses, by = c("system_file","PromptTime"))
pre_filtered_dailylog <- bind_rows(dailylog_responses,merge_responses)
} else {
pre_filtered_dailylog <- dailylog_responses
}
write_csv(pre_filtered_dailylog,paste(data_dirname,"daily_responses.csv", sep = "/"))
return(pre_filtered_dailylog)
}
write_ema_responses <- function(data_dirname, wockets_dirname, id_varstub, filename_varstub, manual_dirname = NULL, skip_manual = TRUE, prepend_surveys = ""){
#ids <- read_delim(paste(data_dirname,"file_ids.txt",sep = "/"), delim = ",", col_names = "id")
ema_response_files <- read_file_list(data_dirname,wockets_dirname,paste(prepend_surveys,"surveys",sep = "/"),id_varstub,filename_varstub, hour_filter = FALSE)
raw_ema <- lapply(ema_response_files,read_ema, data_dirname = data_dirname, suffix_dirname = wockets_dirname)
if(!skip_manual){
manual_ema_response_files <- read_file_list(data_dirname,manual_dirname,paste(prepend_surveys,"surveys",sep = "/"),id_varstub, filename_varstub, hour_filter = FALSE)
manual_raw_ema <- lapply(manual_ema_response_files,read_ema, data_dirname = data_dirname, suffix_dirname = manual_dirname)
}
ema_responses <- raw_ema[[1]]
for(i in 2:length(raw_ema)){
ema_responses <- bind_rows(ema_responses,raw_ema[[i]])
}
if(!skip_manual){
manual_ema_responses <- manual_raw_ema[[1]]
for(i in 2:length(manual_raw_ema)){
manual_ema_responses <- bind_rows(manual_ema_responses,manual_raw_ema[[i]])
}
merge_responses <- anti_join(manual_ema_responses,ema_responses, by = c("system_file","PromptTime"))
pre_filtered_ema <- bind_rows(ema_responses,merge_responses)
} else {
pre_filtered_ema <- ema_responses
}
write_csv(pre_filtered_ema,paste(data_dirname,"ema_responses.csv", sep = "/"))
return(pre_filtered_ema)
}
prebind_data <- function(filtered_data, variable_prefix, name_keys = "", name_value_pairs = "", return_name_columns = FALSE, separator = ",|"){
if(sum(!is.na(filtered_data %>% select(!!variable_prefix))>0)){
filtered_newvars <- cSplit_e(filtered_data, variable_prefix,type = "character", fill = 0, sep = separator)
names(filtered_newvars) <- enc2native(names(filtered_newvars))
selected_data <- filtered_newvars %>%
mutate_at(vars(starts_with(paste0(variable_prefix,"_"))),funs(ifelse(is.na(eval(parse(text = variable_prefix))),NA,.))) %>%
select(starts_with(paste0(variable_prefix,"_")))
if(return_name_columns){
return(names(selected_data))
}
names(selected_data) <- name_keys[names(selected_data)]
new_return <- generate_missing_column(selected_data,get_labels(name_keys))
return_data <- new_return %>%
set_label(unlist(name_value_pairs))
} else {
new_return <- generate_missing_column(filtered_data,get_labels(name_keys))
return_data <- new_return %>%
select(starts_with(paste0(variable_prefix,"_"))) %>%
set_label(unlist(name_value_pairs))
}
return(return_data)
}
generate_missing_column <- function(data_name, column_names){
return_data_name <- data_name
for(i in column_names){
if(!(i %in% names(return_data_name))){
return_data_name <- mutate(return_data_name, !!i := NA)
}
}
return(return_data_name)
}
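# Illustrative behaviour (not from the original source): any name listed in
# column_names but missing from data_name is appended as an NA column, e.g.
# generate_missing_column(tibble(a = 1:2), c("a", "b"))
# # -> keeps column `a` and adds a column `b` filled with NA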
read_file_list <- function(data_dirname,
midpoint_dirname = NULL,
end_dirname = NULL,
id_filter = "*",
file_filter,
hour_filter = TRUE,
recursive = TRUE){
if(!is.null(midpoint_dirname) & !is.null(end_dirname)){
data_files <- dir(paste(data_dirname,midpoint_dirname, sep = "/"),pattern = id_filter)
data_dirs <- paste(data_dirname,midpoint_dirname,data_files, sep = "/")
date_files <- dir(paste(data_dirs,end_dirname, sep = "/"), full.names = TRUE)
if(hour_filter){
date_files <- dir(date_files, full.names = TRUE)
}
return_files <- list.files(date_files,pattern = file_filter, full.names = TRUE, include.dirs = FALSE, recursive = recursive)
} else {
return_files <- list.files(data_dirname,pattern = file_filter, full.names = TRUE, include.dirs = FALSE, recursive = recursive)
}
return(return_files)
}
read_gps <- function(data_dirname,wockets_dirname,manual_dirname, skip_manual = FALSE, fast_mode = FALSE){
if(!fast_mode){
gps_files <- read_file_list(data_dirname,wockets_dirname,"data/","lml_com$","GPS.csv$")
pre_raw_gps_log <- rbindlist(lapply(gps_files,skip_fread, data_dirname = data_dirname, suffix_dirname = wockets_dirname), fill = TRUE)
if(!skip_manual){
manual_gps_files <- read_file_list(data_dirname,manual_dirname,"data","lml_com$","GPS.csv$")
manual_raw_gps_log <- rbindlist(lapply(manual_gps_files,skip_fread, data_dirname = data_dirname, suffix_dirname = manual_dirname), fill = TRUE)
# Switching to Data.Table Paradigm for Fast GPS Processing
merge_manual <- anti_join(manual_raw_gps_log,pre_raw_gps_log, by = c("file_id","V1"))
raw_gps_log <- bind_rows(pre_raw_gps_log,merge_manual)
} else {
raw_gps_log <- pre_raw_gps_log
}
}
else{
    raw_gps_log <- fread(paste(data_dirname,"gps_logs.csv",sep = "/"), colClasses = rep("character",7))
}
names(raw_gps_log) <- c("a","b","c","d","e","f","file_id")
raw_gps_log[, log_time := as.POSIXct(a, tz = "America/Los_Angeles", format = "%Y-%m-%d %H:%M:%S", origin = "1970-01-01")]
raw_gps_log[, measure_time := as.POSIXct(b, tz = "America/Los_Angeles", format = "%Y-%m-%d %H:%M:%S", origin = "1970-01-01")]
raw_gps_log[is.na(log_time), log_time := as.POSIXct(as.numeric(a)/1000, tz = "America/Los_Angeles", origin = "1970-01-01")]
raw_gps_log[is.na(measure_time), measure_time := as.POSIXct(as.numeric(b)/1000, tz = "America/Los_Angeles", origin = "1970-01-01")]
raw_gps_log[, time_diff := log_time - measure_time]
raw_gps_log[, latitude := as.numeric(c)]
raw_gps_log[, longitude := as.numeric(d)]
raw_gps_log[, accuracy := as.numeric(e)]
raw_gps_log[, c("a","b","c","d","e","f") := NULL]
raw_gps_log[, gps_time_valid := as.integer(time_diff <= 300)]
raw_gps_log[, gps_accuracy_valid := as.integer(accuracy <= 100)]
raw_gps_log[, fulltime := measure_time]
return(raw_gps_log)
}
skip_fread <- function(file, data_dirname, suffix_dirname, supply_header = FALSE){
if(file.size(file) > 0){
try({
return_csv <- fread(file, colClasses = 'character', encoding = "UTF-8", header = supply_header)
idstringlength <- str_length(paste(data_dirname,suffix_dirname,"", sep = "/"))
id <- str_sub(file,idstringlength+4,idstringlength+7)
return_csv[, file_id := id]
new_return_csv <- return_csv[!is.na(V2)]
if(nrow(new_return_csv)>0){
return(new_return_csv)
} else {
return(NULL)
}
})
return(NULL)
} else {return(NULL)}
}
read_sni <- function(sni_stata_filename) {
sni_data <- read_dta(sni_stata_filename) %>%
select(SNI_PID,starts_with("SNI_ID_")) %>%
rename(
subject_id = 1,
sni1 = 2,
sni2 = 3,
sni3 = 4,
sni4 = 5,
sni5 = 6
) %>%
mutate_all(funs(tolower)) %>%
mutate_all(funs(str_replace(.,"\\.",""))) %>%
mutate_all(funs(str_replace(.," ",""))) %>%
mutate_all(funs(str_replace(.,"\\.",""))) %>%
filter(!is.na(subject_id))
return(sni_data)
}
read_ema <- function(file_to_read, data_dirname, suffix_dirname){
discard <- 1
return_ema_file <- as.tibble(data.frame(discard))
try({
file_cols <- ncol(read_csv(file_to_read))
return_ema_file <- read_csv(file_to_read, col_types = str_flatten(rep("c",file_cols)))
idstringlength <- str_length(paste(data_dirname,suffix_dirname,"", sep = "/"))
id <- str_sub(file_to_read,idstringlength+4,idstringlength+7)
return_ema_file$system_file <- id
return_ema_file$discard <- NULL
return(return_ema_file)
}, silent = TRUE)
return(NULL)
}
|
cf083925135c08c0f0c28a61528b2c8597fe7215
|
1f96642b72c65546393c7fe9d201f52737885c02
|
/Rexam/lab11.R
|
440186f19b1d2eeff1b982976fe0273bdd60550a
|
[] |
no_license
|
kdragonkorea/R-data-analysis
|
3051881bcc9984e20092ba65feda712b7aa76427
|
0625fea1d1b4842ed6d7554e5a6aaf20d2b4d43a
|
refs/heads/master
| 2023-03-23T00:24:06.549611 | 2021-03-15T00:14:10 | 2021-03-15T00:14:10 | 341,584,046 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,533 |
r
|
lab11.R
|
library(RSelenium)
remDr <- remoteDriver(remoteServerAddr = "localhost" , port = 4445, browserName = "chrome")
remDr$open()
remDr$navigate("http://gs25.gsretail.com/gscvs/ko/products/event-goods")
goodsname <- NULL; goodsprice <- NULL; nextpage <- NULL
# From the first page, navigate to the 2+1 menu
two_to_one <- remDr$findElement(using='css selector', "div > ul > li:nth-child(2) > span")
two_to_one$clickElement()
# Collect product information from every page of the 2+1 menu
repeat {
name <- remDr$findElements(using='css selector','div > div:nth-child(5) > ul > li > div > p.tit')
name <- sapply(name,function(x){x$getElementText()})
goodsname <- c(goodsname, unlist(name))
print(length(goodsname))
price <- remDr$findElements(using='css selector','div > div:nth-child(5) > ul > li > div > p.price > span')
price <- sapply(price,function(x){x$getElementText()})
goodsprice <- c(goodsprice, unlist(price))
print(length(goodsprice))
# nextpage <- remDr$findElement(using='css selector', 'div.cnt_section.mt50 > div > div > div:nth-child(5) > div > a.next')
# Stop paging once the last page is reached
nextpage <- remDr$findElement(using='css selector', 'div.cnt_section.mt50 > div > div > div:nth-child(5) > div > a.next')
if(nextpage$getElementAttribute("onclick") != 'goodsPageController.moveControl(1)'){
break;
}
nextpage$clickElement()
Sys.sleep(3)
}
gs25_twotoone <- data.frame(goodsname, goodsprice)
View(gs25_twotoone)
write.csv(gs25_twotoone, "output/gs25_twotoone.csv")
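# Suggested cleanup (illustrative addition, not in the original script):
# close the browser session once scraping is finished.
# remDr$close()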
|
249778eced85499440f2f50c681c8a7de099b64b
|
bed0fbea3a7dce73838418e15e2516b1a16a490b
|
/man/linspace.Rd
|
2ddc848c72b3c3519e30ccd3e7b2f9415385435d
|
[] |
no_license
|
bgreenwell/ramify
|
af5fc93c73844869ab5de44d0dd126496e8e4b79
|
7dbadcb773f6d9b7e910e97bc57b2d17b4df7927
|
refs/heads/master
| 2021-01-17T04:11:44.564375 | 2017-01-04T13:02:16 | 2017-01-04T13:02:16 | 31,129,928 | 1 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 652 |
rd
|
linspace.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convenience.R
\name{linspace}
\alias{linspace}
\title{Linearly-spaced Elements}
\usage{
linspace(a, b, n = 50)
}
\arguments{
\item{a}{The starting value of the sequence.}
\item{b}{The final value of the sequence.}
\item{n}{The number of samples to generate. Default is 50.}
}
\value{
A vector of linearly-spaced elements.
}
\description{
Construct a vector of \code{n} linearly-spaced elements from \code{a}
to \code{b}.
}
\examples{
linspace(0, 1)
linspace(1, 5, 5)
linspace(1+2i, 10+10i, 8)
logspace(0, pi, 10)
}
\seealso{
\code{\link{logspace}}, \code{\link{seq}}.
}
|
bfdde574377bea7707e59a7a6e90e8efedc4f5c6
|
715f2721c5f9c69876c75957694cef3ceea86e0e
|
/man/tsa.Rd
|
130852006e1b149f8dd94f7414be84ff8e44f0ae
|
[
"Apache-2.0"
] |
permissive
|
YongLuo007/bcmaps
|
f59a35edc8a5dd63c1be8aa90db5b6323b28aad4
|
bf71d9f0f9ab8292f68e0852f655a37982e59eab
|
refs/heads/master
| 2021-05-05T11:38:28.271150 | 2020-01-20T19:00:23 | 2020-01-20T19:00:23 | 118,187,826 | 0 | 1 | null | 2018-01-19T22:55:03 | 2018-01-19T22:55:03 | null |
UTF-8
|
R
| false | true | 1,069 |
rd
|
tsa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_data.R
\name{tsa}
\alias{tsa}
\title{British Columbia Timber Supply Areas and TSA Blocks}
\format{An \code{sf} or \code{Spatial} polygons object with B.C.'s Timber Supply
Areas and TSA Blocks}
\source{
Original data from the
\href{https://catalogue.data.gov.bc.ca/dataset/8daa29da-d7f4-401c-83ae-d962e3a28980}{B.C. Data Catalogue},
under the
\href{https://www2.gov.bc.ca/gov/content?id=A519A56BC2BF44E4A008B33FCF527F61}{Open
Government Licence - British Columbia}.
}
\usage{
tsa(class = c("sf", "sp"), ...)
}
\arguments{
\item{class}{class of object to import; one of \code{"sf"} (default) or \code{"sp"}.}
\item{...}{arguments passed on to \link{get_big_data}}
}
\description{
The spatial representation for a Timber Supply Area or TSA Supply Block:
A Timber Supply Area is the primary unit for allowable annual cut (AAC)
determination. A TSA Supply Block is a designated area within the TSA
where the Ministry approves the allowable annual cuts.
}
\details{
Updated 2017-11-03
}
|
22e7bbc03ba9b976607ab22a467d5504223ba012
|
8dc79304ecd803c5a9bce0dd62e4b25d4523649d
|
/man/getFunctionEnvelopeCat.Rd
|
c45c53b92fcc29dcf35f3a2d5489789ce2b79211
|
[] |
no_license
|
jonotuke/catenary
|
2fb519be7ef9cafcee1255902971f12de383c81d
|
f2e64e4dabe69af8b74fae028ff179f72efae4b5
|
refs/heads/master
| 2020-04-02T04:05:23.024278 | 2018-05-04T07:47:44 | 2018-05-04T07:47:44 | 60,389,251 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 889 |
rd
|
getFunctionEnvelopeCat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getFunctionEnvelopeCat.R
\name{getFunctionEnvelopeCat}
\alias{getFunctionEnvelopeCat}
\title{Function to return function envelope for catenary}
\usage{
getFunctionEnvelopeCat(data, R = 1000, initial, x)
}
\arguments{
\item{data}{data frame with columns \code{x} and \code{y}}
\item{R}{number of bootstrap replicates}
\item{initial}{vector of starting values (c1,c2,lambda)}
\item{x}{vector of x values at which the envelope is evaluated}
}
\value{
data frame with x, lwr and upr
}
\description{
Use bootstrap to get bands of possible fits to data using catenary
}
\note{
February 12 2013
}
\examples{
x <- runif(100,-2,2)
y <- f(x=x,c1=1,c2=2,lambda=3) + rnorm(100)
df <- data.frame(x=x,y=y)
plot(y~x,data=df,pch=16,cex=0.5)
bounds <- getFunctionEnvelopeCat(data=df,initial=c(1,2,3),x=seq(-2,2,l=100))
lines(bounds$x,bounds$lwr)
lines(bounds$x,bounds$upr)
}
\author{
Jono Tuke, Matthew Roughan
}
\keyword{internal}
|
b9dc257fb85e457f6c4cac9ee769bcddda46250d
|
cb5d3ff3ab8e30c7c14215d1d3a64a05d82b02c3
|
/plot3.R
|
7d868db40c5a60123473a659461b48328c9e7ccb
|
[] |
no_license
|
dwaynedreakford/ExData_Plotting1
|
d2abc89062ad48c43dc83dab2f98b02631aca104
|
fe382136aa5467e3a26a2f4b04242b14fe648121
|
refs/heads/master
| 2021-01-11T03:05:58.774256 | 2016-10-17T06:08:59 | 2016-10-17T06:08:59 | 71,093,291 | 0 | 0 | null | 2016-10-17T02:34:56 | 2016-10-17T02:34:55 | null |
UTF-8
|
R
| false | false | 1,724 |
r
|
plot3.R
|
makePlot3 <- function() {
# Read the dataset
powerData <- read.delim("household_power_consumption.txt", sep = ";", na.strings = "?", colClasses = "character")
# Filter the observations for the dates "2007-02-01" and "2007-02-02"
powerData["DateTime"] <- strptime(paste(powerData$Date, powerData$Time), format="%d/%m/%Y %H:%M:%S")
powerData$Date <- as.Date(powerData$Date, format="%d/%m/%Y")
powerData <- subset(powerData, powerData$Date==as.Date("2007-02-01") | powerData$Date==as.Date("2007-02-02"))
# Convert the numeric variables
powerData$Global_active_power <- as.numeric(powerData$Global_active_power)
powerData$Global_reactive_power <- as.numeric(powerData$Global_reactive_power)
powerData$Voltage <- as.numeric(powerData$Voltage)
powerData$Global_intensity <- as.numeric(powerData$Global_intensity)
powerData$Sub_metering_1 <- as.numeric(powerData$Sub_metering_1)
powerData$Sub_metering_2 <- as.numeric(powerData$Sub_metering_2)
powerData$Sub_metering_3 <- as.numeric(powerData$Sub_metering_3)
# Create a date-time (POSIXlt) vector from the Date and Time variables
dateTime <- strptime(paste(as.character.Date(powerData$Date), powerData$Time), format="%Y-%m-%d %H:%M:%S")
# Create the plot
png(filename = "plot3.png")
plot(dateTime, powerData$Sub_metering_1, type="n", ylab = "Energy sub metering", xlab = "")
lines(dateTime, powerData$Sub_metering_1)
lines(dateTime, powerData$Sub_metering_2, col = "red")
lines(dateTime, powerData$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"))
dev.off()
}
|
351be250c035122edcb0eafeb362b4a417676fe8
|
7b7a28198b4948db5ce5040ed6ded340cda2d1cb
|
/R/DNbuilder-lm.R
|
b9e636b22ce28457a2ef18482bfd6ac536a1e57a
|
[] |
no_license
|
amirjll/DynNom-V4.1.1
|
5061dcdb7ed27addd20c704ce54be74d7167bbb1
|
8c7afbd08321241cc5c41905d666489b6059ddd4
|
refs/heads/master
| 2020-03-23T22:24:24.185296 | 2018-07-24T14:56:10 | 2018-07-24T14:56:10 | 142,168,957 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 10,323 |
r
|
DNbuilder-lm.R
|
DNbuilder.lm <- function(model, data, clevel = 0.95, m.summary = c("raw", "formatted"),
covariate = c("slider", "numeric")) {
if (length(dim(data)) > 2 & sum(class(data)=="data.frame")==0)
stop("Error in data format: dataframe format required")
if (attr(model$terms, "dataClasses")[[1]] == "logical")
stop("Error in model syntax: logical form for response not supported")
if (tail(names(attr(model$terms,"dataClasses")), n = 1) == "(weights)") {
n.terms <- length(attr(model$terms,"dataClasses"))
attr(model$terms,"dataClasses") <- attr(model$terms,"dataClasses")[1:n.terms - 1]
}
for(i in 1:length(names(attr(model$terms, "dataClasses")))) {
com1 <- numeric(length(names(data)))
for(j in 1:length(names(data))) {
if (names(attr(model$terms, "dataClasses"))[i] == names(data)[j]) com1[j] = 1
}
if (sum(com1) == 0)
stop("Error in model syntax: some of model's terms do not match to variables' name in dataset")
}
covariate <- match.arg(covariate)
m.summary <- match.arg(m.summary)
wdir <- getwd()
app.dir <- paste(wdir, "DynNomapp", sep="/")
message(paste("creating new directory: ", app.dir, sep=""))
dir.create(app.dir)
setwd(app.dir)
message(paste("Export dataset: ", app.dir, "/dataset.rds", sep=""))
saveRDS(data, "dataset.rds")
#################################
y <- model$model[[1]]
mterms <- attr(model$terms, 'dataClasses')
n.mterms <- names(mterms)
xlevels <- model$xlevels
df <- model$df.residual
model.call <- paste('Linear Regression:', model$call[2], sep = ' ')
plot.title <- paste(clevel * 100, '% ', 'Confidence Interval for Response', sep = '')
if(tail(n.mterms,n=1)=="(weights)"){
callm = paste(paste(model$call)[1],"(",paste(model$call)[2],", ","data = data",", ","weights = ", paste(model$call)[length(paste(model$call))] ,")", sep="")
} else{
callm = paste(paste(model$call)[1],"(",paste(model$call)[2],", ","data = data",")", sep="")
}
if (m.summary == 'raw'){
m.print <- paste("summary(model)", sep="")
} else{
m.print <- paste("stargazer(model, type = 'text', omit.stat = c('LL', 'ser', 'f'), ci = TRUE, ci.level = ",clevel,",
single.row = TRUE, title = '",model.call,"')", sep="")
}
datname <- paste(substitute(data))
if(length(datname) > 1){
datname <- datname[1]
cat("\n Warning messages:
The data frame name might be incorrect due to its complicated structure.
        You need to edit the following line in the global script to call your data correctly
data <- ....", "\n")
}
#### global.R generator
GLOBAL=paste("library(ggplot2)
library(shiny)
library(plotly)
library(stargazer)
library(compare)
##################################################################
#### You may need to edit the following lines
#### if data or the model are not defined correctly
##################################################################
data <- readRDS('dataset.rds')
model <- ",callm,"
m.summary <- '",m.summary,"'
covariate <- '", covariate,"'
", sep="")
#### server.R generator
SERVER=paste("input.data <- NULL
old.d <- NULL
xlevels <- model$xlevels
mterms <- attr(model$terms, 'dataClasses')
n.mterms <- names(mterms)
server = function(input, output){
q <- observe({ if (input$quit == 1) stopApp() })
limits0 <- c(",suppressWarnings(mean(as.numeric(y)) - 3 * sd(y)),", ",suppressWarnings(mean(as.numeric(y)) + 3 * sd(y)),")
limits <- reactive({ if (as.numeric(input$limits) == 1) {limits <- c(input$lxlim, input$uxlim)} else {limits <- limits0} })
neededVar <- n.mterms
data <- data[, neededVar]
input.data <<- data[1, ]
input.data[1, ] <<- NA
b <- 1
i.factor <- NULL
i.numeric <- NULL
for (j in 2:length(mterms)) {
for (i in 1:length(data)) {
if (n.mterms[j] == names(data)[i]) {
if (mterms[[j]] == 'factor' |
mterms[[j]] == 'ordered' |
mterms[[j]] == 'logical') {
i.factor <- rbind(i.factor, c(n.mterms[j], j, i, b))
(break)() }
if (mterms[[j]] == 'numeric') {
i.numeric <- rbind(i.numeric, c(n.mterms[j], j, i))
b <- b + 1
(break)()
} } } }
nn <- nrow(i.numeric)
if (is.null(nn)) { nn <- 0 }
nf <- nrow(i.factor)
if (is.null(nf)) { nf <- 0 }
if (nf > 0) {
output$manySliders.f <- renderUI({
slide.bars <- list(lapply(1:nf, function(j) {
selectInput(paste('factor', j, sep = ''),
names(mterms[as.numeric(i.factor[j, 2])]),
xlevels[[as.numeric(i.factor[j, 2]) - as.numeric(i.factor[j, 4])]], multiple = FALSE)
}))
do.call(tagList, slide.bars)
}) }
if (nn > 0) {
output$manySliders.n <- renderUI({
if (covariate == 'slider') {
slide.bars <- list(lapply(1:nn, function(j) {
sliderInput(paste('numeric', j, sep = ''),
names(mterms[as.numeric(i.numeric[j, 2])]),
min = floor(min(na.omit(data[, as.numeric(i.numeric[j, 3])]))),
max = ceiling(max(na.omit(data[, as.numeric(i.numeric[j, 3])]))),
value = mean(na.omit(data[, as.numeric(i.numeric[j, 3])])))
})) }
if (covariate == 'numeric') {
slide.bars <- list(lapply(1:nn, function(j) {
numericInput(paste('numeric', j, sep = ''),
names(mterms[as.numeric(i.numeric[j, 2])]),
value = round(mean(na.omit(data[, as.numeric(i.numeric[j, 3])]))))
})) }
do.call(tagList, slide.bars)
}) }
a <- 0
new.d <- reactive({
input$add
if (nf > 0) { input.f <- vector('list', nf)
for (i in 1:nf) { input.f[[i]] <- isolate({ input[[paste('factor', i, sep = '')]] })
names(input.f)[i] <- i.factor[i, 1] } }
if (nn > 0) { input.n <- vector('list', nn)
for (i in 1:nn) { input.n[[i]] <- isolate({ input[[paste('numeric', i, sep = '')]] })
names(input.n)[i] <- i.numeric[i, 1] } }
if (nn == 0) { out <- data.frame(do.call('cbind', input.f)) }
if (nf == 0) { out <- data.frame(do.call('cbind', input.n)) }
if (nf > 0 & nn > 0) { out <- data.frame(do.call('cbind', input.f), do.call('cbind', input.n)) }
if (a == 0) { wher <- match(names(out), names(input.data)[-1])
out <- out[wher]
input.data <<- rbind(input.data[-1], out) }
if (a > 0) { wher <- match(names(out), names(input.data))
out <- out[wher]
if (isTRUE(compare(old.d, out)) == FALSE) {input.data <<- rbind(input.data, out)}}
a <<- a + 1
out })
p1 <- NULL
old.d <- NULL
data2 <- reactive({
if (input$add == 0)
return(NULL)
if (input$add > 0) { if (isTRUE(compare(old.d, new.d())) == FALSE) {
OUT <- isolate({ pred <- predict(model, newdata = new.d(), conf.int = ",clevel,", se.fit = TRUE)
lwb <- pred$fit - (",qt(1 - (1 - clevel)/2, df)," * pred$se.fit)
upb <- pred$fit + (",qt(1 - (1 - clevel)/2, df)," * pred$se.fit)
d.p <- data.frame(Prediction = pred$fit, Lower.bound = lwb, Upper.bound = upb)
old.d <<- new.d()
data.p <- cbind(d.p, counter = 1)
p1 <<- rbind(p1, data.p)
p1$count <- seq(1, dim(p1)[1])
p1 }) } else { p1$count <- seq(1, dim(p1)[1])
OUT <- p1 } }
OUT })
output$plot <- renderPlotly({
if (input$add == 0)
return(NULL)
if (is.null(new.d())) return(NULL)
if (is.na(input$lxlim) | is.na(input$uxlim)) { lim <- limits0 } else { lim <- limits() }
PredictNO <- 0:(sum(data2()$counter) - 1)
in.d <- data.frame(input.data[-1,])
xx=matrix(paste(names(in.d), ': ',t(in.d), sep=''), ncol=dim(in.d)[1])
Covariates=apply(xx,2,paste,collapse='<br />')
yli <- c(0 - 0.5, 10 + 0.5)
if (dim(input.data)[1] > 11) yli <- c(dim(input.data)[1] - 11.5, dim(input.data)[1] - 0.5)
p <- ggplot(data = data2(), aes(x = Prediction, y = PredictNO, text = Covariates,
label = Prediction, label2 = Lower.bound, label3=Upper.bound)) +
geom_point(size = 2, colour = data2()$count, shape = 15) +
ylim(yli[1], yli[2]) + coord_cartesian(xlim = lim) +
geom_errorbarh(xmax = data2()$Upper.bound, xmin = data2()$Lower.bound,
size = 1.45, height = 0.4, colour = data2()$count) +
labs(title = '",plot.title,"',
x = 'Response Variable', y = NULL) + theme_bw() +
theme(axis.text.y = element_blank(), text = element_text(face = 'bold', size = 10))
gp=ggplotly(p, tooltip = c('text','label','label2','label3'))
gp})
output$data.pred <- renderPrint({
if (input$add > 0) {
if (nrow(data2()) > 0) {
if (dim(input.data)[2] == 1) {
in.d <- data.frame(input.data[-1, ])
names(in.d) <- ",n.mterms[2],"
data.p <- cbind(in.d, data2()[1:3]) }
if (dim(input.data)[2] > 1) { data.p <- cbind(input.data[-1, ], data2()[1:3]) }}
stargazer(data.p, summary = FALSE, type = 'text') } })
output$summary <- renderPrint({
",m.print,"
})
}
", sep = "")
#### ui.R generator
UI=paste("ui = bootstrapPage(fluidPage(
titlePanel('Dynamic Nomogram'),
sidebarLayout(sidebarPanel(uiOutput('manySliders.f'),
uiOutput('manySliders.n'),
checkboxInput('limits', 'Set x-axis ranges'),
conditionalPanel(condition = 'input.limits == true',
numericInput('lxlim', 'x-axis lower', NA),
numericInput('uxlim', 'x-axis upper', NA)),
actionButton('add', 'Predict'),
br(), br(),
helpText('Press Quit to exit the application'),
actionButton('quit', 'Quit')
),
mainPanel(tabsetPanel(id = 'tabs',
tabPanel('Graphical Summary', plotlyOutput('plot')),
tabPanel('Numerical Summary', verbatimTextOutput('data.pred')),
tabPanel('Model Summary', verbatimTextOutput('summary'))
))))
)", sep = "")
output=list(ui=UI, server=SERVER, global=GLOBAL)
text <- paste("This guide will describe how to deploy a shiny application using scripts generated by DNbuilder:
1. Run the shiny app by setting your working directory to the DynNomapp folder, and then run: shiny::runApp()
If you are using the RStudio IDE, you can also run it by clicking the Run App button in the editor toolbar after opening one of the R scripts.
2. You may want to modify the code to apply any necessary changes. Run the app again to confirm that it works as expected.
3. Deploy the application either by clicking the Publish button in the top right corner of the running app, or by deploying the generated files on your own server if you host one.
You can find a full guide of how to deploy an application on shinyapp.io server here:
http://docs.rstudio.com/shinyapps.io/getting-started.html#deploying-applications", sep="")
message(paste("writing file: ", app.dir, "/README.txt", sep=""))
writeLines(text, "README.txt")
message(paste("writing file: ", app.dir, "/ui.R", sep=""))
writeLines(output$ui, "ui.R")
message(paste("writing file: ", app.dir, "/server.R", sep=""))
writeLines(output$server, "server.R")
message(paste("writing file: ", app.dir, "/global.R", sep=""))
writeLines(output$global, "global.R")
setwd(wdir)
}
|
46c881b22f0647f5c701e3b32071fd55e01a1c13
|
dc359f8017e0d3d8b89585b012d6ddfa92d0336e
|
/R/make_csv.R
|
3ed9fc414bc0cb6d422e339bb10e5490b1f7f2b9
|
[] |
no_license
|
fmichonneau/impatiens
|
ab50245429f1c87b6e8791d99750e20faa1dc158
|
d48d31d5fb6143ffcc770c92459575297a509f24
|
refs/heads/master
| 2021-03-16T08:29:46.843419 | 2015-05-28T19:10:54 | 2015-05-28T19:10:54 | 26,188,539 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 691 |
r
|
make_csv.R
|
check_file <- function(file, verbose) {
if (!file.exists(file)) {
stop(file, " wasn't created.")
} else {
    if (verbose) message(file, " successfully created.")
}
}
create_dir <- function(file, verbose) {
if (!file.exists(dirname(file))) {
if (verbose) {
message("Create: ", dirname(file))
}
dir.create(dirname(file), recursive = TRUE)
}
}
make_csv <- function(obj, file, ..., verbose = TRUE) {
on.exit(check_file(file, verbose = verbose))
create_dir(file = file, verbose = verbose)
if (verbose) {
message("Creating csv file: ", file)
}
write.csv(obj, file = file, row.names = FALSE, ...)
}
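## Usage sketch (the data frame and output path below are hypothetical, shown only
## to illustrate how make_csv() is meant to be called):
# df <- data.frame(id = 1:3, value = c("a", "b", "c"))
# make_csv(df, file = "output/tables/df.csv")  # creates output/tables/ if missing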
|
5c4be76e277d1200ec689dd164f4e8e41fdfbf94
|
b2cb3b4ad1b581a59448552b0d0911e6a58c3dae
|
/chapter3_simulations/code/generate_prior_predictive_distributions.R
|
4122008f09c7bd2dcdffb5b5a1aba08e853e529f
|
[
"MIT"
] |
permissive
|
vasishth/RetrievalModels
|
1c3c6a9b915cdae7073c7895653cdf089cb6f693
|
40eb268da11cd3adb7287ec32435cfc32c7de724
|
refs/heads/master
| 2022-01-01T11:49:06.842924 | 2021-12-16T10:11:43 | 2021-12-16T10:11:43 | 242,512,415 | 1 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 2,825 |
r
|
generate_prior_predictive_distributions.R
|
library(dplyr)
library(tidyr)
library(ggplot2)
source("interACT.R")
rmsd <- function (obs, pred) {
sqrt(mean((obs - pred)^2, na.rm = TRUE))
}
compute_int_means <- function(d){
int <- select(filter(d, Distractor=="Match"), -Condition, -Distractor)
dim(int)
int$int <- filter(d, Distractor=="Match")$latency - filter(d, Distractor=="Mismatch")$latency
#
# means <- group_by(int, Set, Target, lf, ans, mas, mp, rth, bll, lp, ldp, blc, dbl, ndistr) %>% summarise(Effect=mean(int), SE=sd(int)/sqrt(length(int))) %>% ungroup() %>% mutate(lower=Effect-SE, upper=Effect+SE)
# means
means <- group_by(int, Set, Target, lf, ans, mas, mp, rth, bll, psc, pic, qcf, qco, cuesim, tprom, dprom, lp, ldp, blc, dbl, ndistr, cueweighting) %>% summarise(Effect=mean(int), SE=sd(int)/sqrt(length(int)), Sji_neg=sum(Sji_neg)) %>% ungroup() %>% mutate(lower=Effect-SE, upper=Effect+SE)
means
}
convert2log <- function(x){
ifelse(x>=1, log(x), ifelse(x<=-1, -log(abs(x)), 0))
}
convert2log10 <- function(x){
x <- ifelse(x>-1 & x<1, 0, x)
x <- ifelse(x<=-1, -log10(abs(x)), x)
x <- ifelse(x>=1, log10(abs(x)), x)
}
## this function does the work of estimating effect sizes:
iterate_lf <- function(values){
maxset <- 0
means <- NULL
for(v in values){
lf <<- v
pmatr <- create_param_matrix(model_4cond, 1000)
results <- run(pmatr)
means2 <- compute_int_means(results)
means2$Set <- means2$Set+maxset
means <- bind_rows(means, means2)
}
means
}
## we set the parameters:
reset_params()
psc <<- 0
qcf <<- 0
cuesim <<- -1
bll <<- 0.5
#mp <<- seq(0,3,1)
#mp <<- seq(0.15,.35,0.1)
## default in Engelmann et al 2019 Cog Sci paper
mp <<- 0.15
#mas <<- seq(1,2,0.5)
## default in Engelmann et al 2019 Cog Sci paper
mas <<- 1.5
#mas<<-sort(rnorm(50,mean=1.5,sd=0.25))
#hist(mas)
#ans <<- seq(0.1,0.3,.1)
# default in Engelmann et al 2019 Cog Sci paper
ans <<- 0.2
##rth <<- seq(-2,-1,.5)
# default in Engelmann et al 2019 Cog Sci paper
rth <<- -1.5
# dbl <<- seq(-2,2,1)
dbl <<- 0
#cueweighting <<- seq(1,2,by=0.5)
cueweighting <<- 1 ## must be estimated
latency_factor <<- seq(0.1,0.5,.1)
#latency_factor <<- sort(abs(rnorm(100,mean=0.3,sd=0.1)))
## for Engelmann Vasishth book:
# simulations using cuewt 1
means <- iterate_lf(latency_factor)
lv05pspaceEVcuewt1 <- means
save(lv05pspaceEVcuewt1, file="lv05pspaceEVcuewt1.Rd")
# simulations using cuewt 2
cueweighting <<- 2
means <- iterate_lf(latency_factor)
lv05pspaceEVcuewt2 <- means
save(lv05pspaceEVcuewt2, file="lv05pspaceEVcuewt2.Rd")
# simulations using cuewt 4
cueweighting <<- 4
means <- iterate_lf(latency_factor)
lv05pspaceEVcuewt4 <- means
save(lv05pspaceEVcuewt4, file="lv05pspaceEVcuewt4.Rd")
system("cp lv05pspaceEVcuewt* ../data/")
with(means,tapply(Effect,Target,mean))
## figures continued in c02SCCPVasishthEngelmann.Rnw
|
f40441b5e7e797a77326d675c1c114aec6e09584
|
154f590295a74e1ca8cdde49ecbb9cbb0992147e
|
/man/cv.Rd
|
f2133c31d4cb0fea71e295bf80d907e6917a4b7e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0"
] |
permissive
|
klingerf2/EflowStats
|
2e57df72e154581de2df3d5de3ebd94c3da0dedf
|
73891ea7da73a274227212a2ca829084149a2906
|
refs/heads/master
| 2017-12-07T10:47:25.943426 | 2016-12-28T20:52:42 | 2016-12-28T20:52:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 522 |
rd
|
cv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.R
\name{cv}
\alias{cv}
\title{Function to return the coefficient of variation for a given data series}
\usage{
cv(x)
}
\arguments{
\item{x}{data frame containing value data for the chosen timeseries}
}
\value{
cv coefficient of variation for the given data frame
}
\description{
This function accepts a data frame containing daily data and returns the coefficient of variation
}
\examples{
qfiletempf<-sampleData
cv(qfiletempf$discharge)
}
|
73a31bc41376ae3769ecfc570edb030cb98f4b7b
|
8a081c8fb7584d2ecd3dd68479923d8dbf0345e1
|
/Generate_InputFiles.R
|
8fcdbf0f540bfd7200541cd7c0926c8254532c93
|
[] |
no_license
|
lfuess/TagSeqMS
|
a895d140475f8b6b8e7cf027ab18389c2cd4d61c
|
6da10a68f3d92047ba7b36e6d09a908bc6da6492
|
refs/heads/master
| 2022-09-07T06:13:12.101637 | 2020-06-03T21:30:21 | 2020-06-03T21:30:21 | 257,366,869 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 2,141 |
r
|
Generate_InputFiles.R
|
##This script will be used to prepare data for DESEQ2##
library(dplyr)
library(janitor)
library(data.table)
initialdata = read.csv("Masterdata_TagSeq_SDH_23July2018.csv", check.names= FALSE)
dim(initialdata);
names(initialdata);
##Now to select the data we want##
data = as.data.frame(initialdata[c(2,4,27:28,30:31,34,54)]);
names(data)
dim(data)
head(data)
##input the list of samples you are using##
##we use a hand-curated file which lists all the samples that passed sequencing and assembly##
samples = read.csv("Good_Samples.csv", check.names = FALSE)
names(samples)
dim(samples)
##and combine the files to reduce down to just the good samples##
merged=merge(samples, data, by.x = "Sample", by.y = "sample_ID")
dim(merged) ##should have 408 lines remaining##
##Select everything but F1s
GBC=merged[merged[,3] == "GBC",]
RBC=merged[merged[,3] == "RBC",]
F2=merged[merged[,3] == "F2",]
final=rbind(GBC,RBC,F2)
dim(final) ##should be 393##
##order##
final1=final[order(final$Sample),]
##change NAs in sex to Unknown##
levels <- levels(final1$Sex)
levels[length(levels) + 1] <- "U"
final1$Sex <- factor(final1$Sex, levels = levels)
final1[c("Sex")][is.na(final1[c("Sex")])] <- "U"
##remove rows with NAs##
final = final1[complete.cases(final1), ]
##write out the experimental design file##
write.csv(final, file = "ExpDesign.csv", row.names=FALSE)
##last step is to refine our read count matrix##
##get a list of new good samples (samples which didn't have NAs for any of our model factors)##
gs = final[,c(1:2)]
##merge that with your read count matrix, to get a read count matrix with only good samples and no F1s##
reads = as.data.frame(t(read.csv("allcounts.csv", check.names = FALSE)))
reads[1:10,1:10]
##fix names##
reads = reads %>%
row_to_names(row_number = 1)
reads = setDT(reads, keep.rownames = TRUE)[]
##merge##
merged=merge(reads, gs, by.x = "rn", by.y = "Sample")
merged=merged[,c(1:25846)]
##write it out##
finalcounts=as.data.frame(t(merged))
finalcounts[1:10,1:10]
finalcounts = finalcounts %>%
row_to_names(row_number = 1)
write.csv(finalcounts, "allcounts_final.csv")
|
afe0e04e34439c975a8f9ffbe731d5f7ac08424a
|
9053fe0a4613ceb51475071215358acffc4c4976
|
/assignment1/R_Python_Session/a.r
|
4a134f5d714fa1a0b8db973a5800fc6123b5c946
|
[] |
no_license
|
arunv3rma/CS-725
|
fbf1c38a6a1ab15276eae487a137ba72ffb27d3c
|
171d879041eb02a43fafc9a19982776f8199b412
|
refs/heads/master
| 2021-06-04T07:19:02.966467 | 2016-10-25T20:11:05 | 2016-10-25T20:11:05 | 69,154,112 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 3,317 |
r
|
a.r
|
###############################################################################
##Part-1
setwd("/home/raju/Downloads/kaggle")
train <- read.csv("~/Downloads/kaggle/train.csv")
test <- read.csv("~/Downloads/kaggle/test.csv")
str(train)
head(train,100)
t1<-train[1:100,]
t2<-train[100:891,]
str(t1)
str(t2)
train <- t2
View(train)
table(train$Survived)
prop.table(table(train$Survived))
table(train$Age)
summary(train$Age)
##Checking the structure
str(test)
##Adding a Column
test$Survived <- rep(0,418)
##Checking the frequency of values in a column in a table
table(test$Survived)
str(test)
test$k <- rep(0)
str(test)
table(test$Survived)
##dropping a column
test$k<-NULL
str(test)
##extracting columns from table to be output use data.frame
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
##writing to csv file
write.csv(submit, file = "result.csv")
###############################################################################
##Part-2
prop.table(table(train$Sex, train$Survived))
#checking proportions in the table
prop.table(table(train$Sex, train$Survived),1)
##adding conditions
test$Survived[test$Sex == 'female'] <- 1
table(test$Survived)
train$child<-rep(0)
train$child[train$Age<18]<-1
prop.table(table(train$child))
test$child<-rep(0)
test$child[test$Age<18]<-1
prop.table(table(test$child))
##aggregate
aggregate(Survived ~ child + Sex, data=train, FUN=sum)
aggregate(Survived ~ child + Sex, data=train, FUN=length)
aggregate(Survived ~ child + Sex, data=train, FUN=function(x) {sum(x)/length(x)})
train$familysize<-train$child+train$SibSp+1
str(train)
train$Fare2 <- '30+'
train$Fare2[train$Fare < 30 & train$Fare >= 20] <- '20-30'
train$Fare2[train$Fare < 20 & train$Fare >= 10] <- '10-20'
train$Fare2[train$Fare < 10] <- '<10'
prop.table(table(train$Fare2))
prop.table(table(train$Fare2,train$Survived),1)
str(test)
test$Fare2 <- '30+'
test$Fare2[test$Fare < 30 & test$Fare >= 20] <- '20-30'
test$Fare2[test$Fare < 20 & test$Fare >= 10] <- '10-20'
test$Fare2[test$Fare < 10] <- '<10'
aggregate(Survived ~ Fare2 + Sex + child, data=train, FUN=function(x) {sum(x)/length(x)})
###############################################################################
##Fitting Models
## Initial
fit1<-lm(Survived ~ Sex,data=train)
summary(fit1)
coefficients(fit1) # model coefficients
#confint(fit, level=0.95) # CIs for model parameters
fitted(fit1) # predicted values
residuals(fit1) # residuals
sum(residuals(fit1))
#anova(fit1) # anova table
#vcov(fit1) # covariance matrix for model parameters
## With child and Fare and Sex
fit <- lm(Survived ~ Fare2 + Sex + child, data=train)
summary(fit)
coefficients(fit) # model coefficients
#confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
sum(residuals(fit))
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
#influence(fit) # regression diagnostics
test$Survived1<-round(predict(fit1,test))
test$Survived<-round(predict(fit,test))
submit1 <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived1)
##writing to csv file
write.csv(submit1, file = "result1.csv")
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
##writing to csv file
write.csv(submit, file = "result.csv")
##Plots
|
e155298e813aa55785553a57a8a638a81969b3c0
|
eb2a963e50d6954cdc73a4d0d6ab9a5dcfa35008
|
/jules/ASMap/man/exmap.Rd
|
0cfe2d9a949f100ad3d06c819b2674bdcae7e40a
|
[
"CC-BY-3.0"
] |
permissive
|
dpastoor/R-workshop
|
e02a3e27ef40fb780d6ac210c1e8d0c49c5eb7f6
|
41f5b8257cf532128d934216bdd32c2dedcdaac1
|
refs/heads/master
| 2021-01-24T03:08:14.742841 | 2014-07-11T00:17:02 | 2014-07-11T00:17:02 | 21,742,907 | 0 | 1 | null | null | null | null |
UTF-8
|
R
| false | false | 895 |
rd
|
exmap.Rd
|
\name{exmap}
\alias{exmap}
\docType{data}
\title{Genotypic marker data for a doubled haploid wheat population in R/qtl format}
\description{Linkage map marker data for a doubled
haploid population in the form of a constructed R/qtl object.
}
\usage{data(exmap)}
\format{This data relates to a linkage map of 599 markers genotyped on 218
individuals. The linkage map consists of 23 linkage groups spanning
the whole genome. Map distances have been
estimated using \code{read.cross} with the \code{"kosambi"}
mapping function. The data object has been originally constructed with
MultiPoint and curated with MapManager and R/qtl. The data is in R/qtl
format with a class structure \code{c("bc","cross")}. See
\code{read.cross} documentation for more details on the
format of this object.
}
\examples{
data(exmap, package = "ASMap")
}
\keyword{datasets}
|
49f005a6581c89c56d85b6cbc19a603b86b9af97
|
269e5ad7a6255e2de06a3512858b6a1151992eaa
|
/R/Align.Concat.R
|
74c6234bc3df4db8d47b4e94925cf2c5d40f6162
|
[] |
no_license
|
dvdeme/regPhylo
|
0c3b158283e9a7eaa72f7d8597e65eb2a4b478a0
|
56017f10b5ac7c2f54972572739b509175360a6f
|
refs/heads/master
| 2023-06-13T02:42:38.686275 | 2023-05-26T10:07:29 | 2023-05-26T10:07:29 | 165,783,636 | 5 | 1 | null | 2020-06-12T04:45:59 | 2019-01-15T04:10:34 |
R
|
UTF-8
|
R
| false | false | 12,903 |
r
|
Align.Concat.R
|
#' @title Concatenate alignments from different gene regions into a supermatrix at the species level
#' @description This function concatenates the alignments from different gene regions into a
#' single supermatrix in nexus and fasta formats, at the species level. The function also allows the
#' inclusion of species without DNA sequences, if necessary (for instance, to then use BEAST to resolve
#' polytomies).
#' @return This function returns: 1) the alignments from different gene regions
#' in nexus format '.nex', including taxa that do not have DNA sequence
#' information (nucleotides replace by '-') (all these files can be loaded separately
#' into BEAST); 2) a concatenation (a supermatrix) of all the sequences of
#' the different gene regions in nexus and fasta format; 3) a partition file in
#' txt format 'Partitions_Concat.txt' including the partitions of the different gene regions in the
#' concatenated file (this file is designed to be RAxML compatible, see RAxML manual
#' v8.2, https://sco.h-its.org/exelixis/resource/download/NewManual.pdf);
#' 4) a conversion table 'convtab.txt' betwen the gene region names used in the partition file and the
#' true name of the gene region.
#' @param input the path to the folder storing the alignments (alignments have to be in
#' fasta format with the '.fas' extension)
#' @param Sp.List.NoDNA an optional vector of the species without DNA sequences that should be included
#' in the alignment, or the option can be NULL, in which case the function automatically creates a complete
#' species list of all the species present in the different alignments.
#' @param outputConcat Name of the supermatrix (can include the path as well).
#'
#' @examples # Run the function to build a supermatrix
#' \dontrun{
#'
#' # To run the example, copy the input alignment files
#' # provided by the package to a temporary directory created in the
#' # current working directory.
#' src.dir = system.file("extdata/multi.align/ForConcat", package = "regPhylo")
#' dir.create("TempDir.ForConcat")
#' # Set up the path of the TempDir folder.
#' dest.dir = paste(getwd(), "/TempDir.ForConcat", sep="")
#' file.names <- dir(src.dir)
#' # Copy all the files stored in regPhylo/extdata/multi.align/ForConcat"
#' # into a temporary folder.
#' sapply(file.names, function(x) {
#' file.copy(from = paste(src.dir, x, sep = "/"),
#' to = paste(dest.dir, x, sep = "/"),
#' overwrite = FALSE) })
#'
#' # Run the function to build the supermatrix.
#' Align.Concat(input = "TempDir.ForConcat", Sp.List = NULL, outputConcat = NULL)
#'
#' # Import the supermatrix in R
#' require(ape)
#' Supermatrix = read.dna("TempDir.ForConcat/Concat.fas", format = "fasta")
#'
#' # Create another temporary file to build a supermatrix including species without DNA.
#' dir.create("TempDir.ForConcat2")
#' file.names <- dir("TempDir.ForConcat")
#' # select only the .fas alignment
#' file.names <- file.names[grep(".fas", file.names)]
#' # remove the Concat.fas alignment just created above.
#' file.names <- file.names[-grep("Concat.fas", file.names)]
#' sapply(file.names, function(x) {
#' file.copy(from = paste("TempDir.ForConcat", x, sep = "/"),
#' to = paste("TempDir.ForConcat2", x, sep = "/"),
#' overwrite = FALSE) })
#'
#'
#' # Run the function to build a supermatrix including two species without DNA.
#' Align.Concat(input = "TempDir.ForConcat2",
#' Sp.List = c("Titi_titi", "Toto_toto"),
#' outputConcat = "TempDir.ForConcat2/Concat_2spNoDNA")
#'
#' # Import the supermatrix into R.
#' Supermatrix2SpNoDNA = read.dna("TempDir.ForConcat2/Concat_2spNoDNA.fas",
#' format = "fasta")
#'
#'
#' # To remove the files created while running the example do the following:
#' unlink("TempDir.ForConcat", recursive = TRUE)
#' unlink("TempDir.ForConcat2", recursive = TRUE)
#'
#' }
#'
#' @export Align.Concat
Align.Concat = function(input = NULL, Sp.List.NoDNA = NULL, outputConcat = NULL) {
listAli = paste(input, list.files(input), sep = "/")
listAl = listAli[grep(".fas", listAli)]
if(length(listAl) < length(listAli)) {
stop(paste("Other files in non fasta format using '.fas' extension are present
in the input folder, only '.fas' alignment files must be present."))
}
AlignList = list(1:length(listAl))
SeqName = vector()
DFmat = matrix(NA, ncol = 3)[-1, ]
i = 1
for (i in 1:length(listAl)) {
AlignList[[i]] = seqinr::read.fasta(listAl[i], as.string = TRUE) # Store the alignments in a list.
SeqT = vector()
k = 1
for (k in 1:length(AlignList[[i]])) SeqT = c(SeqT, gsub(" ", "", AlignList[[i]][[k]][1],
fixed = TRUE))
DFmat = rbind(DFmat, cbind(Alignment = rep(i, length(SeqT)), Seq.Name = labels(AlignList[[i]]),
Sequences = SeqT)) # Extract the sequence names, the sequences and the numbers of the alignment.
} # End for i.
Seq.Name.cor = gsub("_R_", "", DFmat[, 2], fixed = TRUE) # Remove the '_R_' pattern when the sequences have been reversed complemented.
Seq.Name.cor = gsub(".", "", Seq.Name.cor, fixed = TRUE) # Remove the '.', in the sequence name.
Seq.Name.cor = gsub("?", "", Seq.Name.cor, fixed = TRUE) # Remove the '?', in the sequence name.
Seq.Name.cor = gsub("-", "", Seq.Name.cor, fixed = TRUE) # Remove the '-', in the sequence name.
Seq.Name.cor = gsub("_sp_", "_sp", Seq.Name.cor, fixed = TRUE) # Remove the '_' between sp and the letter or number defining a species not yet assigned a binomial species name.
Seq.Name.cor = gsub("_nsp_", "_nsp", Seq.Name.cor, fixed = TRUE) # Remove the '_' between nsp and the letter or number defining a new species not yet assigned a binomial species name.
a = strsplit(Seq.Name.cor, "_", fixed = T) # Split the sequence name using '_' to extract the genus and species name.
a1 = lapply(a, function(x) x[1])
a2 = lapply(a, function(x) unlist(strsplit(x[2], "|", fixed = T))[1])
Sp.Name = unlist(lapply(seq(1, length(a1)), function(x) paste(a1[x], "_", a2[x],
sep = ""))) # Extract the species name as the first two elements of each item in the list.
Sp.Name.list = unique(Sp.Name) # The species list present in the different alignments
# Include the option to also provide additional species without DNA.
if (is.null(Sp.List.NoDNA)) {
Sp.DF = as.data.frame(cbind(Sp.Name = Sp.Name.list, PresenceOverall = rep(1,
length(Sp.Name.list))))
} else {
Sp.List.NoDNA = gsub(" ", "_", Sp.List.NoDNA, fixed = TRUE) # Remove the spaces in the species names, for additional species without DNA.
Sp.List.NoDNA = gsub(".", "", Sp.List.NoDNA, fixed = TRUE) # Same syntax correction for the sequence name.
Sp.List.NoDNA = gsub("?", "", Sp.List.NoDNA, fixed = TRUE)
Sp.List.NoDNA = gsub("-", "", Sp.List.NoDNA, fixed = TRUE)
Sp.List.NoDNA = gsub("_sp_", "_sp", Sp.List.NoDNA, fixed = TRUE)
Sp.List.NoDNA = gsub("_nsp_", "_nsp", Sp.List.NoDNA, fixed = TRUE)
Sp.DF = as.data.frame(cbind(Sp.Name = c(Sp.Name.list, Sp.List.NoDNA), PresenceOverall = rep(1,
length(c(Sp.Name.list, Sp.List.NoDNA)))))
}
# Large table storing all the sequences for all the alignments of interest.
DFmat2 = as.data.frame(cbind(Alignment = DFmat[, 1], Sp.Name = Sp.Name, Seq.Name.cor,
Sequences = DFmat[, 3])) # include the species name for each sequence.
# Prepare the nexus extension of the file names.
listAl.nexus = gsub(".fas", ".nex", listAl, fixed = TRUE)
# Prepare the supermatrix.
SuperMat = matrix(NA, ncol = 1)[-1, ]
# Create an alignment with all the species when considering all the alignments
# together.
i = 1
for (i in 1:length(unique(DFmat2[, 1]))) {
DFtemp = DFmat2[which(DFmat2[, 1] == i), ] # Select the alignment
AlignTemp = merge(Sp.DF, DFtemp, by.x = 1, by.y = 2, all.x = TRUE)
AlignTemp = as.matrix(AlignTemp) # Convert into a matrix
    # Test if multiple sequences per species are present in the alignment
if(dim(AlignTemp)[1]>dim(Sp.DF)[1]) {
stop(paste("The alignment ", listAl[i], " certainly contains multiple sequences for the same species:
to be concatenated at the species level, only one sequence per species per alignment must be provided!"))
}
AlignTemp[which(is.na(AlignTemp[, 4]) == "TRUE"), 5] <- paste(rep("-", nchar(as.character(DFtemp[1,
4]))), collapse = "") # Replace the empty sequence by a long string of '----'
AlignTemp[which(is.na(AlignTemp[, 4]) == "TRUE"), 4] = as.character(AlignTemp[which(is.na(AlignTemp[,
4]) == "TRUE"), 1]) # Replace the sequence name of the empty sequence by the species name.
# Feed the supermatrix.
SuperMat = cbind(SuperMat, AlignTemp[, 5])
# Create a nexus file including those empty sequences.
NBChar = nchar(as.character(AlignTemp[1, 5]))
cat(file = listAl.nexus[i], "#NEXUS", "\n", "\n", "BEGIN DATA;", "\n", "\t",
paste("DIMENSIONS NTAX=", dim(AlignTemp)[1], sep = ""), paste(" NCHAR=",
NBChar, ";", sep = ""), sep = "", append = TRUE)
cat(file = listAl.nexus[i], "\n", "\t", "FORMAT DATATYPE=DNA GAP=-;", "\n",
"MATRIX", "\n", sep = "", append = T)
utils::write.table(AlignTemp[, c(4, 5)], file = listAl.nexus[i], sep = "\t", append = TRUE,
col.names = FALSE, row.names = FALSE, quote = FALSE)
cat(file = listAl.nexus[i], "\t", ";", "\n", "END;", sep = "", append = TRUE)
} ## End for i
# Create a large supermatrix.
concat = vector()
i = 1
for (i in 1:dim(SuperMat)[1]) {
concat = c(concat, paste(SuperMat[i, ], collapse = "")) # Concatenate the different alignments in one long sequence.
}
SuperMatDF = cbind(sort(as.character(Sp.DF[, 1])), concat) # Add the species name for all the sequences, and order the sequences alphabetically to match the species names in the supermatrix.
# Create the nexus file for the supermatrix.
NBChar = nchar(as.character(concat[1]))
b = unlist(strsplit(listAl.nexus[1], "/", fixed = TRUE))
# ConcatName = paste(paste(b[-length(b)], collapse = "/"), "Concat.nex", sep = "/")
# If the option outputConcat is null, the name of the concat will be "Concat"
if(is.null(outputConcat)){
ConcatName = paste(paste(b[-length(b)], collapse = "/"), "Concat.nex", sep = "/")
} else {
ConcatName = paste(outputConcat, ".nex", sep="")
}
cat(file = ConcatName, "#NEXUS", "\n", "\n", "BEGIN DATA;", "\n", "\t", paste("DIMENSIONS NTAX=",
dim(SuperMatDF)[1], sep = ""), paste(" NCHAR=", NBChar, ";", sep = ""), sep = "",
append = TRUE)
cat(file = ConcatName, "\n", "\t", "FORMAT DATATYPE=DNA GAP=-;", "\n", "MATRIX",
"\n", sep = "", append = TRUE)
utils::write.table(SuperMatDF, file = ConcatName, sep = "\t", append = TRUE, col.names = FALSE,
row.names = FALSE, quote = FALSE)
cat(file = ConcatName, "\t", ";", "\n", "END;", sep = "", append = TRUE)
# Create a fasta file for the supermatrix.
Seq.name.seq = paste(paste(">", SuperMatDF[, 1], sep = ""), SuperMatDF[, 2],
sep = "+++")
FastaAlign = unlist(strsplit(Seq.name.seq, "+++", fixed = TRUE))
if(is.null(outputConcat)){
ConcatName = paste(paste(b[-length(b)], collapse = "/"), "Concat.fas", sep = "/")
} else {
ConcatName = paste(outputConcat, ".fas", sep="")
}
  write(FastaAlign, file = ConcatName) # write the alignment
GeneName=gsub(".fas", "", unlist(lapply(strsplit(listAl, "_"), function(x) x[length(x)])), fixed=TRUE)
# Create a partition file for RAxML identifying the beginning and the end of each
# gene region.
genelength = vector()
i = 1
for (i in 1:dim(SuperMat)[2]) {
genelength = c(genelength, nchar(as.character(SuperMat[1, i])))
}
LimSup = cumsum(genelength) # Upper limit of the gene region.
liminf = c(1, LimSup[-length(LimSup)] + 1) # Lower limit of the gene region.
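  # Each line written below follows the RAxML partition format "DNA, geneX = start-end",
  # e.g. "DNA, gene1 = 1-652" (the positions in this example are hypothetical; the actual
  # limits come from liminf and LimSup above).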
  # Print the partition file (compatible with RAxML), and a conversion table giving the correspondence between the gene-region code used by
  # PartitionFinder2 and the true name of the gene region.
convtab=matrix(NA, ncol=2)[-1,]
i = 1
for (i in 1:dim(SuperMat)[2]) {
cat(file = paste(paste(b[-length(b)], collapse = "/"), "Partitions_Concat.txt",
sep = "/"), "DNA, gene", i, " = ", liminf[i], "-", LimSup[i], "\n", sep = "",
append = TRUE)
convtab = rbind(convtab, c(paste("gene", i, sep = ""), GeneName[i]))
}
colnames(convtab) = c("Name.PartitionFinder2", "Common.Gene.Name")
utils::write.table(convtab, file=paste(input, "/convtab.txt", sep=""), sep="\t", row.names=FALSE)
return(convtab)
} # End of the function.
|
f1507a4bd6840fb41fc6b70b240e9d8c8f319b58
|
c673605e54dd80c63433796bed3e71e74a4409ca
|
/svd/svd_i.R
|
c128c385d10767fab222ce33600bb83f01473e35
|
[
"BSD-3-Clause"
] |
permissive
|
hu17889/R_ALGO
|
fca739aa2fdb02077e81f02c1ec2506343f012bc
|
944010d874e027c20acfb218947735152c4421cf
|
refs/heads/master
| 2020-05-16T22:44:15.310226 | 2014-10-23T09:50:52 | 2014-10-23T09:50:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,252 |
r
|
svd_i.R
|
#!/usr/bin/env Rscript
# Non-fully-incremental learning algorithm, dense (non-sparse) matrix
r = 4 # number of latent features
nr = 6 # number of users
nc = 4 # number of items
# ground-truth rating matrix
inputdata = matrix(c(5,5,0,5,5,0,3,4,3,4,0,3,0,0,5,3,5,4,4,5,5,4,5,5), nrow = nr, ncol = nc, byrow = TRUE)
# initialize the factor matrices
U = matrix(seq(1,24),nrow=r,ncol=nr)
M = matrix(2,nrow=r,ncol=nc)
# initialize the regularization coefficients and the iteration step size
ku = 0.05
km = 0.05
u = 0.003
iter = 1
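# The repeat loop below performs gradient-descent-style updates on the regularized
# squared-error objective; spelled out, the updates it implements are
#   U[,i] <- U[,i] + u * ( sum_j (R[i,j] - t(U[,i]) %*% M[,j]) * M[,j] - ku * U[,i] )
#   M[,j] <- M[,j] + u * ( (R[i,j] - t(U[,i]) %*% M[,j]) * U[,i] - km * M[,j] )
# where R = inputdata; iteration stops once the RMSE of the reconstruction t(U) %*% M
# drops below 0.01 (or diverges to Inf).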
repeat
{
print(paste("---iter ",iter,"---"))
iter = iter + 1
for(i in c(1:nr)) {
# iteration increment matrix for U[,i]
t = matrix(0,nrow=r,ncol=nc)
for(j in c(1:nc)) {
t[,j] = (inputdata[i,j] - t(U[,i]) %*% M[,j]) * M[,j]
}
du = rowSums(t) - ku * U[,i]
U[,i] = U[,i] + u * du
# iteration increment matrix for M
dm = matrix(0,nrow=r,ncol=nc)
for(j in c(1:nc)) {
t = (inputdata[i,j] - t(U[,i]) %*% M[,j]) * U[,i]
dm[,j] = t - km * M[,j]
}
M = M + u * dm
}
# check whether the iteration should stop
RMSE = sqrt(sum((inputdata - t(U)%*%M)^2)/(nr*nc))
if(is.infinite(RMSE)) break
if(abs(RMSE)<0.01) break
print("du:")
print(du)
print("dm:")
print(dm)
print("U:")
print(U)
print("M:")
print(M)
print("U*M:")
print(t(U)%*%M)
print(paste("RMSE:",RMSE))
}
|
beb20afdae7219906b561e105d7825065df88f0e
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5976_6/rinput.R
|
105728d07d81490d552f0ba75325d541e4bb9e35
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 135 |
r
|
rinput.R
|
library(ape)
testtree <- read.tree("5976_6.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5976_6_unrooted.txt")
|
79c998eff8d20edab3c23a600fa7d9060b041bcd
|
2d8189185b86d69b097565649f18df45945717f5
|
/SLA Scripts/PApr 2007_SLA.R
|
20a277f697bef65c6cf6762adc1bea742fa50e54
|
[] |
no_license
|
eherdter/r-work
|
e2b9035e6098c06983b198de3674e535cbc04458
|
8f429408d89d9b7dfe8146b1d3b22a65af5e2780
|
refs/heads/master
| 2021-01-16T18:27:47.789159 | 2015-03-18T17:18:33 | 2015-03-18T17:18:33 | 30,160,696 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 4,427 |
r
|
PApr 2007_SLA.R
|
library(maps)
library(spam)
library(fields)
library(chron)
library(ncdf)
SSH_4_07 = open.ncdf("dt_global_allsat_msla_h_y2007_m04.nc")
lats = get.var.ncdf(SSH_4_07, "lat")
## the latsU correspond to the sla lats and longs
lons = get.var.ncdf(SSH_4_07, "lon")
###### for April 2007 ####
# for stations 31, 10-40, PC1120, PC1140, WBSL1040- lats and longs are ~ 29.125(477), 271.124(1085)
SSH_4_07_A =get.var.ncdf(SSH_4_07, "sla", start= c(1085,477,1), count=c(1,1,1))
# for stations 14, 4-40, BR0440 - lats and longs are ~ 28.1259(473), 275.625(1103)
SSH_4_07_B = get.var.ncdf(SSH_4_07, "sla", start=c(1103, 473, 1), count= c(1,1,1))
# for stations 36, PC1320- lats and longs are ~ 28.625(475) , 269.375(1078)
SSH_4_07_C = get.var.ncdf(SSH_4_07, "sla", start=c(1078, 475, 1), count= c(1,1,1))
# for stations 38, PC1340, lats and longs ~ 28.125(473) and 269.4155(1078)
SSH_4_07_D = get.var.ncdf(SSH_4_07, "sla", start=c(1078, 473, 1), count= c(1,1,1))
# for station 58 ~ 475, 1073
SSH_4_07_E = get.var.ncdf(SSH_4_07, "sla", start=c(1073, 475, 1), count= c(1,1,1))
# for station BR3440, (472, 1103)
SSH_4_07_F = get.var.ncdf(SSH_4_07, "sla", start=c(1103, 472, 1), count= c(1,1,1))
#for station PC0610 and PC0620, ~ (478, 1098)
SSH_4_07_G = get.var.ncdf(SSH_4_07, "sla", start=c(1098, 478, 1), count= c(1,1,1))
# for PC1220, 33, 34, (476,1083)
SSH_4_07_H = get.var.ncdf(SSH_4_07, "sla", start=c(1083, 476, 1), count= c(1,1,1))
#for PC1320, He265, 37 ~ (474, 1078)
SSH_4_07_I = get.var.ncdf(SSH_4_07, "sla", start=c(1078, 474, 1), count= c(1,1,1))
# For PC1520 ~ (479, 1087)
SSH_4_07_J = get.var.ncdf(SSH_4_07, "sla", start=c(1087, 479, 1), count= c(1,1,1))
#For PC81460 (479, 1091)
SSH_4_07_K = get.var.ncdf(SSH_4_07, "sla", start=c(1091, 479, 1), count= c(1,1,1))
# For BOR0340 (471, 1104)
SSH_4_07_L = get.var.ncdf(SSH_4_07, "sla", start=c(1104, 471, 1), count= c(1,1,1))
# for BR0320 (471, 1107)
SSH_4_07_M = get.var.ncdf(SSH_4_07, "sla", start=c(1107, 471, 1), count= c(1,1,1))
#For 82 (472, 1102)
SSH_4_07_N = get.var.ncdf(SSH_4_07, "sla", start=c(1102, 472, 1), count= c(1,1,1))
# For WB16150 (475, 1080)
SSH_4_07_O = get.var.ncdf(SSH_4_07, "sla", start=c(1080, 475, 1), count= c(1,1,1))
# For 51 (476, 1080)
SSH_4_07_P = get.var.ncdf(SSH_4_07, "sla", start=c(1080, 476, 1), count= c(1,1,1))
# for 16 (476, 1100)
SSH_4_07_Q = get.var.ncdf(SSH_4_07, "sla", start=c(1100, 476, 1), count= c(1,1,1))
# For 15 (476,1101)
SSH_4_07_R = get.var.ncdf(SSH_4_07, "sla", start=c(1101, 476, 1), count= c(1,1,1))
#For 28 (477, 1086)
SSH_4_07_S = get.var.ncdf(SSH_4_07, "sla", start=c(1086, 477, 1), count= c(1,1,1))
SSH_4_07_T = get.var.ncdf(SSH_4_07, "sla", start=c(1102, 477, 1), count= c(1,1,1))
#for Br 4/5 10 (477 1105)
SSH_4_07_U = get.var.ncdf(SSH_4_07, "sla", start=c(1105, 477, 1), count= c(1,1,1))
# for 27, PC1020 (478, 1086)
SSH_4_07_V = get.var.ncdf(SSH_4_07, "sla", start=c(1086, 478, 1), count= c(1,1,1))
# for PC1010 (479,1086)
SSH_4_07_W = get.var.ncdf(SSH_4_07, "sla", start=c(1086, 479, 1), count= c(1,1,1))
# for PC0920 (479, 1088)
SSH_4_07_X = get.var.ncdf(SSH_4_07, "sla", start=c(1088, 479, 1), count= c(1,1,1))
# For PC0910 (480, 1088)
SSH_4_07_Y = get.var.ncdf(SSH_4_07, "sla", start=c(1088, 480, 1), count= c(1,1,1))
# for PC1420 (480,1091)
SSH_4_07_Z = get.var.ncdf(SSH_4_07, "sla", start=c(1091, 480, 1), count= c(1,1,1))
# For WBSL840 (480, 1092)
SSH_4_07_AA = get.var.ncdf(SSH_4_07, "sla", start=c(1092, 480, 1), count= c(1,1,1))
# for PC0720 (481, 1095)
SSH_4_07_BB = get.var.ncdf(SSH_4_07, "sla", start=c(1095, 481, 1), count= c(1,1,1))
# for PC1510 (481, 1087)
SSH_4_07_CC = get.var.ncdf(SSH_4_07, "sla", start=c(1087, 481, 1), count= c(1,1,1))
#for PC0710 (482, 1096)
SSH_4_07_DD = get.var.ncdf(SSH_4_07, "sla", start=c(1096, 482, 1), count= c(1,1,1))
letters = c("A", "B", "C", "D","E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "AA", "BB", "CC", "DD")
mat_4_07= c(SSH_4_07_A, SSH_4_07_B, SSH_4_07_C, SSH_4_07_D, SSH_4_07_E, SSH_4_07_F, SSH_4_07_G, SSH_4_07_H, SSH_4_07_I, SSH_4_07_J, SSH_4_07_K, SSH_4_07_L, SSH_4_07_M, SSH_4_07_N, SSH_4_07_O, SSH_4_07_P, SSH_4_07_Q, SSH_4_07_R, SSH_4_07_S, SSH_4_07_T, SSH_4_07_U, SSH_4_07_V, SSH_4_07_W, SSH_4_07_X, SSH_4_07_Y, SSH_4_07_Z, SSH_4_07_AA, SSH_4_07_BB, SSH_4_07_CC, SSH_4_07_DD)
mat_4_07 <- data.frame(cbind(letters, mat_4_07))
|
56602d168e793500d254e414ed2a4afd1219dfc4
|
d6bd873a9b74236be1b016a496acaec69c0ee066
|
/man/modelList.Rd
|
c16827f6b8025781a23adf31c3660a2bcb3d4fd9
|
[] |
no_license
|
BenRollert/ensembler
|
4bd6be615f83546f3cf67d2ecd3210bd5117b36a
|
cda6a8e12dcfb6b68750044d71e4806ac2da1acc
|
refs/heads/master
| 2020-07-14T10:20:56.287335 | 2015-05-22T22:12:35 | 2015-05-22T22:12:35 | 35,344,162 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 789 |
rd
|
modelList.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/model_func.R
\name{modelList}
\alias{modelList}
\title{Create a list of caret train objects trained on multiple Domino instances.}
\usage{
modelList(dataset, models)
}
\arguments{
\item{dataset}{A character string specifying the name of the dataset you are training the models on.}
\item{models}{Character string vector specifying the names of the models you wish to load.}
}
\value{
a list of class "ensemble" containing caret train objects.
}
\description{
Assumes caret train objects are downloaded as .Rda files to a single machine. Loads Rda files and returns all caret models in a single list.
}
\examples{
models <- c("nnet", "rf", "gbm")
modelList(dataset = "BreastCancer", models = models)
}
|
bc634b4dd1fceb29146b38fde92cbda776f9a766
|
98c29220391a8fc864ba394536c6cde766dc8ecd
|
/standard_eqtl_calling/visualize_banovich_chrom_hmm_enrichment_analysis.R
|
105872cab79925b0d03cfe5be1fed09d64d78b46
|
[] |
no_license
|
BennyStrobes/ipsc_cardiomyocyte_differentiation
|
175d2a86b07e6027a343b79376a07eba7941607a
|
6f6ac227df5f7ea2cc9e89563447d429aae2eeb5
|
refs/heads/master
| 2021-07-11T07:09:15.169745 | 2020-07-02T15:37:54 | 2020-07-02T15:37:54 | 156,638,838 | 3 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 9,105 |
r
|
visualize_banovich_chrom_hmm_enrichment_analysis.R
|
args = commandArgs(trailingOnly=TRUE)
library(ggplot2)
library(ggthemes)
library(cowplot)
library(reshape)
load_in_odds_ratios <- function(file_name, adding_constant) {
aa <- read.table(file_name,header=TRUE)
real_overlaps <- as.numeric(aa$real_overlaps) + adding_constant
real_misses <- as.numeric(aa$real_misses) + adding_constant
perm_overlaps <- as.numeric(aa$perm_overlaps) + adding_constant
perm_misses <- as.numeric(aa$perm_misses) + adding_constant
odds_ratios <- (real_overlaps/real_misses)/(perm_overlaps/perm_misses)
return(odds_ratios)
}
odds_ratio_cell_line_specific_boxplot <- function(ipsc_early_or, ipsc_late_or, cardio_early_or, cardio_late_or, ipsc_change_or, cardio_change_or, output_file, marker_type) {
odds_ratios <- c()
roadmap_cell_types <- c()
dynamic_qtl_versions <- c()
odds_ratios <- c(odds_ratios, ipsc_early_or)
roadmap_cell_types <- c(roadmap_cell_types, rep("ipsc", length(ipsc_early_or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep("early_qtl", length(ipsc_early_or)))
odds_ratios <- c(odds_ratios, ipsc_late_or)
roadmap_cell_types <- c(roadmap_cell_types, rep("ipsc", length(ipsc_late_or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep("late_qtl", length(ipsc_late_or)))
odds_ratios <- c(odds_ratios, cardio_early_or)
roadmap_cell_types <- c(roadmap_cell_types, rep("heart", length(cardio_early_or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep("early_qtl", length(cardio_early_or)))
odds_ratios <- c(odds_ratios, cardio_late_or)
roadmap_cell_types <- c(roadmap_cell_types, rep("heart", length(cardio_early_or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep("late_qtl", length(cardio_late_or)))
odds_ratios <- c(odds_ratios, ipsc_change_or)
roadmap_cell_types <- c(roadmap_cell_types, rep("ipsc", length(ipsc_change_or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep("change_qtl", length(ipsc_change_or)))
odds_ratios <- c(odds_ratios, cardio_change_or)
roadmap_cell_types <- c(roadmap_cell_types, rep("heart", length(cardio_change_or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep("change_qtl", length(cardio_change_or)))
df <- data.frame(odds_ratio=odds_ratios,roadmap_cell_type=factor(roadmap_cell_types,levels=c("ipsc","heart")), qtl_version=factor(dynamic_qtl_versions))
# PLOT
boxplot <- ggplot(df, aes(x=roadmap_cell_type,y=odds_ratio,fill=qtl_version)) + geom_boxplot() + labs(x = "Roadmap Cell Type", y = "Odds Ratio", title= marker_type)
boxplot <- boxplot + theme(text = element_text(size=18))
boxplot <- boxplot + geom_hline(yintercept = 1.0)
ggsave(boxplot, file=output_file,width = 20,height=10.5,units="cm")
}
all_available_enrichments_boxplot <- function(cre, cell_lines, adding_constant, num_permutations, input_root, output_file, title) {
odds_ratios <- c()
roadmap_cell_types <- c()
for (cell_line_counter in 1:length(cell_lines)) {
cell_line <- cell_lines[cell_line_counter]
input_file <- paste0(input_root, cell_line, "_cell_lines_", cre, "_", num_permutations, "_enrich.txt")
or <- load_in_odds_ratios(input_file, adding_constant)
odds_ratios <- c(odds_ratios, or)
roadmap_cell_types <- c(roadmap_cell_types, rep(cell_line, length(or)))
}
df <- data.frame(odds_ratio=odds_ratios,roadmap_cell_type=factor(roadmap_cell_types,levels=c("all", "ipsc", "heart", "ipsc_only", "heart_only", "heart_and_ipsc")))
# PLOT
boxplot <- ggplot(df, aes(x=roadmap_cell_type,y=odds_ratio)) + geom_boxplot() + labs(x = "Roadmap Cell Type", y = "Odds Ratio", title= title)
boxplot <- boxplot + theme(text = element_text(size=18))
boxplot <- boxplot + geom_hline(yintercept = 1.0)
ggsave(boxplot, file=output_file,width = 26,height=10.5,units="cm")
}
all_hits_enrichments_boxplot <- function(cre, cell_lines, hits_versions, adding_constant, num_permutations, input_root, output_file, marker_type) {
odds_ratios <- c()
roadmap_cell_types <- c()
dynamic_qtl_versions <- c()
for (cell_line_counter in 1:length(cell_lines)) {
for (hits_version_counter in 1:length(hits_versions)) {
cell_line <- cell_lines[cell_line_counter]
hits_version <- hits_versions[hits_version_counter]
input_file <- paste0(input_root, cre, "_", cell_line, "_cell_lines_", hits_version,"_hits_", num_permutations, "_",cluster_assignment, ".txt" )
or <- load_in_odds_ratios(input_file, adding_constant)
odds_ratios <- c(odds_ratios, or)
roadmap_cell_types <- c(roadmap_cell_types, rep(cell_line, length(or)))
dynamic_qtl_versions <- c(dynamic_qtl_versions, rep(hits_version, length(or)))
}
}
df <- data.frame(odds_ratio=odds_ratios,roadmap_cell_type=factor(roadmap_cell_types,levels=c("all", "ipsc", "heart", "ipsc_only", "heart_only", "heart_and_ipsc")), qtl_version=factor(dynamic_qtl_versions))
# PLOT
boxplot <- ggplot(df, aes(x=roadmap_cell_type,y=odds_ratio,fill=qtl_version)) + geom_boxplot() + labs(x = "Roadmap Cell Type", y = "Odds Ratio", title= marker_type)
boxplot <- boxplot + theme(text = element_text(size=18))
boxplot <- boxplot + geom_hline(yintercept = 1.0)
boxplot <- boxplot + theme(legend.position="none")
ggsave(boxplot, file=output_file,width = 26,height=10.5,units="cm")
}
only_all_enrichment_boxplot <- function(cell_line, adding_constant, num_permutations, input_root, output_file, title) {
odds_ratios <- c()
cre_version <- c()
cre <- "promotor"
input_file <- paste0(input_root, cell_line, "_cell_lines_", cre, "_", num_permutations, "_enrich.txt")
or <- load_in_odds_ratios(input_file, adding_constant)
odds_ratios <- c(odds_ratios, or)
cre_version <- c(cre_version, rep(cre, length(or)))
cre <- "enhancer"
input_file <- paste0(input_root, cell_line, "_cell_lines_", cre, "_", num_permutations, "_enrich.txt")
or <- load_in_odds_ratios(input_file, adding_constant)
odds_ratios <- c(odds_ratios, or)
cre_version <- c(cre_version, rep(cre, length(or)))
df <- data.frame(odds_ratio=odds_ratios, cre_type=factor(cre_version))
# PLOT
boxplot <- ggplot(df, aes(x=cre_type,y=odds_ratio,fill=cre_type)) + geom_boxplot() + labs(x = "CRE Type", y = "Odds Ratio",title=title)
boxplot <- boxplot + theme(text = element_text(size=18))
boxplot <- boxplot + geom_hline(yintercept = 1.0)
boxplot <- boxplot + theme(legend.position="none")
ggsave(boxplot, file=output_file,width = 15,height=10.5,units="cm")
}
input_directory <- args[1]
visualization_directory <- args[2]
num_permutations <- args[3]
cell_line <- "all"
adding_constant <- 0
output_file <- paste0(visualization_directory, "banovich_ipsc_prom_enh_all_enrichments_num_perm_", num_permutations, "_odds_ratios.png")
only_all_enrichment_boxplot(cell_line, adding_constant, num_permutations, paste0(input_directory, "banovich_ipsc_"), output_file, "banovich_ipsc")
cell_line <- "all"
adding_constant <- 0
output_file <- paste0(visualization_directory, "banovich_cm_prom_enh_all_enrichments_num_perm_", num_permutations, "_odds_ratios.png")
only_all_enrichment_boxplot(cell_line, adding_constant, num_permutations, paste0(input_directory, "banovich_cm_"), output_file, "banovich_cm")
cell_lines <- c("all", "ipsc", "heart", "ipsc_only", "heart_only", "heart_and_ipsc")
adding_constant <- 1
########################
# Promoter
########################
cre <- "promotor"
output_file <- paste0(visualization_directory, "banovich_ipsc_",cre,"_num_perm_",num_permutations,"_odds_ratios.png")
all_available_enrichments_boxplot(cre, cell_lines, adding_constant, num_permutations, paste0(input_directory, "banovich_ipsc_"), output_file, "banovich_ipsc promotor")
########################
# Enhancer
########################
cre <- "enhancer"
output_file <- paste0(visualization_directory, "banovich_ipsc_",cre,"_num_perm_",num_permutations,"_odds_ratios.png")
all_available_enrichments_boxplot(cre, cell_lines, adding_constant, num_permutations, paste0(input_directory, "banovich_ipsc_"), output_file, "banovich_ipsc enhancer")
########################
# Promoter
########################
cre <- "promotor"
output_file <- paste0(visualization_directory, "banovich_cm_",cre,"_num_perm_",num_permutations,"_odds_ratios.png")
all_available_enrichments_boxplot(cre, cell_lines, adding_constant, num_permutations, paste0(input_directory, "banovich_cm_"), output_file, "banovich_cm promotor")
########################
# Enhancer
########################
cre <- "enhancer"
output_file <- paste0(visualization_directory, "banovich_cm_",cre,"_num_perm_",num_permutations,"_odds_ratios.png")
all_available_enrichments_boxplot(cre, cell_lines, adding_constant, num_permutations, paste0(input_directory, "banovich_cm_"), output_file, "banovich_cm enhancer")
|
0617135564a1868fd5455e7e73ea8cd74f4a06f0
|
9e835c1f388bfbb3cdfbacf7a99ac54ba1215857
|
/PEGASUS/analysis code.R
|
d4c900b7fb555108838c2849dbe39e86ba78c5f5
|
[] |
no_license
|
yizhenxu/Reinforcement-Learning
|
efcf1da09a13c8b623dfd34b61c6ee369f8ed9c4
|
9751e0dd8dfb01ba80513cad72e667e22bbc3ba1
|
refs/heads/master
| 2021-08-23T12:26:41.276211 | 2017-12-04T22:30:01 | 2017-12-04T22:30:01 | 113,102,399 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 3,028 |
r
|
analysis code.R
|
# change the probability of each direction to 0.225, so it is 0.9 prob for g(s,a,p) to use a
library("Rcpp")
library("RcppArmadillo")
library("parallel")
sourceCpp("~/codefoo.cpp")
map = matrix(0,5,5)
map[5,1] = 1 # start S
map[1,5] = 2 # end G
# 1up: i-1,j
# 2left: i,j-1
# 3down: i+1,j
# 4right: i,j+1
action.value = cbind(c(-1,0,1,0),c(0,-1,0,1))
# eachcat was 0.05 in the paper example, P(delta(s,a)) in g(s,a,p) would be 1 - eachcat*4
# location -> type of position
#up, left, down, right
#i0,j0, , --- 6
# ,j0,i6, --- 3 (S)
# , ,i6,j6 --- 8
# , , , --- 0 ->5
#i0, , , --- 4
# ,j0, , --- 2
# , ,i6, --- 1
# , , ,j6 --- 7
#i0, , ,j6 --- 11 (G)
# 1,2,3,4,5,6,7,8,11(G)
position = function(s){
s.new = s + action.value
type = sum(c(4,NA,1,NA,NA,2,NA,7)[s.new %in% c(0,6)])
if(type == 0) type = 5
return(type)
}
position.matrix = apply( expand.grid(1:5,1:5),1, position)
position.matrix = matrix(position.matrix,nrow=5)
# define policy class based on position
# policy: position -> action 1,2,3,4
# each column corresponds to a position -- 8 positions
policy.class = expand.grid(1:4,1:4,1:4,1:4,1:4,1:4,1:4,1:4)
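# expand.grid above enumerates every deterministic mapping from the 8 position types
# to the 4 actions, i.e. 4^8 = 65536 candidate policies (cf. the policy.num note below).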
gamma = 0.99
H = 100 # number of time steps
#mlist # number of simulated sample in each trial
#policy.num = 65536
#####################################################
###Train
maxm = 30
eachcat = 0.05
tmp = simplify2array(policy.class)
calc_all = function(mlist,eachcat){
plist = matrix(runif(mlist*H), ncol = H)# plist is k specific
look = pertrial_c(gamma, plist, tmp, position.matrix, action.value, eachcat )
return(look)
}
calc_all_rand = function(mlist,eachcat){
plist = matrix(runif(mlist*65536*H), ncol = H)# plist is k specific
look = pertrial_rand_c(gamma, plist, tmp, position.matrix, action.value, eachcat )
return(look)
}
set.seed(1)
opt_pi = matrix(NA,nrow = maxm, ncol = 8)
for(m in 1:maxm){
res = calc_all(m, eachcat) # 65536 x m
VM = apply(res, 1, mean)
opt_pi[m,] = tmp[which.max(VM),]
}
opt_pi_rand = matrix(NA,nrow = maxm, ncol = 8)
for(m in 1:maxm){
res = calc_all_rand(m, eachcat) # 65536 x m
VM = apply(res, 1, mean)
opt_pi_rand[m,] = tmp[which.max(VM),]
}
# maxm x K
K=10000
ptm <- proc.time()
randmat = matrix(runif(2*H*maxm*K),ncol=H)
opt_pi_mat = rbind(opt_pi, opt_pi_rand)
eval = Test_c(K, gamma, opt_pi_mat, position.matrix, action.value, eachcat, randmat)
meanPV = apply(eval, 1, mean)
proc.time() - ptm
save(meanPV,file = paste0("RLProj_K10000_TrainTest_seed1.RData"))
setwd("C:\\Users\\Yizhen Xu\\Google Drive\\Desktop\\2015 summer\\Reinforcement learning in HIV\\Project\\project code")
load("RLProj_K10000_TrainTest.RData")
load("RLProj_K10000_TrainTest_seed1.RData")
plot(1:30,meanPV[1:30],type = "o",ylim=c(min(meanPV),-7.5),xlab = "m", ylab="Mean Policy Value",main = "Figure 1b Replication (K=10,000)")
lines(1:30,meanPV[31:60],type = "o",col="red")
gamma =0.99
VH = function(E) -1*(1-gamma^E)/(1-gamma)
abline(h=VH(8))
text(locator(), "red line: random p \n black line: PEGASUS")
|
29cbdc218e8f45ffcca5c662113aa38bf2fec367
|
7ca4419b9a542ec7cd796db8b9ccf2828eaec062
|
/man/Cytosine.Rd
|
689e0be2e80f3d57a6deb9e6989c2ab182da181b
|
[] |
no_license
|
danielbraas/ShinyMetab
|
7d5f3688f3a2f3b5337d4c265104fd4f94ccf001
|
bd4767d912697f63a29324ec7a7ee981ff109f07
|
refs/heads/main
| 2023-02-05T02:59:48.169527 | 2020-12-29T05:47:58 | 2020-12-29T05:47:58 | 325,178,802 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 338 |
rd
|
Cytosine.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Cytosine}
\alias{Cytosine}
\title{A character vector of cytosine-related compounds.}
\format{
A character vector of length 9.
}
\usage{
Cytosine
}
\description{
A character vector of cytosine-related compounds.
}
\keyword{datasets}
|
d886387d2cb6f02eb5cfce33b90170158dff920e
|
6ebc3e12c3bfdd8c34b63c1da3cb10442cf70c3b
|
/R/commonplot.R
|
b66565ef47dee3116ab7b239855f427466ae1bcf
|
[] |
no_license
|
epicentre-msf/rosm
|
6f8900fcfed4f29bf5a6c532e6c991cce9bc6040
|
8f417038ccc09d9b00020f673ec6a1ad99ea224a
|
refs/heads/master
| 2021-01-22T21:57:53.840997 | 2017-04-07T15:43:24 | 2017-04-07T15:43:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,250 |
r
|
commonplot.R
|
#functions used by both google and osm
tile.cachedir <- function(type, cachedir=NULL) {
if(is.null(cachedir)) {
cachedir <- get_default_cachedir()
}
  safename <- gsub("[^a-zA-Z0-9]", "", type$name)
folder <- file.path(cachedir, safename)
created <- dir.create(folder, showWarnings=FALSE, recursive=TRUE)
folder
}
tile.plotarray <- function(image, box) {
graphics::rasterImage(image, box[1,1], box[2,1], box[1,2], box[2,2])
}
tile.autozoom <- function(res=150, epsg=4326) {
ext <- graphics::par("usr")
midy <- mean(c(ext[3], ext[4]))
rightmid <- .tolatlon(ext[2], midy, epsg)
centermid <- .tolatlon(mean(c(ext[1], ext[2])), midy, epsg)
leftmid <- .tolatlon(ext[1], midy, epsg)
anglewidth1 <- rightmid[1] - centermid[1]
if(anglewidth1 < 0) {
anglewidth1 <- anglewidth1+360
}
  anglewidth2 <- centermid[1] - leftmid[1]
if(anglewidth2 < 0) {
anglewidth2 <- anglewidth2+360
}
anglewidth <- anglewidth1+anglewidth2
#PROBLEMS WITH WIDE EXTENTS LIKE THE WORLD
widthin <- graphics::grconvertX(ext[2], from="user", to="inches") -
graphics::grconvertX(ext[1], from="user", to="inches")
widthpx <- widthin * res
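  # A web-mercator tile is 256 px wide and the whole 360 degrees of longitude fit in
  # one tile at zoom 0, doubling in pixel width per zoom level; solving
  # 256 * 2^zoom * (anglewidth / 360) = widthpx for zoom gives the expression below.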
zoom = log2((360.0 / anglewidth) * (widthpx / 256.0))
as.integer(floor(zoom))
}
|
8fb8e16577c0a926adc30279c9b2557b606d49e8
|
3f436064cd2299140e328117a2c0611281c9691e
|
/Chapter 2/0-setup.R
|
5edd89887efb619869e9d54ef50cab5cbd99305d
|
[] |
no_license
|
ZhangWS/dissertation
|
deb9e7f7bd1fd945c0266c1db27073e02f93e7bb
|
be9761f1c0ed5d05e8c377438c9407eeffee3102
|
refs/heads/master
| 2020-03-19T15:35:50.447168 | 2019-02-05T23:54:53 | 2019-02-05T23:54:53 | 136,677,561 | 1 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,141 |
r
|
0-setup.R
|
##################################
# CHAPTER ONE preliminary setup
##################################
#Make sure you run this before embarking on analyses for Sections 1-3
library(foreign)
library(dplyr)
library(ggplot2)
library(reshape2)
library(DescTools)
library(tidyr)
library(lsr)
#Read in initial data
setwd(".") #make sure your data is in the same directory!
d.data <- read.csv("20171120_dissertation_data.csv")
attach(d.data)
eng <- filter(d.data, Lang == 1)
chn <- filter(d.data, Lang == 2)
gain <- filter(d.data, Q1Form==1)
loss <- filter(d.data, Q1Form==2)
#Basic Data Exploration
nrow(d.data) #477 respondents
table(Q52_Age)
mean(Q52_Age, na.rm=T) #20.16 years
sd(Q52_Age, na.rm=T) #1.51 years
median(Q52_Age, na.rm=T) #20
table(Q53_Gender) #107 males, 364 females
table(Q54_Education)
#Language ability - MSM proficiency
sum(!is.na(chn$Q58_Chinese_ability)) #224
mean(chn$Q58_Chinese_ability, na.rm=T)
sd(chn$Q58_Chinese_ability, na.rm=T) #1.38
#English Proficiency
sum(!is.na(eng$Q58_Chinese_ability)) #98 responses
mean(eng$Q58_Chinese_ability, na.rm=T) #7.34
sd(eng$Q58_Chinese_ability, na.rm=T) #2.04
#Questions analyzed:
|
e0f8dbe28bd54d2d06f9e19432c1706036e7da59
|
1291bf249bff01814610befd45c512580beb9f2f
|
/man/dyCandlestick.Rd
|
fc3fedfe6d32e3b7009ed66a5036a89aee3eba17
|
[] |
no_license
|
pz10/dygraphs
|
058875bcb7126e1f5056564ae96b225a270d4fb2
|
a4e3553005a021fbf597b97ed5b9170f37bb611c
|
refs/heads/master
| 2021-01-19T14:28:08.771038 | 2017-03-19T14:37:07 | 2017-03-19T14:37:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 643 |
rd
|
dyCandlestick.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/candlestick.R
\name{dyCandlestick}
\alias{dyCandlestick}
\title{Candlestick plotter for dygraph chart}
\usage{
dyCandlestick(dygraph, compress = FALSE)
}
\arguments{
\item{dygraph}{Dygraph to draw chart on}
\item{compress}{If true, compress data yearly, quarterly, monthly, weekly or daily
according to overall amount of bars and/or current zoom level.}
}
\value{
Dygraph with specified candlestick plotter
}
\description{
Draw a candlestick chart.
}
\examples{
library(xts)
data(sample_matrix)
library(dygraphs)
dygraph(sample_matrix) \%>\%
dyCandlestick()
}
|
9da201db0c28c1d76251f17233c8da93f5e8c019
|
fe254ef6be0bd316d41b6796ef28f1c9e1d5551e
|
/R/CubeCoord.R
|
d1af468de3d9c94e9bb315117f5e388284931de0
|
[] |
no_license
|
matthias-da/robCompositions
|
89b26d1242b5370d78ceb5b99f3792f0b406289f
|
a8da6576a50b5bac4446310d7b0e7c109307ddd8
|
refs/heads/master
| 2023-09-02T15:49:40.315508 | 2023-08-23T12:54:36 | 2023-08-23T12:54:36 | 14,552,562 | 8 | 6 | null | 2019-12-12T15:20:57 | 2013-11-20T09:44:25 |
C++
|
UTF-8
|
R
| false | false | 22,497 |
r
|
CubeCoord.R
|
#' cubeCoord
#'
#' @name cubeCoord
#' @rdname cubeCoord
#' @importFrom tidyr unite
#' @importFrom tidyr spread
#' @importFrom graphics boxplot
#' @title Coordinate representation of a compositional cube and of a sample of compositional cubes
#' @aliases cubeCoord
#' @aliases cubeCoordWrapper
#' @author Kamila Facevicova
#' @references Facevicova, K., Filzmoser, P. and K. Hron (2019) Compositional Cubes: Three-factorial Compositional Data. Under review.
#' @description cubeCoord computes a system of orthonormal coordinates of a compositional cube.
#' Computation of either pivot coordinates or a coordinate system based on the given SBP is possible.
#'
#' @param x a data frame containing variables representing row, column and slice factors of the respective compositional cube and variable with the values of the composition.
#' @param row.factor name of the variable representing the row factor. Needs to be stated with the quotation marks.
#' @param col.factor name of the variable representing the column factor. Needs to be stated with the quotation marks.
#' @param slice.factor name of the variable representing the slice factor. Needs to be stated with the quotation marks.
#' @param value name of the variable representing the values of the composition. Needs to be stated with the quotation marks.
#' @param SBPr an \eqn{I-1\times I} array defining the sequential binary partition of the values of the row factor, where I is the number of the row factor levels. The values assigned in the given step to the + group are marked by 1, values from the - group by -1 and the rest by 0. If it is not provided, the pivot version of coordinates is constructed automatically.
#' @param SBPc an \eqn{J-1\times J} array defining the sequential binary partition of the values of the column factor, where J is the number of the column factor levels. The values assigned in the given step to the + group are marked by 1, values from the - group by -1 and the rest by 0. If it is not provided, the pivot version of coordinates is constructed automatically.
#' @param SBPs an \eqn{K-1\times K} array defining the sequential binary partition of the values of the slice factor, where K is the number of the slice factor levels. The values assigned in the given step to the + group are marked by 1, values from the - group by -1 and the rest by 0. If it is not provided, the pivot version of coordinates is constructed automatically.
#' @param pivot logical, default is FALSE. If TRUE, or one of the SBPs is not defined, its pivot version is used.
#' @param print.res logical, default is FALSE. If TRUE, the output is displayed in the Console.
#' @details This transformation moves the IJK-part compositional cubes from the simplex into a (IJK-1)-dimensional real space isometrically with respect to their three-factorial nature.
#' @keywords multivariate
#' @export
#' @seealso
#' \code{\link{tabCoord}}
#' \code{\link{tabCoordWrapper}}
#' @return
#' \item{Coordinates}{an array of orthonormal coordinates.}
#' \item{Grap.rep}{graphical representation of the coordinates.
#' Parts denoted by + form the groups in the numerator of the respective computational formula,
#' parts - form the denominator and parts . are not involved in the given coordinate.}
#' \item{Row.balances}{an array of row balances.}
#' \item{Column.balances}{an array of column balances.}
#' \item{Slice.balances}{an array of slice balances.}
#' \item{Row.column.OR}{an array of row-column OR coordinates.}
#' \item{Row.slice.OR}{an array of row-slice OR coordinates.}
#' \item{Column.slice.OR}{an array of column-slice OR coordinates.}
#' \item{Row.col.slice.OR}{an array of coordinates describing the mutual interaction between all three factors.}
#' \item{Contrast.matrix}{contrast matrix.}
#' \item{Log.ratios}{an array of pure log-ratios between groups of parts without the normalizing constant.}
#' \item{Coda.cube}{cube form of the given composition.}
#' \item{Bootstrap}{array of sample means, standard deviations and bootstrap confidence intervals.}
#' \item{Cubes}{Cube form of the given compositions.}
#' @examples
#' ###################
#' ### Coordinate representation of a CoDa Cube
#' \dontrun{
#' ### example from Fa\v cevicov\'a (2019)
#' data(employment2)
#' CZE <- employment2[which(employment2$Country == 'CZE'), ]
#'
#' # pivot coordinates
#' cubeCoord(CZE, "Sex", 'Contract', "Age", 'Value')
#'
#' # coordinates with given SBP
#'
#' r <- t(c(1,-1))
#' c <- t(c(1,-1))
#' s <- rbind(c(1,-1,-1), c(0,1,-1))
#'
#' cubeCoord(CZE, "Sex", 'Contract', "Age", 'Value', r,c,s)
#' }
cubeCoord <- function(x, row.factor=NULL, col.factor=NULL, slice.factor=NULL, value=NULL, SBPr=NULL, SBPc=NULL, SBPs=NULL, pivot=FALSE, print.res=FALSE)
{
# Control and subsidiary parameters setting
if(is.null(row.factor)) stop('Name of the row factor is not defined!')
if(is.null(col.factor)) stop('Name of the column factor is not defined!')
if(is.null(slice.factor)) stop('Name of the slice factor is not defined!')
if(is.null(value)) stop('Name of the value variable is not defined!')
x[,row.factor] <- as.factor(x[,row.factor])
x[,col.factor] <- as.factor(x[,col.factor])
x[,slice.factor] <- as.factor(x[,slice.factor])
I <- nlevels(x[,row.factor]) # number of row factor levels
J <- nlevels(x[,col.factor]) # number of column factor levels
K <- nlevels(x[,slice.factor]) # number of slice factor levels
y <- x[,c(row.factor, col.factor, slice.factor, value)]
x_vec <- y[order(y[,1], y[,2], y[,3]),4] # vectorized cube according to Facevicova19
if(!identical(as.numeric(table(x[,c(row.factor, col.factor)])),as.numeric(rep(K,I*J)))) stop('The CoDa Cube x is not defined properly, some values are missing!')
if(!is.null(SBPr)&(nrow(SBPr)!= (I-1)||ncol(SBPr)!=I))
{warning('The row SBP is not defined properly, pivot coordinates are used!')
SBPr <- NULL}
if(!is.null(SBPc)&(nrow(SBPc)!= (J-1)||ncol(SBPc)!=J))
{warning('The column SBP is not defined properly, pivot coordinates are used!')
SBPc <- NULL}
if(!is.null(SBPs)&(nrow(SBPs)!= (K-1)||ncol(SBPs)!=K))
{warning('The slice SBP is not defined properly, pivot coordinates are used!')
SBPs <- NULL}
### Definition of pivot SBP (if necessary)
if(is.null(SBPr)||pivot==TRUE)
{
SBPr <- numeric()
for(j in 1:(I-1))
{
novy <- c(rep(0,j-1), 1, rep(-1, I-j))
SBPr <- rbind(SBPr, novy)
}
#print("SBP of row factor is not defined, its pivot version is used!")
}
rownames(SBPr) <- NULL
if(is.null(SBPc)||pivot==TRUE)
{
SBPc <- numeric()
for(j in 1:(J-1))
{
novy <- c(rep(0,j-1), 1, rep(-1, J-j))
SBPc <- rbind(SBPc, novy)
}
#print("SBP of column factor is not defined, its pivot version is used!")
}
rownames(SBPc) <- NULL
if(is.null(SBPs)||pivot==TRUE)
{
SBPs <- numeric()
for(j in 1:(K-1))
{
novy <- c(rep(0,j-1), 1, rep(-1, K-j))
SBPs <- rbind(SBPs, novy)
}
#print("SBP of slice factor is not defined, its pivot version is used!")
}
rownames(SBPs) <- NULL
log_kontrasty <- function(x){
r <- length(which(x==1))
s <- length(which(x==-1))
koef1 <- sqrt((r*s)/(r+s))*(1/r)
koef2 <- -sqrt((r*s)/(r+s))*(1/s)
log_kontrast <- rep(0, length(x))
log_kontrast[which(x==1)] <- koef1
log_kontrast[which(x==-1)] <- koef2
return(log_kontrast)
}
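# Illustration (assumed values, not used anywhere in the computation): for the SBP row
# c(1, -1, -1) the + group has r = 1 part and the - group s = 2 parts, so log_kontrasty()
# returns c(sqrt(2/3), -sqrt(2/3)/2, -sqrt(2/3)/2) = c(0.816, -0.408, -0.408), i.e. the
# coefficients of the isometric log-ratio balance between the two groups.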
norm_const_balance <- function(x){ #based on vector of contrasts
r <- length(which(x>0))
s <- length(which(x<0))
koef <- sqrt((r*s)/(r+s))
return(koef)
}
norm_const_OR <- function(x){ #based on vector of contrasts
kladne = table(x[which(x>0)])
celkem = length(which(x!=0))
if(dim(kladne)==2)
koef=sqrt((kladne[1]*kladne[2])/celkem)
else
koef=sqrt((kladne*kladne/4)/celkem)
names(koef) = NULL
return(koef)
}
norm_const_ORR <- function(x,y,z){ #based on SBPr, SBPc, SBPs
A = length(which(x>0))*length(which(y>0))*length(which(z>0))
H = length(which(x<0))*length(which(y<0))*length(which(z<0))
vse = length(which(x!=0))*length(which(y!=0))*length(which(z!=0))
koef=sqrt(A*H/vse)
return(koef)
}
### expansion of SBP to the whole table
SBPr_cele <- matrix(SBPr[,rep(c(1:I), each=J*K)], ncol=I*J*K)
SBPc_cele <- matrix(SBPc[,rep(rep(c(1:J), each=K), I)], ncol=I*J*K)
SBPs_cele <- matrix(SBPs[,rep(c(1:K),I*J)], ncol=I*J*K)
### Generating vectors of:
# balances
LCr <- t(apply(SBPr_cele, 1, FUN=log_kontrasty))
LCc <- t(apply(SBPc_cele, 1, FUN=log_kontrasty))
LCs <- t(apply(SBPs_cele, 1, FUN=log_kontrasty))
# pairwise interaction coordinates
OR_deleni_r_c <- NULL
for(i in 1:(I-1))
{
for(j in 1:(J-1))
{
novy <- LCr[i,]*LCc[j,]
OR_deleni_r_c <- rbind(OR_deleni_r_c, novy)
}
}
rownames(OR_deleni_r_c) <- NULL
OR_deleni_r_s <- NULL
for(i in 1:(I-1))
{
for(k in 1:(K-1))
{
novy <- LCr[i,]*LCs[k,]
OR_deleni_r_s <- rbind(OR_deleni_r_s, novy)
}
}
rownames(OR_deleni_r_s) <- NULL
OR_deleni_s_c <- NULL
for(k in 1:(K-1))
{
for(j in 1:(J-1))
{
novy <- LCs[k,]*LCc[j,]
OR_deleni_s_c <- rbind(OR_deleni_s_c, novy)
}
}
rownames(OR_deleni_s_c) <- NULL
# full interaction coordinates
OR_deleni_r_c_s <- NULL
norm.constants.ORR <- NULL
for(i in 1:(I-1))
{
for(j in 1:(J-1))
{
for(k in 1:(K-1))
{
novy <- LCr[i,]*LCc[j,]*LCs[k,]
OR_deleni_r_c_s <- rbind(OR_deleni_r_c_s, novy)
const.nova <- norm_const_ORR(SBPr[i,],SBPc[j,],SBPs[k,])
norm.constants.ORR <- c(norm.constants.ORR, const.nova)
}
}
}
rownames(OR_deleni_r_c_s) <- NULL
OR_deleni <- rbind(OR_deleni_r_c, OR_deleni_r_s, OR_deleni_s_c, OR_deleni_r_c_s)
### matrix with generating vectors. Important for the back-transformation!
normovani <- function(x){x/(norm(as.matrix(x), type="f"))}
OR_contrasts <- t(apply(OR_deleni, 1, FUN=normovani))
contrasts <- rbind(LCr, LCc, LCs, OR_contrasts)
coord.names <- c(paste('z', 1:(I-1), '^r', sep=''), paste('z', 1:(J-1), '^c', sep=''), paste('z', 1:(K-1), '^s', sep=''),
paste('z', sort(outer(c(1:(I-1)),c(1:(J-1)), FUN=function(x,y)paste(x,y,sep=''))), '^rc', sep=''),
paste('z', sort(outer(c(1:(I-1)),c(1:(K-1)), FUN=function(x,y)paste(x,y,sep=''))), '^rs', sep=''),
paste('z', sort(outer(c(1:(J-1)),c(1:(K-1)), FUN=function(x,y)paste(x,y,sep=''))), '^cs', sep=''),
paste('z', sort(outer(outer(c(1:(I-1)),c(1:(J-1)), FUN=function(x,y)paste(x,y,sep='')),c(1:(K-1)), FUN=function(x,y)paste(x,y,sep=''))), '^rcs', sep=''))
rownames(contrasts) <- coord.names
colnames(contrasts) <- c(paste('x', sort(outer(outer(c(1:(I)),c(1:(J)), FUN=function(x,y)paste(x,y,sep='')),c(1:(K)), FUN=function(x,y)paste(x,y,sep=''))), sep=''))
### Coordinates
souradnice <- contrasts%*%log(x_vec)
rownames(souradnice) <- coord.names
### Pure log-ratios between groups of parts (without normalizing constant)
norm.constants.balance = apply(contrasts[1:(I+J+K-3), ], 1, norm_const_balance)
norm.constants.OR = apply(contrasts[(I+J+K-2):(I*J+I*K+K*J-I-J-K), ], 1, norm_const_OR)
# norm. constants for ORR coordinates were already computed with these coordinates
norm.constants = c(norm.constants.balance, norm.constants.OR, norm.constants.ORR)
log.ratios = souradnice/norm.constants
### Table form of the CoDa table
tab0 <- unite(y, slice.factor, col.factor, col='col_slice')
tab <- spread(tab0, 'col_slice', value)[,-1]
colnames(tab) <- levels(as.factor(tab0[,'col_slice']))
rownames(tab) <- levels(tab0[,row.factor])
### Graphical representation of groups within table:
grap.rep <- list()
permutation <- order(tab0[,1])
for(i in 1:nrow(contrasts))
{
grap.r <- rep(".",ncol(contrasts))
grap.r[which(contrasts[i,]>0)] <- "+"
grap.r[which(contrasts[i,]<0)] <- "-"
grap.r <- data.frame(tab0[permutation,c(1,2)], grap.r)
grap.r.tab <- spread(grap.r, 'col_slice', grap.r)[,-1]
row.names(grap.r.tab) <- levels(tab0[,row.factor])
grap.rep[[i]] <- grap.r.tab
}
names(grap.rep) <- coord.names
### Result:
if(print.res==TRUE)
{
print("Row balances:")
print(souradnice[c(1:(I-1))])
print(grap.rep[c(1:(I-1))])
print("Column balances:")
print(souradnice[c(I:(I+J-2))])
print(grap.rep[c(I:(I+J-2))])
print("Slice balances:")
print(souradnice[c((I+J-1):(I+J+K-3))])
print(grap.rep[c((I+J-1):(I+J+K-3))])
print("Row and Column odds ratio coordinates:")
print(souradnice[c((I+J+K-2):(I*J+K-2))])
print(grap.rep[c((I+J+K-2):(I*J+K-2))])
print("Row and Slice odds ratio coordinates:")
print(souradnice[c((I*J+K-1):(I*J+I*K-I-1))])
print(grap.rep[c((I*J+K-1):(I*J+I*K-I-1))])
print("Column and Slice odds ratio coordinates:")
print(souradnice[c((I*J+I*K-I):(I*J+I*K+K*J-I-J-K))])
print(grap.rep[c((I*J+I*K-I):(I*J+I*K+K*J-I-J-K))])
print("Row, Column and Slice odds ratio coordinates:")
print(souradnice[c((I*J+I*K+K*J-I-J-K+1):(I*J*K-1))])
print(grap.rep[c((I*J+I*K+K*J-I-J-K+1):(I*J*K-1))])
}
result <- list("Coordinates"=souradnice, "Grap.rep" = grap.rep, "Row.balances"=souradnice[c(1:(I-1)),1],
"Column.balances"=souradnice[c(I:(I+J-2)),1], "Slice.balances"=souradnice[c((I+J-1):(I+J+K-3)),1],
"Row.column.OR"=souradnice[c((I+J+K-2):(I*J+K-2)),1], "Row.slice.OR"=souradnice[c((I*J+K-1):(I*J+I*K-I-1)),1],
"Column.slice.OR"=souradnice[c((I*J+I*K-I):(I*J+I*K+K*J-I-J-K)),1], "Row.col.slice.OR"=souradnice[c((I*J+I*K+K*J-I-J-K+1):(I*J*K-1)),1],
'Log.ratios'=log.ratios, "Contrast.matrix" = contrasts, 'Coda.cube'=tab)
return(result)
}
#' @rdname cubeCoord
#' @param X a data frame containing variables representing row, column and slice factors
#' of the respective compositional cubes, variable with the values
#' of the composition and variable distinguishing the observations.
#' @param obs.ID name of the variable distinguishing the observations. Needs to be given in quotation marks.
#' @param test logical, default is FALSE. If TRUE, the bootstrap analysis of coordinates is provided.
#' @param n.boot number of bootstrap samples.
#' @description Wrapper (cubeCoordWrapper): For each compositional cube in the sample cubeCoordWrapper computes
#' a system of orthonormal coordinates and provides a simple descriptive analysis.
#' Computation of either pivot coordinates or a coordinate system based on the
#' given SBP is possible.
#' @details Wrapper (cubeCoordWrapper): Each of the n IJK-part compositional cubes in the sample is,
#' with respect to its three-factorial nature, isometrically transformed
#' from the simplex into a (IJK-1)-dimensional real space.
#' Sample means and standard deviations are computed, and a bootstrap
#' estimate of the 95 \% confidence interval is given.
#' @export
#' @examples
#'
#' ###################
#' ### Analysis of a sample of CoDa Cubes
#' \dontrun{
#' ### example from Fa\v cevicov\'a (2019)
#' data(employment2)
#' ### Compositional tables approach,
#' ### analysis of the relative structure.
#' ### An example from Facevi\v cov\'a (2019)
#'
#' # pivot coordinates
#' cubeCoordWrapper(employment2, 'Country', 'Sex', 'Contract', 'Age', 'Value',
#' test=TRUE)
#'
#' # coordinates with given SBP (defined in the paper)
#'
#' r <- t(c(1,-1))
#' c <- t(c(1,-1))
#' s <- rbind(c(1,-1,-1), c(0,1,-1))
#'
#' res <- cubeCoordWrapper(employment2, 'Country', 'Sex', 'Contract',
#' "Age", 'Value', r,c,s, test=TRUE)
#'
#' ### Classical approach,
#' ### generalized linear mixed effect model.
#'
#' library(lme4)
#' employment2$y <- round(employment2$Value*1000)
#' glmer(y~Sex*Age*Contract+(1|Country),data=employment2,family=poisson)
#'
#' ### other relations within cube (in the log-ratio form)
#' ### e.g. ratio between women and man in the group FT, 15to24
#' ### and ratio between age groups 15to24 and 55plus
#'
#' # transformation matrix
#' T <- rbind(c(1,rep(0,5), -1, rep(0,5)), c(rep(c(1/4,0,-1/4), 4)))
#' T %*% t(res$Contrast.matrix) %*%res$Bootstrap[,1]
#' }
cubeCoordWrapper <- function(X, obs.ID=NULL, row.factor=NULL, col.factor=NULL, slice.factor=NULL,
value=NULL, SBPr=NULL, SBPc=NULL, SBPs=NULL, pivot=FALSE,
test=FALSE, n.boot=1000){
# Control and subsidiary parameters setting
if(is.null(obs.ID)) stop('Name of the observation ID variable is not defined!')
if(is.null(row.factor)) stop('Name of the row factor is not defined!')
if(is.null(col.factor)) stop('Name of the column factor is not defined!')
if(is.null(slice.factor)) stop('Name of the slice factor is not defined!')
if(is.null(value)) stop('Name of the value variable is not defined!')
X[,obs.ID] <- as.factor(X[,obs.ID])
X[,row.factor] <- as.factor(X[,row.factor])
X[,col.factor] <- as.factor(X[,col.factor])
X[,slice.factor] <- as.factor(X[,slice.factor])
N <- nlevels(X[,obs.ID])
I <- nlevels(X[,row.factor]) # number of row factor levels
J <- nlevels(X[,col.factor]) # number of column factor levels
K <- nlevels(X[,slice.factor]) # number of slice factor levels
if(!identical(as.numeric(table(X[,c(row.factor,obs.ID)])),as.numeric(rep(J*K,(I*N))))) stop('The CoDa Cubes are not defined properly, some values are missing!')
if(!identical(as.numeric(table(X[,c(col.factor,obs.ID)])),as.numeric(rep(I*K,(J*N))))) stop('The CoDa Cubes are not defined properly, some values are missing!')
if(!is.null(SBPr)&(nrow(SBPr)!= (I-1)||ncol(SBPr)!=I))
{warning('The row SBP is not defined properly, pivot coordinates are used!')
SBPr <- NULL}
if(!is.null(SBPc)&(nrow(SBPc)!= (J-1)||ncol(SBPc)!=J))
{warning('The column SBP is not defined properly, pivot coordinates are used!')
SBPc <- NULL}
if(!is.null(SBPs)&(nrow(SBPs)!= (K-1)||ncol(SBPs)!=K))
{warning('The slice SBP is not defined properly, pivot coordinates are used!')
SBPs <- NULL}
Coordinates <- NULL
Log.ratios <- NULL
Row.balances <- NULL
Column.balances <- NULL
Slice.balances <- NULL
Row.column.OR <- NULL
Row.slice.OR <- NULL
Column.slice.OR <- NULL
Row.col.slice.OR <- NULL
Tables <- array(NA, c(nlevels(X[,row.factor]), nlevels(X[,col.factor])*nlevels(X[,slice.factor]), N))
for(i in 1:N)
{
obs <- which(X[,obs.ID]==levels(X[,obs.ID])[i])
new <- cubeCoord(x=X[obs,], row.factor=row.factor, col.factor=col.factor, slice.factor=slice.factor, value=value, SBPr=SBPr, SBPc=SBPc, SBPs=SBPs, pivot=pivot, print.res=FALSE)
Coordinates <- cbind(Coordinates, new$Coordinates)
Log.ratios <- cbind(Log.ratios, new$Log.ratios)
Row.balances <- cbind(Row.balances, new$Row.balances)
Column.balances <- cbind(Column.balances, new$Column.balances)
Slice.balances <- cbind(Slice.balances, new$Slice.balances)
Row.column.OR <- cbind(Row.column.OR, new$Row.column.OR)
Row.slice.OR <- cbind(Row.slice.OR, new$Row.slice.OR)
Column.slice.OR <- cbind(Column.slice.OR, new$Column.slice.OR)
Row.col.slice.OR <- cbind(Row.col.slice.OR, new$Row.col.slice.OR)
Tables[,,i] <- as.matrix(new$Coda.cube)
}
Coordinates <- t(Coordinates)
rownames(Coordinates) <- levels(X[,obs.ID])
colnames(Log.ratios) <- levels(X[,obs.ID])
colnames(Row.balances) <- levels(X[,obs.ID])
colnames(Column.balances) <- levels(X[,obs.ID])
colnames(Slice.balances) <- levels(X[,obs.ID])
colnames(Row.column.OR) <- levels(X[,obs.ID])
colnames(Row.slice.OR) <- levels(X[,obs.ID])
colnames(Column.slice.OR) <- levels(X[,obs.ID])
colnames(Row.col.slice.OR) <- levels(X[,obs.ID])
dimnames(Tables)[[1]] <- levels(X[,row.factor])
dimnames(Tables)[[2]] <- colnames(new$Grap.rep[[1]])
dimnames(Tables)[[3]] <- levels(X[,obs.ID])
res <- list('Coordinates'=Coordinates, 'Log.ratios'=t(Log.ratios), 'Row.balances'=t(Row.balances),
'Column.balances'=t(Column.balances), 'Slice.balances'=t(Slice.balances),
'Row.column.OR'=t(Row.column.OR), 'Row.slice.OR'=t(Row.slice.OR), 'Column.slice.OR'=t(Column.slice.OR),
'Row.col.slice.OR'=t(Row.col.slice.OR),
'Grap.rep'=new$Grap.rep, 'Contrast.matrix'=new$Contrast.matrix, 'Cubes'=Tables )
if(test==TRUE)
{
# sample characteristics
mean <- apply(Coordinates, 2, mean)
sd <- apply(Coordinates, 2, sd)
#set.seed(123)
I <- nlevels(X[,row.factor]) # number of row factor values
J <- nlevels(X[,col.factor]) # number of column factor values
K <- nlevels(X[,slice.factor]) # number of slice factor values
# number of bootstrap replications
opakovani <- n.boot
xlab <- c(paste('z', 1:(I-1), '^r', sep=''), paste('z', 1:(J-1), '^c', sep=''), paste('z', 1:(K-1), '^s', sep=''),
paste('z', sort(outer(c(1:(I-1)),c(1:(J-1)), FUN=function(x,y)paste(x,y,sep=''))), '^rc', sep=''),
paste('z', sort(outer(c(1:(I-1)),c(1:(K-1)), FUN=function(x,y)paste(x,y,sep=''))), '^rs', sep=''),
paste('z', sort(outer(c(1:(J-1)),c(1:(K-1)), FUN=function(x,y)paste(x,y,sep=''))), '^cs', sep=''),
paste('z', sort(outer(outer(c(1:(I-1)),c(1:(J-1)), FUN=function(x,y)paste(x,y,sep='')),c(1:(K-1)), FUN=function(x,y)paste(x,y,sep=''))), '^rcs', sep=''))
boxplot(Coordinates, notch=TRUE, names=xlab)
abline(a=0, b=0, lty="dashed")
means <- t(replicate(opakovani,apply(Coordinates[sample(N,replace=TRUE),],2,mean)))
CIl <- apply(means,2,quantile,0.025)
CIu <- apply(means,2,quantile,0.975)
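# 95% percentile bootstrap interval: CIl and CIu are the 2.5% and 97.5% quantiles of the
# n.boot resampled coordinate means, giving one confidence interval per coordinate.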
Bootstrap <- cbind(mean, sd, CIl, CIu)
res <- list('Coordinates'=Coordinates, 'Log.ratios'=t(Log.ratios), 'Row.balances'=t(Row.balances),
'Column.balances'=t(Column.balances), 'Slice.balances'=t(Slice.balances),
'Row.column.OR'=t(Row.column.OR), 'Row.slice.OR'=t(Row.slice.OR), 'Column.slice.OR'=t(Column.slice.OR),
'Row.col.slice.OR'=t(Row.col.slice.OR),
'Grap.rep'=new$Grap.rep, 'Contrast.matrix'=new$Contrast.matrix, 'Cubes'=Tables, 'Bootstrap'=Bootstrap)
}
return(res)
}
|
d8cef0c611b78ed8bcc99e3851080f9a5141daf3
|
2b5728585d67ad9f0210a21189459a1515faa72f
|
/R/fullFact.R
|
ea42c23f8f6e2b1f7ea8ad2ccba5ed27a4f1c21a
|
[] |
no_license
|
Matherion/userfriendlyscience
|
9fb8dd5992dcc86b84ab81ca98d97b9b65cc5133
|
46acf718d692a42aeebdbe9a6e559a7a5cb50c77
|
refs/heads/master
| 2020-12-24T16:35:32.356423 | 2018-09-25T06:41:14 | 2018-09-25T06:41:14 | 49,939,242 | 15 | 9 | null | 2018-11-17T10:34:37 | 2016-01-19T08:50:54 |
R
|
UTF-8
|
R
| false | false | 1,324 |
r
|
fullFact.R
|
#' fullFact
#'
#' This function provides a user-friendly interface to a number of advanced
#' factor analysis functions in the \code{\link{psych}} package.
#'
#'
#' @param dat Datafile to analyse; if NULL, a pop-up is provided to select a
#' file.
#' @param items Which variables (items) to factor-analyse. If NULL, all are
#' selected.
#' @param rotate Which rotation to use (see \code{\link{psych}} package).
#' @return The outcomes, which are printed to the screen unless assigned.
#' @author Gjalt-Jorn Peters
#'
#' Maintainer: Gjalt-Jorn Peters <gjalt-jorn@@userfriendlyscience.com>
#' @seealso \code{\link{fa.parallel}}, \code{\link{vss}}
#' @keywords univariate
#' @examples
#'
#' \dontrun{
#' ### Not run to save processing during package testing
#' fullFact(attitude);
#' }
#'
#' @export fullFact
fullFact <- function(dat = NULL, items=NULL, rotate='oblimin') {
res <- list(input = as.list(environment()),
intermediate = list(),
output = list());
if (is.null(dat)) {
dat <- getData();
}
if (is.null(items)) {
items <- names(dat);
}
res$output$parallel <- fa.parallel(dat[, items]);
res$output$vss <- vss(dat[, items], rotate=rotate);
class(res) <- 'fullFact';
return(res);
}
print.fullFact <- function(x, ...) {
print(x$output);
}
|
79b0227f06ba17135b49df2df53ffe9cab2b34e9
|
3838084df843d65746fcdd9a7eb274cd2087aece
|
/Examples/tracking_debugg.R
|
1ed2bd42eca82ebbc815bfd7689b1b719b5303a2
|
[] |
no_license
|
jie108/FOD_Needlets_codes
|
772b2ff5bbb537725dcafa2b5be8887d6a626ff9
|
76223d7598941ad1f8e7989715a26fb295ad56e5
|
refs/heads/master
| 2020-08-15T21:50:02.140122 | 2019-10-15T23:10:58 | 2019-10-15T23:10:58 | 215,412,811 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 11,025 |
r
|
tracking_debugg.R
|
rm(list=ls())
library(R.matlab)
library(rgl)
library(compositions)
source("dwi_fit.R")
source("dwi_track.R")
path_load = '/Users/hao/Dropbox/stats_project/FOD_codes_simulation/Real_data/S110933/fitting/space_indexx108-123y124-139z37-42/'
num_fib_cut = 4
temp = readMat(paste0(path_load,'for_tracking_cut',toString(num_fib_cut),'.mat'))
v.obj = temp
eig = -v.obj$vec
loc = v.obj$loc
tracks1 <- list()
tracks2 <- list()
all.pvox <- NULL
all.pdir <- NULL
all.pdis <- NULL
all.ppdis <- NULL
n.use.iind <- array(0, dim=length(v.obj$n.fiber2))
n.iinds <- array(0,dim=length(v.obj$n.fiber2))
lens <- array(0, dim=length(v.obj$n.fiber2))
braingrid = temp$braingrid
xgrid.sp = temp$xgrid.sp
ygrid.sp = temp$ygrid.sp
zgrid.sp = temp$zgrid.sp
map = temp$map
rmap = temp$rmap
n.fiber = temp$n.fiber
n.fiber2 = temp$n.fiber2
max.line = 100
nproj = 2
thres.ang = 0.5235988
vorient=c(1,1,1)
elim = T
elim.thres = 1
for (iind in which(v.obj$n.fiber2>0)){
cat(iind,"\n")
tracks1[[iind]] <- fiber.track(iind=iind, eig=v.obj$vec, loc=v.obj$loc,
map=v.obj$map, rmap=v.obj$rmap,
n.fiber=v.obj$n.fiber, xgrid.sp=xgrid.sp,
ygrid.sp=ygrid.sp, zgrid.sp=zgrid.sp, braingrid=braingrid,
max.line=max.line, nproj=nproj, thres.ang=thres.ang,
vorient=vorient)
tracks2[[iind]] <- fiber.track(iind=iind, eig=-v.obj$vec, loc=v.obj$loc,
map=v.obj$map, rmap=v.obj$rmap,
n.fiber=v.obj$n.fiber, xgrid.sp=xgrid.sp,
braingrid=braingrid,
ygrid.sp=ygrid.sp, zgrid.sp=zgrid.sp,
max.line=max.line, nproj=nproj, thres.ang=thres.ang,
vorient=vorient)
#all.pvox <- c(all.pvox, tracks1[[iind]]$pvox, tracks2[[iind]]$pvox)
#all.pdir <- rbind(all.pdir, tracks1[[iind]]$pdir, tracks2[[iind]]$pdir)
#all.pdis <- c(all.pdis, tracks1[[iind]]$pdis, tracks2[[iind]]$pdis)
#all.ppdis <- c(all.ppdis, tracks1[[iind]]$ppdis, tracks2[[iind]]$ppdis)
n.use.iind[tracks1[[iind]]$iinds] <- n.use.iind[tracks1[[iind]]$iinds] + 1
n.use.iind[tracks2[[iind]]$iinds] <- n.use.iind[tracks2[[iind]]$iinds] + 1
n.use.iind[iind] <- n.use.iind[iind] - 1
n.iinds[iind] <- length(union(tracks1[[iind]]$iinds, tracks2[[iind]]$iinds))
lens[iind] <- get.fdis(tracks1[[iind]]$inloc) + get.fdis(tracks2[[iind]]$inloc)
if (length(all.pdis)!=length(all.pvox)){
break
}
}
#len.ord <- order(n.iinds, decreasing=T)
len.ord <- order(lens, decreasing=T)
if (max(lens[n.iinds<=1])> elim.thres){
cat("elim.thres is too small: it should be set at least", max(lens[n.iinds<=1]),"\n")
}
if (elim){
update.ind <- rep(T, length(v.obj$n.fiber2))
#update.ind[as.logical((v.obj$n.fiber2==0)+(n.use.iind<=elim.thres))] <- F
#update.ind[as.logical((v.obj$n.fiber2==0)+(n.iinds<=elim.thres))] <- F
update.ind[as.logical((v.obj$n.fiber2==0)+(lens<=elim.thres))] <- F
nv.obj <- update.v.obj(v.obj, list(vec=v.obj$vec, update.ind=update.ind))$obj
} else {
nv.obj <- v.obj
update.ind <- rep(T, length(v.obj$n.fiber2))
#update.ind[as.logical((v.obj$n.fiber2==0)+(n.iinds<=elim.thres))] <- F
update.ind[as.logical((v.obj$n.fiber2==0)+(lens<=elim.thres))] <- F
}
sorted.iinds <- (1:length(v.obj$n.fiber2))[len.ord]
sorted.update.ind <- update.ind[len.ord]
#############################
## v.track
#############################
idx_fiber_track = which(v.obj$n.fiber2>0)
iind = idx_fiber_track[1472]
iind = 19
cat(iind,"\n")
tracks1[[iind]] <- fiber.track(iind=iind, eig=v.obj$vec, loc=v.obj$loc,
map=v.obj$map, rmap=v.obj$rmap,
n.fiber=v.obj$n.fiber, xgrid.sp=xgrid.sp,
ygrid.sp=ygrid.sp, zgrid.sp=zgrid.sp, braingrid=braingrid,
max.line=max.line, nproj=nproj, thres.ang=thres.ang,
vorient=vorient)
tracks2[[iind]] <- fiber.track(iind=iind, eig=-v.obj$vec, loc=v.obj$loc,
map=v.obj$map, rmap=v.obj$rmap,
n.fiber=v.obj$n.fiber, xgrid.sp=xgrid.sp,
ygrid.sp=ygrid.sp, zgrid.sp=zgrid.sp, braingrid=braingrid,
max.line=max.line, nproj=nproj, thres.ang=thres.ang,
vorient=vorient)
#############################
## fiber.track
#############################
braindim <- dim(braingrid)[-1]
nvox <- prod(braindim)
dimens <- c(xgrid.sp, ygrid.sp, zgrid.sp)
path.voxel <- array(dim=max.line)
path.dir <- array(dim=c(max.line, 3))
path.in <- array(dim=c(max.line, 3))
path.change <- array(dim=max.line)
path.iind <- array(dim=max.line)
pass.vox <- NULL
pass.dir <- NULL
pass.dis <- NULL
pass.pdis <- NULL # perpendicular distance
iind = 19
# initialization
path.voxel[1] <- map[iind]
path.dir[1,] <- eig[iind,]
path.in[1,] <- loc[iind,]
path.change[1] <- T
path.iind[1] <- iind
ii <- 1
while ((ii<max.line)){
# if (T){
# cat(ii,"\n")
# spheres3d(path.in[ii,], radius=0.002, col="red")
# }
# fio <- fiber.in.out(inc=path.in[ii,]-loc[path.iind[ii],], direct=path.dir[ii,], dimens=dimens)
inc = path.in[ii,]-loc[path.iind[ii],]
direct=path.dir[ii,]
if (sum(dimens==0)){
stop("directions has zero component, not yet supported! Please modify fiber.in.out\n")
}
# compute the distance of the current fiber direction to each face of the current voxel
tempdiff <- (round(cbind(dimens/2-inc,-inc-dimens/2),5)/direct) ## Hao: add round5
cbind(dimens/2-inc,-inc-dimens/2)
tempdiff
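# Illustration with assumed numbers: for dimens = c(1, 1, 1), inc = c(0, 0, 0) and
# direct = c(0.8, 0.6, 0), tempdiff[, 1] = c(0.625, 0.833, Inf) is the step length needed to
# reach each positive face (x, y, z) and tempdiff[, 2] the (negative) step to each negative
# face; the smallest admissible entry tells through which face the fiber leaves the voxel.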
# Hao
# tempdiff[tempdiff==Inf]=1e10
tempdiff[tempdiff==-Inf]=Inf
# tempdiff[is.nan(tempdiff)]=1e10
# tempdiff[tempdiff==Inf]=1e10
# find along which axis the current fiber direction first hits a face of the current voxel
# 1:x 2:y 3:z
index1 <- which.min(diag(tempdiff[,2-(direct>=0)])) # Hao change direct>0 to direct>=0
# which direction it is hitting 1:positive 2:negative
index <- c(index1, (2-(direct>0))[index1])
const <- tempdiff[index[1],index[2]]
outc <- round(inc + const*direct,5) ## Hao: add round5
fio = list(outc=outc,index=as.vector(index))
path.in[ii+1,] <- fio$outc + loc[path.iind[ii],]
# for previous pass.dis and pass.pdis, using the previous "change"
if ((!path.change[ii])&&(n.fiber[path.voxel[ii]]>0)){
pass.pdis <- c(pass.pdis, dist.line(loc[path.iind[ii],], path.in[ii,], path.in[ii+1,]))
pass.dis <- c(pass.dis, sqrt(sum((path.in[ii,]-path.in[ii+1,])^2)))
}
# determine which voxel it is going to
next.vox <- get.out.vox(fio$index, path.voxel[ii], braindim=braindim, vorient=vorient)
if (is.na(next.vox)){
break
}
# determine if we should stop
pro.res <- project.proceed(inc0=path.in[ii+1,], vox0=next.vox,
dir0=path.dir[ii,], loc, eig, rmap, n.fiber,
braindim, dimens, nproj=nproj,
thres.ang=thres.ang, vorient=vorient)
change <- pro.res$first
good <- pro.res$last
if (!good){
break
}
# update voxel
path.voxel[ii+1] <- next.vox
# update dir, iind and change
if (n.fiber[next.vox]<=1){
path.iind[ii+1] <- rmap[next.vox]
path.change[ii+1] <- change
if (change){
path.dir[ii+1,] <- eig[path.iind[ii+1],]
} else {
path.dir[ii+1,] <- path.dir[ii,]
if (n.fiber[next.vox]==1){
pass.vox <- c(pass.vox,next.vox)
pass.dir <- rbind(pass.dir, path.dir[ii,])
}
}
} else {
# thresholding rule -> determine stop or not, and within the thresholding rule, choose the closest
if (change){
# decide which directions
tiind <- rmap[next.vox]
chosen <- which.max(abs(eig[tiind+(0:(n.fiber[next.vox]-1)),]%*%path.dir[ii,]))
path.iind[ii+1] <- tiind+chosen-1
path.dir[ii+1,] <- eig[path.iind[ii+1],]
path.change[ii+1] <- T
} else {
path.iind[ii+1] <- rmap[next.vox]
path.change[ii+1] <- F
path.dir[ii+1,] <- path.dir[ii,]
pass.vox <- c(pass.vox,next.vox)
pass.dir <- rbind(pass.dir, path.dir[ii,])
}
}
# align directions
path.dir[ii+1,] <- sign(sum(path.dir[ii+1,]*path.dir[ii,]))*path.dir[ii+1,]
ii <- ii+1
}
if (ii<max.line){
path.in <- path.in[1:(ii+1),]
path.iind <- path.iind[1:ii]
path.dir <- path.dir[1:ii,]
path.change <- path.change[1:ii]
}
#############################
## project.proceed
#############################
vox0 = next.vox
dir0=path.dir[ii,]
first <- proceed(vox0, dir0, eig, rmap, n.fiber, thres.ang)
#############################
## proceed
#############################
good <- T
if (n.fiber[vox0]==0){
good <- F
} else if (n.fiber[vox0]==1) {
good <- acos(min(abs(eig[rmap[vox0],]%*%dir0),1))<thres.ang
} else {
good <- as.logical(sum(as.vector(acos(pmin(abs(eig[rmap[vox0]+(0:(n.fiber[vox0]-1)),]%*%dir0),1)))<thres.ang))
}
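# `good` is TRUE when at least one fiber direction stored in the next voxel lies within
# thres.ang (0.5235988 rad, i.e. 30 degrees) of the current direction dir0, so the
# streamline is allowed to continue; otherwise tracking stops at this voxel.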
#############################
## fiber.in.out
#############################
inc = path.in[ii,]-loc[path.iind[ii],]
direct=path.dir[ii,]
if (sum(dimens==0)){
stop("directions has zero component, not yet supported! Please modify fiber.in.out\n")
}
# compute the distance of the current fiber direction to each face of the current voxel
tempdiff <- (cbind(dimens/2-inc,-inc-dimens/2)/direct)
index1 <- which.min(diag(tempdiff[,2-(direct>=0)]))
index <- c(index1, (2-(direct>0))[index1])
const <- tempdiff[index[1],index[2]]
outc <- inc + const*direct
list(outc=outc, index=as.vector(index)) # value fiber.in.out would return at this point
#####
# compute the distance of the current fiber direction to each face of the current voxel
tempdiff <- (cbind(dimens/2-inc,-inc-dimens/2)/direct)
# Hao
tempdiff[tempdiff==Inf]=1e10
tempdiff[tempdiff==-Inf]=-1e10
tempdiff[is.nan(tempdiff)]=1e10
tempdiff[tempdiff==Inf]=1e10
# find along which axis the current fiber direction first hits a face of the current voxel
# 1:x 2:y 3:z
index1 <- which.min(diag(tempdiff[,2-(direct>=0)])) # Hao change direct>0 to direct>=0
# which direction it is hitting 1:positive 2:negative
index <- c(index1, (2-(direct>0))[index1])
const <- tempdiff[index[1],index[2]]
outc <- inc + const*direct
list(outc=outc, index=as.vector(index)) # value fiber.in.out would return at this point
#############################
## get.out.vox
#############################
cvox = path.voxel[ii]
cvoxindex <- as.vector(arrayInd(cvox, braindim))
if (index[2]==1){
# positive sides
cvoxindex[index[1]] <- cvoxindex[index[1]] + vorient[index[1]]
} else {
# negative sides
cvoxindex[index[1]] <- cvoxindex[index[1]] - vorient[index[1]]
}
if ((cvoxindex[index[1]]<1)||(cvoxindex[index[1]]>braindim[index[1]])){
NA # next voxel falls outside the brain grid
} else {
ArrayIndex(braindim, cvoxindex[1], cvoxindex[2], cvoxindex[3]) # linear index of the next voxel
}
#########
eig_min = rep(0,dim(eig)[1])
for(iind in 1:dim(eig)[1]){
eig_min[iind] = min(abs(eig[iind, ]), na.rm = TRUE) # smallest absolute component of the iind-th direction
}
min(abs(eig),na.rm=T)
|
2ff2ee23ad9f3b8c77ab3985fcfff6fceddce0b0
|
3a5ae60a34608840ef484a901b61a363b1167756
|
/vignettes/general_processing.R
|
c73797128ad456b1d9e694537df20b6442797cd8
|
[] |
no_license
|
SWS-Methodology/hsfclmap
|
3da8ca59a1ceb90564ec70a448a6f0340ca86420
|
eb2bc552fcce321b3dd7bc8655b092bc7a428e1e
|
refs/heads/master
| 2021-01-17T17:35:59.484994 | 2016-12-19T17:47:53 | 2016-12-19T17:47:53 | 70,464,081 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 2,679 |
r
|
general_processing.R
|
library(magrittr)
library(stringr)
library(futile.logger)
library(dplyr, warn.conflicts = FALSE)
library(hsfclmap)
cores <- parallel::detectCores(all.tests = TRUE)
if(cores > 1) {
library(foreach)
library(doParallel)
doParallel::registerDoParallel(cores = cores)
}
trade <- esdata13
tariffline <- FALSE
reportdir <- file.path(
tempdir(),
"faoreports",
format(Sys.time(), "%Y%m%d%H%M%S%Z"))
stopifnot(!file.exists(reportdir))
dir.create(reportdir, recursive = TRUE)
if(!tariffline)
trade %<>% esdata2faoarea(loadgeonom()) else {
m49faomap <- loaddatafromweb(
"https://github.com/SWS-Methodology/faoswsTrade/blob/master/data/m49faomap.RData?raw=true")
trade %<>%
left_join(m49faomap, by = c("reporter" = "m49")) %>%
select_(~-reporter, reporter = ~fao)
}
if(tariffline)
trade %<>%
mutate_(flow = ~recode(flow, '4' = 1L, '3' = 2L))
hsfclmap4 <- hsfclmap3 %>%
filter(str_detect(fromcode, "^\\d+$"),
str_detect(tocode, "^\\d+$")) %>%
mutate(linkid = row_number())
trade %<>% do(hsInRange(.$hs, .$reporter, .$flow,
hsfclmap4,
parallel = cores > 1L))
layout.glimpse <- function(level, tbl, ...) dplyr::as.tbl(tbl)
appender.glimpse <- function(tbl) tbl
trade <- trade %>%
# Mapping statistics
group_by(id) %>%
mutate_(multlink = ~length(unique(fcl)) > 1,
nolink = ~any(is.na(fcl))) %>%
ungroup() %>%
arrange_(~area, ~flow, ~hsorig)
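# multlink: the record (grouped by id) was mapped to more than one distinct FCL code;
# nolink:   at least one candidate row for this record has fcl = NA, i.e. no FCL match found.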
trade %>%
filter_(~nolink) %>%
select_(~area, ~flow, hs = ~hsorig) %>%
write.csv(file = file.path(reportdir, "nolinks.csv"),
row.names = FALSE)
trade %>%
filter_(~multlink) %>%
select_(~area, ~flow, hs = ~hsorig, ~fcl) %>%
write.csv(file = file.path(reportdir, "multilinks.csv"),
row.names = FALSE)
flog.info("Reports in %s/",
reportdir)
trade %>%
group_by_(~id) %>%
summarize_(multlink = ~sum(any(multlink)),
nolink = ~sum(any(nolink))) %>%
summarize_(totalrecsmulti = ~sum(multlink),
totalnolink = ~sum(nolink),
propmulti = ~sum(multlink) / n(),
propnolink = ~sum(nolink) / n()) %>%
{flog.info("Multi and no link:", ., capture = TRUE)}
# Remove ES reporters from TL
esreporters <- unique(esdatafcl14$area)
trade <- trade %>%
filter(!area %in% esreporters)
# Idea for split ranges
x <- tibble::tribble(
~year, ~fcl,
1, 1,
2, 1,
3, 1,
4, 2,
5, 2,
6, 1,
7, 1)
x %>%
mutate(change = fcl != lag(fcl),
change = ifelse(is.na(change), FALSE, change),
change = cumsum(change))
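# With the toy data above this gives change = c(0, 0, 0, 1, 1, 2, 2): each value of `change`
# labels one consecutive run of identical fcl codes, so grouping by it would split the year
# range into separate mapping intervals whenever the fcl assignment switches.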
|
df4673ad0c6156a5b48a3304388ac65fa4963a91
|
90df0cb421dc4221bfce0929054d8067a50af72a
|
/Rscripts/old_fig_scripts/fig_mixture_model_demo.R
|
860068e77338bdadd6b1802a9054e6f88b5328be
|
[
"MIT"
] |
permissive
|
SlavovLab/DART-ID_2018
|
c1c7de6cd03690e70cf1c27a9bac92d977b96599
|
84e73bc66e9e9a64d848d06463255db92561bfb7
|
refs/heads/master
| 2020-04-13T03:21:46.341466 | 2019-05-15T05:47:18 | 2019-05-15T05:47:18 | 162,929,280 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,226 |
r
|
fig_mixture_model_demo.R
|
## mixture model demo ------
library(ggplot2)  # needed for ggplot(), geom_area(), geom_path() and ggsave() below
x <- seq(0,60,by=0.1)
#y1 <- dlnorm(x, meanlog=4.663, sdlog=0.5089)
y1 <- dnorm(x, mean=38, sd=17)
y2 <- dnorm(x, mean=20, sd=1.78)
#y3 <- dnorm(x, mean=80, sd=2.3)
#y4 <- dnorm(x, mean=120, sd=2)
#plot(x, y2, 'l', col='red')
#lines(x,y1,'l', col='black')
#p <- ggplot(data.frame(x,y1,y2,y3,y4)) +
p <- ggplot(data.frame(x,y1,y2)) +
geom_area(aes(x=x, y=y1), fill='red', alpha=0.3) +
geom_path(aes(x=x, y=y1), color='red', size=0.4) +
geom_area(aes(x=x, y=y2), fill='blue', alpha=0.3) +
geom_path(aes(x=x, y=y2), color='blue', size=0.4) +
#geom_path(aes(x=x, y=y3), color='blue', size=0.4) +
#geom_path(aes(x=x, y=y4), color='blue', size=0.4) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(limits=c(0, 0.24), expand=c(0,0)) +
labs(x='Retention Time', y='Density') +
theme_bw() %+replace% theme(
axis.ticks=element_blank(),
axis.text=element_blank(),
axis.title.x = element_text(family='Helvetica', size=6),
axis.title.y = element_text(family='Helvetica', size=6, angle=90),
panel.grid.minor=element_blank()
#panel.grid=element_blank()
)
ggsave('manuscript/Figs/mixture_model_demo.pdf', plot=p, 'pdf', width=4, height=2.5, units='cm')
|
f3a56af15a2c4e5f138c081c9f93eac1fcb80d28
|
4160ec1f770aa1124aeefe44cca5b97be3b368a5
|
/Cleaning_Featuring/Katz_Back-off_2.2.R
|
632d0144acfc8d0f9206bea5239f46c6f67891d5
|
[] |
no_license
|
jordiac/Capstone_DSS
|
831e4f9c0081cbf5a0e569b2d7b393e04d718aed
|
a99f80b82de2994af5102b864a429a26b4426d0d
|
refs/heads/master
| 2020-12-30T16:42:17.682853 | 2017-07-22T13:57:06 | 2017-07-22T13:57:06 | 91,016,685 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 13,223 |
r
|
Katz_Back-off_2.2.R
|
## -------------------------------------------------------
## Katz's Back-off implementation
## -------------------------------------------------------
## ******************* Notes ***************************
## This implementation considers only 2-grams and 3-grams
## -----------------------------------------------------
## Discount coefficients
## -----------------------------------------------------
discount <- function(TwoGram, ThreeGram){
## input : TwoGram and ThreeGram are the 2-gram and 3-gram frequency tables (frequency in the last column)
## Good-Turing discount: d_r = (r+1)/r * N_{r+1}/N_r
## r: frequency of the gram ; N_r = number of grams that occur exactly r times in the list
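## Worked example with assumed counts: if 3-grams of frequency r = 1 occur N_1 = 5000 times
## in the table and 3-grams of frequency 2 occur N_2 = 1200 times, then
## d_1 = (2/1) * (1200/5000) = 0.48, i.e. singleton 3-grams keep 48% of their probability
## mass and the remainder is redistributed to the lower-order (back-off) model.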
## --------------------
## Treating 3 gram list
## --------------------
## Tgram : list of 3grams containing the 2 words input (3rd col: freq)
un <- unique(ThreeGram[,4])
if ((TRUE %in% is.na(un)) == TRUE){
print("There are frequencies = NA, please check your input 3-GRAM")
stop()
}
## Defining the discount values for each different frequency
if (length(un) >0){
disc <- vector(mode="numeric", length = length(un))
nm <- min(length(un),6) ## If frequency is higher than 6-1=5 --> d=1
for (i in 1:length(un)){
if (un[i] < nm){
freq <- un[i]
freq2 <- freq+1
Nfreq <- nrow(ThreeGram[ThreeGram[,4]==freq,])
Nfreq2 <- nrow(ThreeGram[ThreeGram[,4]==freq2,])
dis <- freq2 / freq * Nfreq2 / Nfreq
if (dis ==0){dis<-1}
disc[i] <- dis
} else {
dis <- 1
disc[i] <- dis
}
}
}
## disc --> discount values for each unique frequency
ma <- match(ThreeGram[,4], un)
val <- NULL
for (j in 1: nrow(ThreeGram)){
f <- disc[ma[j]]
val <- c(val,f)
}
ThreeGram$disc <- round(val,3) # Add discount values to 3-gram data frame
mm <- which(ThreeGram[,5] >1)
if (length(mm)>0) {ThreeGram[mm,5] <- 1} # Maximizing to 1
## --------------------
## Treating 2-gram list
## --------------------
un <- unique(TwoGram[,3])
if ((TRUE %in% is.na(un)) == TRUE){
print("There are frequencies = NA, please check your input 2-GRAM")
stop()
}
## Defining the discount values for each different frequency
if (length(un) >0){
disc <- vector(mode="numeric", length = length(un))
nm <- min(length(un),7) ## If frequency is higher than 7-1=6 --> d=1
for (i in 1:length(un)){
if (un[i] < (nm-1)){
freq <- un[i]
freq2 <- freq+1
Nfreq <- nrow(TwoGram[TwoGram[,3]==freq,])
Nfreq2 <- nrow(TwoGram[TwoGram[,3]==freq2,])
dis <- freq2 / freq * Nfreq2 / Nfreq
if (dis ==0){dis<-1}
disc[i] <- dis
} else {
dis <- 1
disc[i] <- dis
}
}
}
## disc --> discount values for each unique frequency
ma <- match(TwoGram[,3], un)
val <- NULL
for (j in 1: nrow(TwoGram)){
f <- disc[ma[j]]
val <- c(val,f)
}
TwoGram$disc <- round(val,3) # Add discount values to 3-gram data frame
mm <- which(TwoGram[,4] >1)
if (length(mm)>0) {TwoGram[mm,4] <- 1} # Maximizing to 1
return(list(TwoGram, ThreeGram))
}
## -----------------------------------------------------
## Prediction algorithm
## -----------------------------------------------------
## gets an input and return the 5 most probable words as output as per Katz Back-off method
Katz_Backoff <- function(input){
load( file="bigram_fin.RData") ##bigram
load( file="trigram_fin.RData") ##trigram
load( file="quadgram_fin.RData") ##quadgram
Nwords <- length(input)
# Defining the input texts for each Ngram
if (Nwords >2){
qtext <- input[(length(input)-2):length(input)] ##input for quadgram
ttext <- input[(length(input)-1):length(input)] ##input for trigram
btext <- input[length(input)] ##input for bigram
} else if (Nwords==2){
ttext <- input[1:2]
btext <- input[2]
} else if (Nwords==1) {
btext <- input[1]
}
output <- NULL
## Predicting
if (Nwords >2){
qlist <- which(quadgram[,1] == qtext[1] & quadgram[,2] == qtext[2] & quadgram[,3] == qtext[3])
tlist <- which(trigram[,1] == ttext[1] & trigram[,2] == ttext[2])
blist <- which(bigram[,1] == btext[1])
## 4-gram
if (length(qlist) >0){
output4 <-quadgram[qlist,4:5]
colnames(output4) <- c("Predic", "Freq")
output <- rbind(output, output4)
output$prob <- 1 ## We give prob=1 to all elements
}
## 3-gram and 2-gram as per KATZ
if (length(qlist)<5){
sel1 <- which(trigram[,1] %in% ttext[1] & trigram[,2] %in% ttext[2])
if (length(sel1) >0){
sel1 <- trigram[sel1,]
sel2 <- which(sel1[,2] %in% ttext[2])
sel2 <- sel1[sel2,]
## Defining pbeta
pbeta <- 1-(sum(sel2[,4]*sel2[,5]) / sum(sel2[,4]))
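## pbeta = 1 - sum(freq * disc) / sum(freq) is the probability mass freed by discounting
## the observed trigram continuations; it is redistributed over the bigram candidates
## that never follow this 2-word history (the Katz back-off weight).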
## Selecting bigram rows where the 1st word is equal to the last word
usel <- which(bigram[,1] %in% ttext[2])
usel <- bigram[usel,]
## Removing those having word 2 = word 3 of the trigram
rsel <- which(usel[,2] %in% sel2[,3])
usel <- usel[-rsel,]
## for each word left, we calculate its probability 2-gram
usel$prob <- usel$Freq * usel$disc * pbeta /(sum(usel$Freq*usel$disc))
## Calculate the probability for each 3-gram end word
sel2$prob <- sel2$Freq * sel2$disc / sum(sel2$Freq * sel2$disc)
##Subseting each end word in bigram and trigram with its probability and sorting
bisel <- data.frame(endword=usel$word2, prob=usel$prob, stringsAsFactors = FALSE)
trisel <- data.frame(endword=sel2$word3, prob=sel2$prob, stringsAsFactors = FALSE)
final <- rbind(bisel, trisel)
yy <- order(final$prob, decreasing = TRUE)
final <- final[yy,]
output2 <- final[1:5,]
output <- rbind(output,output2)
} else {
## Applying bigram
find <- which(bigram[,1] %in% btext[1])
if (length(find)>0){
bisel <- bigram[find,]
output2 <- bisel[1:5,2:3]
output <- rbind(output,output2)
}
}
}
}else if (Nwords ==2) {
## 3-gram and 2-gram as per KATZ
sel1 <- which(trigram[,1] %in% ttext[1] & trigram[,2] %in% ttext[2])
if (length(sel1) >0){
sel1 <- trigram[sel1,]
sel2 <- which(sel1[,2] %in% ttext[2])
sel2 <- sel1[sel2,]
## Defining pbeta
pbeta <- 1-(sum(sel2[,4]*sel2[,5]) / sum(sel2[,4]))
## Selecting bigram rows where the 1st word is equal to the last word
usel <- which(bigram[,1] %in% ttext[2])
usel <- bigram[usel,]
## Removing those having word 2 = word 3 of the trigram
rsel <- which(usel[,2] %in% sel2[,3])
usel <- usel[-rsel,]
## for each word left, we calculate its probability 2-gram
usel$prob <- usel$Freq * usel$disc * pbeta /(sum(usel$Freq*usel$disc))
## Calculate the probability for each 3-gram end word
sel2$prob <- sel2$Freq * sel2$disc / sum(sel2$Freq * sel2$disc)
##Subseting each end word in bigram and trigram with its probability and sorting
bisel <- data.frame(endword=usel$word2, prob=usel$prob, stringsAsFactors = FALSE)
trisel <- data.frame(endword=sel2$word3, prob=sel2$prob, stringsAsFactors = FALSE)
final <- rbind(bisel, trisel)
yy <- order(final$prob, decreasing = TRUE)
final <- final[yy,]
output2 <- final[1:5,]
output <- rbind(output,output2)
} else {
## Applying bigram
find <- which(bigram[,1] %in% btext[1])
if (length(find)>0){
bisel <- bigram[find,]
output2 <- bisel[1:5,2:3]
output <- rbind(output,output2)
}
}
} else if (Nwords==1){
blist <- which(bigram[,1] == btext[1])
if (length(blist) >0){
output <- rbind(output, bigram[blist,])
colnames(output) <- c("Predic", "Freq")
num <- min(5, nrow(output))
output <- output[1:num,2:3]
}
}
output <- output[1:5,]
return(output)
}
source("./Clean_2.0.R")
##--------- Main prediction algorithm ----------------------------
predic_text <- function(input){
load(file="./unigram_fin.RData")
input <- textClean(input) ## Cleaning the input data
input <- unlist(strsplit(input, split= " ")) ## Splitting in different words
condition <- 0
for (i in 1:(as.integer(length(input)/2)-1)){
condition <- 1
result <- Katz_Backoff(input) ## results from Katz prediction function
if (is.null(result)==TRUE ){ ## if no results obtained from Katz, remove last word
condition <- 0
input <- input[-c(length(input)-1,length(input)) ] ## Remove 2 last words if no results
}
if (condition == 1){
break()
}
}
if(is.null(result)==FALSE & nrow(result)>1) {result <- data.frame(words=as.character(result[,1]))}
if(is.null(result)==FALSE & nrow(result)==1) {result <- data.frame(words=as.character(result))}
if(is.null(result)==TRUE) {result <- data.frame(words=as.character(unigram[,1]))}
return(result)
}
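## Example usage (assumes the *_fin.RData n-gram tables and unigram_fin.RData are present
## in the working directory, and that Clean_2.0.R provides textClean()):
## predic_text("I would like to")
## # -> data frame with up to 5 candidate next words, obtained by backing off from the
## #    4-gram to the 3-/2-gram Katz estimates and finally to the unigram list.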
|
7a04a1c4d57be2bb2404019c38e7898be4539955
|
e248c9ff1d03ac10216bb9e86611491ec16c1fdd
|
/R/functions.R
|
46d09c59e833899cff67d448bb4fb69c68614dfb
|
[
"Apache-2.0"
] |
permissive
|
lolow/ENGAGE-overshoot-impacts
|
25a1b56395c0a8198046d82c6ba4c815ca450d63
|
2bd2a81eae63e5dcf114b108842ded55a07df867
|
refs/heads/main
| 2023-04-07T14:16:59.671132 | 2021-11-09T08:28:12 | 2021-11-09T08:28:12 | 414,539,627 | 0 | 1 | null | null | null | null |
UTF-8
|
R
| false | false | 559 |
r
|
functions.R
|
source('R/data_ssp_db.R')
source('R/data_engage_db.R')
source('R/data_climate.R')
source('R/impact.R')
source('R/impact_bhm.R')
source('R/impact_levels.R')
source('R/impact_arnell.R')
source('R/impact_slr.R')
source('R/impact_tail.R')
source('R/compute_net_benefits.R')
source('R/compute_cmit.R')
source('R/compute_admg.R')
source("R/plot_temperature.R")
source("R/plot_impact_physical.R")
source("R/plot_cmit.R")
source("R/plot_admg.R")
source("R/plot_cba.R")
source("R/plot_slr.R")
source("R/plot_emi.R")
source("R/plot_fig_paper.R")
source('R/zzz.R')
|
e3a46fe90e94d385b403d2aed5fdda4ec970fd3b
|
8eccf1b9d13564b48d6936fc7e8878bca2a757f7
|
/Amazon reviews scraping.R
|
30db1823ee5ad5ac9810eab1c36ca55e40d36345
|
[] |
no_license
|
MohitKedia/Web-Scraping
|
a9b2b37dec0cbf3ffd4b8005cba249960d405bc3
|
d4154faf1ffd09b72aeaee6156ca12cb10db3ab0
|
refs/heads/master
| 2020-03-10T04:48:24.023858 | 2018-07-09T13:40:12 | 2018-07-09T13:40:12 | 129,201,897 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 5,656 |
r
|
Amazon reviews scraping.R
|
library(rvest)
install.packages("RCrawler")
##############################
#NOKIA8_Reviews#
#METHOD1#
url <- "https://www.amazon.in/Nokia-8-Polished-Blue-64GB/product-reviews/B0714DP3BJ/ref=cm_cr_getr_d_show_all?showViewpoints=1&pageNumber=1&reviewerType=all_reviews"
webpage <- read_html(url)
reviews_data <- html_nodes(webpage, '.a-color-base')
reviews <- html_text(reviews_data)
description_data <- html_nodes(webpage, '.review-text')
description <- html_text(description_data)
buyer_data <- html_nodes(webpage, '.author')
buyer <- html_text(buyer_data)
Amazon_reviews_nokia <- data.frame(REVIEWS = reviews, DESCRIPTION = description, BUYER = buyer)
View(Amazon_reviews_nokia)
#METHOD2#
library(rvest)
library(purrr)
url_base <- "https://www.amazon.in/Nokia-8-Polished-Blue-64GB/product-reviews/B0714DP3BJ/ref=cm_cr_getr_d_paging_btm_1?showViewpoints=1&pageNumber=%d&reviewerType=all_reviews&filterByStar=positive"
map_df(1:32, function(i) {
cat(".")
pg <- read_html(sprintf(url_base, i))
data.frame(REVIEWS=html_text(html_nodes(pg, ".a-color-base")),
DESCRIPTION=html_text(html_nodes(pg, ".review-text")),
BUYER=html_text(html_nodes(pg, ".author")),
stringsAsFactors=FALSE)
}) -> Nokia8_Amazonreviews
View(Nokia8_Amazonreviews)
##################################
#Oneplus5T_Reviews#
library(rvest)
library(purrr)
url_base <- "https://www.amazon.in/OnePlus-Midnight-Black-64GB-memory/product-reviews/B0756ZFXVB/ref=cm_cr_arp_d_viewopt_sr?showViewpoints=1&pageNumber=%d&filterByStar=five_star&formatType=all_formats"
map_df(1:921, function(i) {
cat(".")
pg <- read_html(sprintf(url_base, i))
data.frame(RATING = 5,
REVIEWS=html_text(html_nodes(pg, ".a-color-base")),
DESCRIPTION=html_text(html_nodes(pg, ".review-text")),
BUYER=html_text(html_nodes(pg, ".author")),
DATE = html_text(html_nodes(pg, "#cm_cr-review_list .review-date")),
PRODUCT = html_text(html_nodes(pg,".a-link-normal.a-color-secondary")),
stringsAsFactors=FALSE)
}) -> Oneplus5T_Amazonreviews_1
url_base <- "https://www.amazon.in/OnePlus-Midnight-Black-64GB-memory/product-reviews/B0756ZFXVB/ref=cm_cr_arp_d_viewopt_sr?showViewpoints=1&pageNumber=%d&filterByStar=four_star&formatType=all_formats"
map_df(1:163, function(i){
cat("boom ")
pg <- read_html(sprintf(url_base,i))
data.frame(RATING=4,
REVIEWS=html_text(html_nodes(pg, ".a-color-base")),
DESCRIPTION=html_text(html_nodes(pg, ".review-text")),
BUYER=html_text(html_nodes(pg, ".author")),
DATE = html_text(html_nodes(pg, "#cm_cr-review_list .review-date")),
PRODUCT = html_text(html_nodes(pg,".a-link-normal.a-color-secondary")),
stringsAsFactors=FALSE)
}) -> Oneplus5T_Amazonreviews_2
url_base <- "https://www.amazon.in/OnePlus-Midnight-Black-64GB-memory/product-reviews/B0756ZFXVB/ref=cm_cr_arp_d_viewopt_sr?showViewpoints=1&pageNumber=%d&filterByStar=three_star&formatType=all_formats"
map_df(1:35, function(i){
cat("boom ")
pg <- read_html(sprintf(url_base,i))
data.frame(RATING=3,
REVIEWS=html_text(html_nodes(pg, ".a-color-base")),
DESCRIPTION=html_text(html_nodes(pg, ".review-text")),
BUYER=html_text(html_nodes(pg, ".author")),
DATE = html_text(html_nodes(pg, "#cm_cr-review_list .review-date")),
PRODUCT = html_text(html_nodes(pg,".a-link-normal.a-color-secondary")),
stringsAsFactors=FALSE)
}) -> Oneplus5T_Amazonreviews_3
url_base <- "https://www.amazon.in/OnePlus-Midnight-Black-64GB-memory/product-reviews/B0756ZFXVB/ref=cm_cr_arp_d_viewopt_sr?showViewpoints=1&pageNumber=%d&filterByStar=two_star&formatType=all_formats"
map_df(1:19, function(i){
cat("boom ")
pg <- read_html(sprintf(url_base,i))
data.frame(RATING=2,
REVIEWS=html_text(html_nodes(pg, ".a-color-base")),
DESCRIPTION=html_text(html_nodes(pg, ".review-text")),
BUYER=html_text(html_nodes(pg, ".author")),
DATE = html_text(html_nodes(pg, "#cm_cr-review_list .review-date")),
PRODUCT = html_text(html_nodes(pg,".a-link-normal.a-color-secondary")),
stringsAsFactors=FALSE)
}) -> Oneplus5T_Amazonreviews_4
url_base <- "https://www.amazon.in/OnePlus-Midnight-Black-64GB-memory/product-reviews/B0756ZFXVB/ref=cm_cr_arp_d_viewopt_sr?showViewpoints=1&pageNumber=%d&filterByStar=one_star&formatType=all_formats"
map_df(1:65, function(i){
cat("boom ")
pg <- read_html(sprintf(url_base, i))
data.frame(REVIEWS=html_text(html_nodes(pg, ".a-color-base")),
DESCRIPTION=html_text(html_nodes(pg, ".review-text")),
BUYER=html_text(html_nodes(pg, ".author")),
DATE = html_text(html_nodes(pg, "#cm_cr-review_list .review-date")),
PRODUCT = html_text(html_nodes(pg,".a-link-normal.a-color-secondary")),
stringsAsFactors=FALSE)
}) -> Oneplus5T_Amazonreviews_5
Oneplus5T_Amazonreviews_5 <- cbind(RATING=1,Oneplus5T_Amazonreviews_5)
Oneplus5T_Amazonreviews <- rbind(Oneplus5T_Amazonreviews_1,Oneplus5T_Amazonreviews_2,Oneplus5T_Amazonreviews_3
,Oneplus5T_Amazonreviews_4,Oneplus5T_Amazonreviews_5)
View(Oneplus5T_Amazonreviews)
library(xlsx)
write.xlsx(Oneplus5T_Amazonreviews,file = "OnePlus5T Amazon Reviews.xlsx")
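# The five map_df() blocks above repeat the same scraping pattern. A reusable sketch is given
# below; the CSS selectors and page counts are taken from the blocks above, while the function
# name and argument names are only illustrative assumptions.
scrape_amazon_star <- function(url_template, rating, n_pages) {
  map_df(1:n_pages, function(i) {
    pg <- read_html(sprintf(url_template, i))
    data.frame(RATING      = rating,
               REVIEWS     = html_text(html_nodes(pg, ".a-color-base")),
               DESCRIPTION = html_text(html_nodes(pg, ".review-text")),
               BUYER       = html_text(html_nodes(pg, ".author")),
               DATE        = html_text(html_nodes(pg, "#cm_cr-review_list .review-date")),
               PRODUCT     = html_text(html_nodes(pg, ".a-link-normal.a-color-secondary")),
               stringsAsFactors = FALSE)
  })
}
# e.g. scrape_amazon_star(url_base, rating = 5, n_pages = 921) should reproduce
# Oneplus5T_Amazonreviews_1, assuming url_base still holds the five-star review URL.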
|
1125c9ecf1eb5c2d6f4b018c509ddf761ce7b2b4
|
4e77858f348a7081e6d9bc4fa5b0296bfa3b4291
|
/Assignment_1/assignment_1.R
|
484dd74e2718e2d7f1676509a7566bc22f888c09
|
[] |
no_license
|
bazzim/Coding-2-web-scraping
|
8da2f278e5048e173e08683b7d09ddccafda3407
|
bb89715aeb0d2c11b6471028b4c14b7d0258bd1e
|
refs/heads/main
| 2023-01-18T18:07:38.268269 | 2020-11-22T20:19:08 | 2020-11-22T20:19:08 | 315,120,911 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 2,462 |
r
|
assignment_1.R
|
library(rvest)
library(data.table)
rm(list=ls())
# website of interest: https://www.sciencenews.org/
## create a function which downloads information from a url to dataframe (from sciencenews.org)
get_sciencenews_page <- function(my_url){
print(my_url)
t <- read_html(my_url)
boxes <- t %>% html_nodes('.post-item-river__content___2Ae_0')
x <- boxes[[1]]
boxes_dfs <- lapply(boxes, function(x){
tl <- list()
tl[['title']] <- paste0( x %>% html_nodes('.post-item-river__title___J3spU') %>% html_text(), collapse = ' ')
tl[['link']] <- paste0( x %>% html_nodes('.post-item-river__title___J3spU > a') %>% html_attr('href'))
tl[['excerpt']] <- paste0( x %>% html_nodes('.post-item-river__excerpt___3ok6B') %>% html_text(), collapse = ' ')
tl[[ 'date' ]] <- paste0( x %>% html_nodes('.published') %>% html_text())
tl[[ 'author' ]] <- paste0( x %>% html_nodes('.n') %>% html_text())
tl[[ 'topic' ]] <- paste0( x %>% html_nodes('.post-item-river__eyebrow___33ASW')%>% html_text())
return(tl)
})
df <- rbindlist(boxes_dfs, fill = T)
return(df)
}
# create a function which requires two arguments. First a keyword then a number of pages to download.
get_searched_pages <- function(searchterm, pages_to_download) {
# concat the search terms together according to url
searchterm <- gsub(' ','+',searchterm)
# create links
if (pages_to_download == 1){
links_to_get <- paste0('https://www.sciencenews.org/?s=',searchterm)
}
else{
links_to_get <- c(paste0('https://www.sciencenews.org/?s=', searchterm),
paste0('https://www.sciencenews.org/page/', 2:pages_to_download, '?s=', searchterm))
}
ret_df <- rbindlist(lapply(links_to_get, get_sciencenews_page))
return(ret_df)
}
# testing function get_searched_pages
df2 <- get_searched_pages('artificial intelligence',2)
my_url <- "https://www.sciencenews.org/?s=machine+learning"
# apply function 1 "get_sciencenews_page"
df <- get_sciencenews_page(my_url)
# save the outputs of get_sciencenews_page() to a csv file
write.csv(df, 'sciencenews_output.csv')
# save a single object to file
saveRDS(df, "sciencenews_output.rds")
# apply function 2 "get_searched_pages()"
df2 <- get_searched_pages('machine learning',3)
# save the outputs of get_searched_pages() to a csv file
write.csv(df2, 'searched_pages_output.csv')
# save a single object to file
saveRDS(df2, "searched_pages_output.rds")
|
b1f8bdbcd39741a77a03898e41dfc3ae9fefc80f
|
85da7f67f9fd656b39f16a7cf0e63424636b706a
|
/ExData_Plotting1/plot4.R
|
10bec9d74cbdc970741150627911e2edc28b64e1
|
[] |
no_license
|
pivezhandi/ExData_Plotting1
|
cfe011fd20734a06779d767f43ac0b37b21758e7
|
53c7857bb451ee7184d43851ad94e83dd54cd234
|
refs/heads/master
| 2021-01-25T03:50:00.394834 | 2015-09-13T22:35:14 | 2015-09-13T22:35:14 | 42,414,640 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 1,420 |
r
|
plot4.R
|
setwd(file.path("D:", "sbu", "RLearning", "exploratory data analysis","project1"))
EPC <- read.csv("household_power_consumption.txt",
header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F,
stringsAsFactors=F, comment.char="",
quote='\"')
EPC$Date <- as.Date(EPC$Date, format="%d/%m/%Y")
data <- subset(EPC, subset=( (Date <= "2007-02-02") & (Date >= "2007-02-01")))
rm(EPC)
data$mixeddata <- as.POSIXct(paste(data$Date,data$Time))
par(mfrow = c( 2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(data, {
plot(Global_active_power~mixeddata, type="l",
ylab="Global Active Power", xlab="")
plot(Voltage~mixeddata, type="l",
ylab="Voltage", xlab="datetime")
plot(Sub_metering_1~mixeddata, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~mixeddata,col='Red')
lines(Sub_metering_3~mixeddata,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,inset = .05,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),trace=T,bty = "n")
plot(Global_reactive_power~mixeddata, type="l",
ylab="Global_rective_power",xlab="datetime")
})
dev.copy(png, file = "unnamed-chunk-5.png" ) ## Copy my plot to a PNG file
dev.off() ## closing the PNG device!
|
9717621be715bfc0c43746752df16c0f4fbd3f77
|
8e8abb1b8f31b1cad68e1e4534be0489555ad59e
|
/lovelyanalytics_kmeans_R.R
|
89a8230677770ceb71abf7cbbffa15bc458ed1b5
|
[] |
no_license
|
mjvieille/lovelyanalytics-kmeans
|
8d29f5f3c825a45a3dadf21ec5ae2b1bf1da0a04
|
49fc526754b56cf6188ffcf41154635b632536d5
|
refs/heads/master
| 2021-01-17T07:38:51.436410 | 2017-03-05T09:34:48 | 2017-03-05T09:34:48 | 83,783,186 | 0 | 0 | null | null | null | null |
ISO-8859-1
|
R
| false | false | 390 |
r
|
lovelyanalytics_kmeans_R.R
|
#***** lovelyanalytics.com *****
#***** k-means *****
# Load the data
library(readxl)  # provides read_excel()
data<-read_excel("~/lovelyanalytics/k-means/data/data.xlsx")
# Run the k-means algorithm to create 3 clusters
resultat_kmeans<- kmeans(data[,2:3],3)
# Cluster centers: average customer tenure and average basket value per cluster
resultat_kmeans[2]
# Plot
plot(data[,2:3], col=resultat_kmeans$cluster, pch=19)
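# Note: kmeans() starts from random centers, so calling set.seed() before the fit (e.g.
# set.seed(42)) makes the clusters reproducible, and nstart = 25 in kmeans() makes the
# solution less dependent on the initial centers.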
|
40f83bfa8baf6d72ea6ac442aa5176a7947b80d7
|
c8b609bf58dab1a383bbea8b43a7bc2708adcb38
|
/man/circle_line_intersections.Rd
|
8bcae8c3d75d06e52181a7fe4b85371b31944bca
|
[] |
no_license
|
holaanna/contactsimulator
|
ce788627c12323c4ab6b3aa902da26bf3e2e4cf5
|
8bcd3f01e0bbe5fb7328d9f6beb27eb907779bdd
|
refs/heads/master
| 2022-03-17T03:25:18.841897 | 2019-11-26T18:33:29 | 2019-11-26T18:33:29 | 111,702,061 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 1,590 |
rd
|
circle_line_intersections.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{circle_line_intersections}
\alias{circle_line_intersections}
\title{Generates a set of intersection points between the cirlce and the grid lines.}
\usage{
circle_line_intersections(circle_x, circle_y, r, n_line, grid_lines)
}
\arguments{
\item{circle_x, circle_y}{The Euclidean coordinates of the center of the circle.}
\item{r}{The radius of the given circle.}
\item{n_line}{The number of grid lines.}
\item{grid_lines}{A 6-column data frame with column names coor_x_1, coor_y_1, coor_x_2, coor_y_2, orient_line.
\describe{
\item{coor_x_1, coor_y_1}{Coordinates of the left end point of the grid line }
\item{coor_x_2, coor_y_2}{Coordinates of the right end point of the grid line }
\item{orient_line}{Line orientation:
\enumerate{
\item indicates horizontal orientation
\item indicates vertical orientation
}}
\item{k_line}{Line numbering: bottom to top, then left to right}
}}
}
\value{
It returns a three-column data frame containing the x-coordinate and y-coordinate of the intersection of the circle with the
grid, and the value of the angle between the x-axis and the line joining the center of the circle to the corresponding
intersection point.
}
\description{
\code{circle_line_intersections} computes the intersections points of a given circle with the grid lines along with
the angle formed with the x-axis.
}
\examples{
data(grid_line)
attach(grid_line)
circle_line_intersections(2022230,-3123109,10000,39,grid_line)
detach(grid_line)
}
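A minimal geometric sketch in R of the computation described above, assuming axis-aligned grid lines (here a single horizontal line y = y0); the helper name, arguments, and return shape are illustrative and not the package implementation.
circle_hline_hits <- function(cx, cy, r, y0) {
  # intersections of the circle centred at (cx, cy) with radius r and the line y = y0
  dy <- y0 - cy
  if (abs(dy) > r) return(NULL)      # the line misses the circle
  dx <- sqrt(r^2 - dy^2)
  x  <- c(cx - dx, cx + dx)
  data.frame(x = x, y = y0, angle = atan2(y0 - cy, x - cx))
}
# circle_hline_hits(2022230, -3123109, 10000, -3120000)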
|
2a8f73f7423e6d1c969c5dfe7ded2095b0c0e78c
|
a5b6e45f613c45691b9f8b9811791637fe40b378
|
/OldScripts/old_uORFome/DataBaseGetters.R
|
8a79aa9e7e3040a9c5c65674452573c48e3bb0d9
|
[] |
no_license
|
Roleren/RCode
|
e0bb86ca02fc5c7eb66943be028d11982782799b
|
db8c65ee9d15b0576f1269fcce81915bb38b6b31
|
refs/heads/master
| 2023-01-08T16:04:34.751910 | 2020-11-12T16:44:42 | 2020-11-12T16:44:42 | 312,334,583 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 3,501 |
r
|
DataBaseGetters.R
|
#' Get orf names from the orf data-base
#'
#' This is the primary key for most tables in the data-base
#' @param with.transcript a logical(F), should the transcript be included, this makes the
#' list have duplicated orfs
#' @param only.transcripts a logical(F), should only the transcript and not orfId be included
#' @param asCharacter a logical(T), should it return as character or data.table(F)
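#' @param uniques a logical(F), if TRUE read the table of unique ORF-to-transcript
#'  links (linkORFsToTxUnique) instead of linkORFsToTx; only used when with.transcript is TRUE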
getORFNamesDB <- function(with.transcript = F, only.transcripts = F, asCharacter = T, uniques = F){
if (with.transcript) {
if(uniques) {
dt <- readTable("linkORFsToTxUnique")
} else {
dt <- readTable("linkORFsToTx")
}
if(only.transcripts){
if (asCharacter) {
return(as.character(unlist(dt[, 2], use.names = F)))
}
return(dt[, 2])
}
return(dt)
}
dt <-readTable("uniqueIDs")
if (asCharacter) {
dt <- as.character(unlist(dt, use.names = F))
}
return(dt)
}
#' Takes two tables from the database and extracts the rows of toBeMatched
#' that matches the txNames in referenced.
#' Both must have a column called txNames
#' @return the toBeMatched object matched by txNames
matchByTranscript <- function(toBeMatched, referenced){
Indices <- data.table(txNames = toBeMatched$txNames, ind = 1:length(toBeMatched$txNames))
merged <- merge(Indices, data.table(txNames = referenced$txNames),
by = "txNames", all.y = T, sort = F)
return(toBeMatched[merged$ind, ])
}
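# Hedged usage illustration (toy data, not from the package): pick out the rows
# of one table whose txNames occur in a reference table.
# toy_orfs <- data.table::data.table(txNames = c("tx1", "tx2", "tx3"), score = 1:3)
# toy_ref  <- data.table::data.table(txNames = c("tx3", "tx1"))
# matchByTranscript(toy_orfs, toy_ref)   # returns the tx1 and tx3 rows of toy_orfs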
getIDColumns <- function(dt, allowNull = F){
nIDs <- 0
if (!is.numeric(dt[1,1][[1]])) {
nIDs = nIDs + 1
if (!is.numeric(dt[1,2][[1]])) {
nIDs = nIDs + 1
}
}
if(!nIDs){
if (allowNull) {
return(NULL)
} else {
stop("No id columns found for dt")
}
}
  return(dt[, seq_len(nIDs), with = FALSE])  # return all leading ID columns, not just the nIDs-th one
}
#' fix this to work on string tables
removeIDColumns <- function(dt){
if (!is.numeric(dt[1,1][[1]])) {
dt <- dt[, -1]
if (!is.numeric(dt[1,1][[1]])) {
dt <- dt[, -1]
}
}
return(dt)
}
#' get the uorfs in the database
#' @param withExons should the uorfs be split by exons
#' @param withTranscripts should the uorfs have transcript information,
#' warning, this will duplicate some uorfs.
#' @return a GRangesList or data.table, if(F, F)
getUorfsInDb <- function(withExons = T, withTranscripts = T, uniqueORFs = T) {
if (withExons && withTranscripts) {
if(uniqueORFs) {
if (file.exists(p(dataBaseFolder, "/uniqueUorfsAsGRWithTx.rdata"))) {
load(p(dataBaseFolder, "/uniqueUorfsAsGRWithTx.rdata"))
return(grl)
} else stop("unique uorfs with tx does not exists")
}
if(file.exists(p(dataBaseFolder, "/uoRFsAsGRAllWithTx.rdata"))) {
load(p(dataBaseFolder, "/uoRFsAsGRAllWithTx.rdata"))
return(grl)
} else if(!tableNotExists("uorfsAsGRWithTx")) {
grl <- readTable("uorfsAsGRWithTx", asGR = T)
gr <- unlist(grl, use.names = F)
names(gr) <- gsub("_[0-9]*", "", names(gr))
return(groupGRangesBy(gr, gr$names))
}
stop("uORFs could not be found, check that they exist")
} else if (!withExons) {
return(readTable("uniqueIDs"))
} else if (withExons && !withTranscripts) {
if(uniqueORFs) {
if (file.exists(p(dataBaseFolder, "/uniqueUorfsAsGR.rdata"))) {
load(p(dataBaseFolder, "/uniqueUorfsAsGR.rdata"))
return(grl)
}
}
return(readTable("SplittedByExonsuniqueUORFs", asGR = T))
} else {
stop("not supported way of getting uorfs")
}
}
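# Hedged usage sketch (assumes the uORFome database folder and its tables exist):
# uorf_grl <- getUorfsInDb(withExons = TRUE, withTranscripts = FALSE, uniqueORFs = TRUE)
# length(uorf_grl)            # number of unique uORFs (GRangesList)
# uorf_ids <- getORFNamesDB() # primary-key ORF ids as a character vector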
|
ed75b474ac649b8e04bf4ae1a42114250d10b7f1
|
6b0acbabf78b41cb2bf79128ec0cc47a19704488
|
/assignment3&4bySaurabhBidwai.R
|
8048c52f317063f832edd28652f77f6931550a77
|
[] |
no_license
|
wejay28/R_basics
|
edd757d973dbc37de1486afe17c4df7773eb30f2
|
a58d1d7de3fff2b2d74d22946c81b850ebd18f67
|
refs/heads/master
| 2021-06-14T12:54:43.279637 | 2017-05-21T06:46:14 | 2017-05-21T06:46:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 2,624 |
r
|
assignment3&4bySaurabhBidwai.R
|
#Q.1 print numbers from 1 to 5 using 'repeat' and 'break'
f=1
repeat
{
print(f)
f=f+1
if(f==6){
break()
}
}
#Q.2 Identify whether the number is positive or negative
neg=function(a){
if(a>0){
print("PositiveNumber")
}
else if(a<0){
print("NegativeNumber")
}else{
print("ZeroNumber")
}
}
neg(10)
neg(0)
neg(-10)
#Q.3 print the numbers from 1 to 10 using a while loop
i=1
while(i<11){
print(i)
i=i+1
}
#Q.4 create a vector and sum its elements using a while loop
a=c(1,2,3,4)
w=length(a)
sumw=0
while(w>0){
sumw=sumw+a[w]
w=w-1
}
print(sumw)
#Q.5 create a vector and sum its elements using a for loop
a=c(1:10)
sumf=0
for(i in seq(1,length(a))){
sumf=sumf+a[i]
}
print(sumf)
#Q.6 create a vector and count the even numbers in it
a=c(1:10)
flag=0
for(i in seq(1,length(a))){
if(a[i]%%2==0){
flag=flag+1
}
}
print(flag)
#Q.7 find the factorial of a number
facto=function(z){
f=1
if(z<0){
print("no is negative")
}else if(z==0){
return(1)
}else{
for (i in seq(1,z)) {
f=f*i
}
return(f)
}
}
facto(-2)
facto(0)
facto(1)
facto(3)
facto(10)
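# Hedged aside (not part of the assignment): base R reaches the same result
# without an explicit loop, e.g.
# factorial(10) == facto(10)   # TRUE
# prod(1:10)    == facto(10)   # TRUE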
#Q.8 check whether a number is prime
prime_no=function(a){
if(a<0){
return("no is negative")
  }else if(a==1){
    return("neither prime nor composite")
  }else if(a==2){
    return("prime")
  }
f=0
for(i in seq(2,a-1)){
if(a %% i ==0){
f=1
return("not prime")
}
}
if(f==0){
print("prime")
}
}
prime_no(-2)
prime_no(20)
prime_no(2)
prime_no(1)
prime_no(19)
prime_no(9)
#Q.9 find the factors of a number
fact=function(a){
if(a<0){
return("no is negative")
}else if(a==0){
return("no is zero")
}else{
for(i in seq(1,a)){
if(a %% i ==0){
print(i)
}
}
}
}
fact(120)
fact(10)
fact(0)
fact(-10)
################Assignment4######################
#Q.1
plot(1:10,1:10,type = "n")
for (i in 1:10) {
lines(c(i,i),c(1,20))
}
for(j in 1:20){
lines(c(1,10),c(j,j))
}
#Q.2
plot(1:10,1:10)
for(i in 1:10){
for (j in 1:20) {
points(i,j)
}
}
#Q.3
plot(1:10,1:10)
for(j in 1:20){
color=if(j%%2==0){"blue"}else{"red"}
lines(c(1,10),c(j,j),col=color)
}
#Q.4 plot the sequence of red, blue and green lines
plot(1:10,1:10)
for(j in 1:20){
color=if(j%%3==0){"red"}else if(j%%3==1){"blue"}else if(j%%3==2){"green"}
lines(c(1,10),c(j,j),col=color)
}
|
019be191a59ff96209446bd77c37f6749875976d
|
cb45abba22cc632e19661516ad16d60793103495
|
/Talleres/Problema_FormulaCuadratica.r
|
dd64f8e2ac33d6ca719d4a6f956b3097c6f5eabe
|
[] |
no_license
|
Estebanmc2912/An-lisis-Num-rico
|
b48ee29bbf79588697c88dbc16eb9a1db0be42cc
|
aa6658eefeb799e317181818199548d25f58b2b9
|
refs/heads/master
| 2020-06-23T13:38:39.838320 | 2019-11-12T01:46:35 | 2019-11-12T01:46:35 | 198,640,070 | 0 | 3 | null | null | null | null |
UTF-8
|
R
| false | false | 315 |
r
|
Problema_FormulaCuadratica.r
|
#Pablo Veintemilla & Esteban Moreno:
# QUADRATIC SIMULATION: ax^2 + bx + c = 0.
options(digits=8)
a=3
b=9^12
c=-3
# Sum method
x1=-(b+sqrt(b^2-4*a*c))/(2*a)
# Rationalized formula
x2=-(2*c)/(b+sqrt(b^2-4*a*c))
cat("Solución \n")
cat("Raíz 1: ",x1, " Raíz 2: ",x2,"\n")
|
393ae3009035dc7489263fcddef268b1bc3b4421
|
c830d7ecdd2739c356242a3141beb38960fc44e2
|
/R/freorder.R
|
10c6f6d714b25cc1ccf7529ec833d39fcce24941
|
[] |
no_license
|
STAT545-UBC-hw-2018-19/hw07-janehuang1647
|
8812c99a7eba9b0fd576f2c42ece25cb828fd068
|
84749400b004c6f0cccb02b0c7c7d3f1650db13c
|
refs/heads/master
| 2020-04-05T09:11:51.538674 | 2018-11-13T20:54:22 | 2018-11-13T20:54:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 623 |
r
|
freorder.R
|
#' Reorder Levels of a Factor
#'
#' @param x a factor, an atomic vector. The vector is treated as a categorical variable whose levels will be reordered.
#'
#' @usage freorder(x)
#' @return By default, the function returns the factor with its levels sorted in descending order.
#' @export
#' @seealso \link{reorder}
#' @examples
#' freorder(factor(c(1000,100,10)))
#' freorder(factor(c("a","z","m")))
freorder <- function (x){
# use the factor_check to make sure we have a factor input
factor_check(x)
# then use the dplyr function to reorder the factor
sortedfactor <- reorder(x,dplyr::desc(x))
return(sortedfactor)
}
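# Hedged illustration (expected behaviour, not taken from the package tests):
# levels(freorder(factor(c("a", "z", "m"))))   # "z" "m" "a"  (descending)
# levels(freorder(factor(c(1000, 100, 10))))   # "1000" "100" "10"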
|
24c8067e2da1115d5e3759c1257fcbd88921c5b2
|
b31298d41ca6b8aaf52c01bf69ec6f9f577341bd
|
/creating_covariates_dataset.R
|
1c2815b1a3172b398359b6deaf5adc32b01d7b12
|
[] |
no_license
|
nskaff/CORE
|
b3a73740c6b91b90fcd286ee95b780304cc6a101
|
494041528b0b24cafb2d242050979d700c9b20c5
|
refs/heads/master
| 2021-01-19T17:38:31.117378 | 2017-10-27T14:59:26 | 2017-10-27T14:59:26 | 101,078,656 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 3,554 |
r
|
creating_covariates_dataset.R
|
#creating a useful dataset for covariates
library(dplyr)
library(lubridate)
library(tidyr)
library(googlesheets)
library(mapview)
#loading in data with z-score
my_sheets <- gs_ls() #Will have to authenticate Google here (in browser, very easy)
core <- gs_title("z_score_data") #get whole document
z_scores <- core %>% gs_read_csv(ws = "Sheet1") #work with individual worksheet
z_scores$Lake.ID<-as.character(z_scores$Lake.ID)
#loading data template
core <- gs_title("Datatemplate_CORE_Variables") #get whole document
md <- core %>% gs_read_csv(ws = "Sheet1") #work with individual worksheet
temp_covs<-read.csv('data/coreTemps_Berkeley_9_19.csv', header=T)
temp_covs$date<-as.Date(temp_covs$date)
c3crop<-read.csv("data/core_LULC_C3crop.csv", header=T)
c4crop<-read.csv("data/core_LULC_C4crop.csv", header=T)
c3past<-read.csv("data/core_LULC_C3past.csv", header=T)
c4past<-read.csv("data/core_LULC_C4past.csv", header=T)
urban<-read.csv("data/core_LULC_Urban.csv", header=T)
#taking the mean temperature anomaly by year
temp_covs1<-aggregate(.~year(date),data=temp_covs, FUN=mean, na.action=na.pass)
colnames(temp_covs1)[1]<-"year"
temp_covs2<-temp_covs1[,c(-2)]
#converting temperature to vertical format
temp_covs3<-temp_covs2 %>% gather(key=year)
colnames(temp_covs3)[2:3]<-c("Lake.ID", "mean_annual_temp_anomaly")
temp_covs3$Lake.ID<-gsub("X","",temp_covs3$Lake.ID)
#adding the LULC covariate data
c3crop$year<-year(c3crop$date)
c3crop<-c3crop[-1]
c3crop1<-c3crop %>% gather(key=year)
colnames(c3crop1)[2]<-"Lake.ID"
colnames(c3crop1)[3]<-"%c3crop"
c3crop1$Lake.ID<-gsub("X","",c3crop1$Lake.ID)
covar_data<-full_join(temp_covs3,c3crop1,by=c("Lake.ID", "year") )
c4crop$year<-year(c4crop$date)
c4crop<-c4crop[-1]
c4crop1<-c4crop %>% gather(key=year)
colnames(c4crop1)[2]<-"Lake.ID"
colnames(c4crop1)[3]<-"%c4crop"
c4crop1$Lake.ID<-gsub("X","",c4crop1$Lake.ID)
covar_data<-full_join(covar_data,c4crop1,by=c("Lake.ID", "year") )
c3past$year<-year(c3past$date)
c3past<-c3past[-1]
c3past1<-c3past %>% gather(key=year)
colnames(c3past1)[2]<-"Lake.ID"
colnames(c3past1)[3]<-"%c3past"
c3past1$Lake.ID<-gsub("X","",c3past1$Lake.ID)
covar_data<-full_join(covar_data,c3past1,by=c("Lake.ID", "year") )
c4past$year<-year(c4past$date)
c4past<-c4past[-1]
c4past1<-c4past %>% gather(key=year)
colnames(c4past1)[2]<-"Lake.ID"
colnames(c4past1)[3]<-"%c4past"
c4past1$Lake.ID<-gsub("X","",c4past1$Lake.ID)
covar_data<-full_join(covar_data,c4past1,by=c("Lake.ID", "year") )
urban$year<-year(urban$date)
urban<-urban[-1]
urban1<-urban %>% gather(key=year)
colnames(urban1)[2]<-"Lake.ID"
colnames(urban1)[3]<-"%urban"
urban1$Lake.ID<-gsub("X","",urban1$Lake.ID)
covar_data<-full_join(covar_data,urban1,by=c("Lake.ID", "year") )
covar_data[,"%total_crop"]<-(covar_data$`%c3crop`+covar_data$`%c4crop`)
covar_data[,"%total_past"]<-(covar_data$`%c3past`+covar_data$`%c4past`)
z_scores$Lake.ID<-as.character(z_scores$Lake.ID)
#adding z-score data
covar_data<-full_join(covar_data, z_scores, by=c("year", "Lake.ID"))
#removing years before 1850 and lakes without a Z score marked with X
covar_data1<-covar_data[covar_data$year>=1850 & covar_data$Lake.ID %in% as.character(md$Lake.ID[md$'Included in model'=="y"]),]
#adding in method, area, lat/long, elevation,
md$Lake.ID<-as.character(md$Lake.ID)
covar_data2<-full_join(covar_data1,md[,c(1,15,19,20,21,22)], by=c("Lake.ID"))
#testing to see if all years included
tapply(covar_data1$year, covar_data1$Lake.ID, function(x){length(x)})
write.csv(covar_data1, "data/covariates_data_10_27_17.csv", row.names=F)
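# Hedged refactoring sketch (not in the original script): the five LULC joins
# above follow an identical pattern, so once c3crop1 ... urban1 are built they
# could be combined in one pass with Reduce().
# lulc_long  <- list(c3crop1, c4crop1, c3past1, c4past1, urban1)
# covar_data <- Reduce(function(x, y) full_join(x, y, by = c("Lake.ID", "year")),
#                      lulc_long, init = temp_covs3)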
|
f46802d5fd4d7c7aa6dab382107bc177309905d9
|
96380c781c896f2731e301c9fe17bbb1303b3344
|
/svm_analysis_final.R
|
d5c2b6c50bd1a98b0a80ecfb4288811324c8e530
|
[] |
no_license
|
tborrman/DNA-rep
|
e5c6b955059d48dd5f8fd8dfee5db9ab300481bf
|
d0a311b82029de5537d377550b31cba5c66cb763
|
refs/heads/master
| 2020-04-22T10:15:38.496413 | 2017-10-18T00:21:51 | 2017-10-18T00:21:51 | 42,599,464 | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | false | 16,195 |
r
|
svm_analysis_final.R
|
# A script to analyze DNA replication origin data by support vector machines
library("e1071");
library("ROCR");
# Set paths
begPath <- "/Users/User/Research/DNArep";
wkDir <- paste(begPath, "/Data", sep="");
# Read in ori_data_1.6.txt
full_ori_data <- read.table(paste(wkDir, "/ori_data_1.8.txt", sep=""), header=TRUE, sep="\t", comment.char="");
# Classify data as early or late defined by Scott Yang's parameter n from MIM model s.t.
# early = n > median(n)
# late = n <= median(n)
# Get median n value
full_n <- full_ori_data$yang_n
median_n <- median(full_n, na.rm = TRUE);
# Create class vector for labeling early and late origins
class <- sapply(full_n, function(x) {
if(is.na(x)) {
return(NA);
}
else if(x > median_n) {
return("early");
}
else if(x <= median_n){
return("late");
}
});
# Add classification to data
ori_data_class <- cbind(full_ori_data, class);
# Extract all data containing an n parameter
ori_data_clean <- ori_data_class[-which(is.na(full_n)), ];
# Remove remaining origins in rDNA which skew ChIP-seq
ori_data <- ori_data_clean[-which(ori_data_clean$ID == 534),];
# Give 0 to all NAs in rpd3 data
ori_data[which(is.na(ori_data$knott_update_Rpd3_WT_diff)), "knott_update_Rpd3_WT_diff"] <- 0;
# Use 2/3 of data as a training set for svm model
# Remaining 1/3 of data will be the test set
training_total <- round(nrow(ori_data)* (2/3));
training_indices <- sample(1:nrow(ori_data), training_total);
test_indices <- setdiff(1:nrow(ori_data), training_indices);
training_set <- ori_data[training_indices,];
test_set <- ori_data[test_indices,];
# Create svm model from training set using just macalpine replicate 2 ChIP seq
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
MCM_training <- training_set[c("macalpine_2_MCM_no_mult", "class")];
MCM_test <- test_set[c("macalpine_2_MCM_no_mult")];
svm_model <- svm(class~., data= MCM_training, kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model);
predict_values <- predict(svm_model, MCM_test, probability = TRUE);
# Confusion matrix
confusion_matrix <- table(pred = predict_values, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix["early", "early"];
FP <- confusion_matrix["early", "late"];
TN <- confusion_matrix["late", "late"];
FN <- confusion_matrix["late", "early"];
sensitivity <- TP / (TP + FN);
specificity <- TN / (TN + FP);
# Compute ROC curves
ROC_pred <- prediction(attr(predict_values, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf <- performance(ROC_pred, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC<-as.numeric(performance(ROC_pred, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/1D/", kern_funct, "_svm_plot_1D.pdf", sep="");
pdf(profilePath, width=10, height=8);
#plot(svm_model_2D, MCM_Ku_training);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC, digits = 2), sep = "");
plot(ROC_perf, col= "BLUE", main = title);
dev.off();
}
# Create svm model from training set using just ku data
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
Ku_training <- training_set[c("donaldson_WT_yku70_diff_plus3", "class")];
Ku_test <- test_set[c("donaldson_WT_yku70_diff_plus3")];
svm_model_ku_1 <- svm(class~., data= Ku_training, kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model_ku_1);
predict_values_ku_1 <- predict(svm_model_ku_1, Ku_test, probability = TRUE);
# Confusion matrix
confusion_matrix_ku_1 <- table(pred = predict_values_ku_1, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix_ku_1["early", "early"];
FP <- confusion_matrix_ku_1["early", "late"];
TN <- confusion_matrix_ku_1["late", "late"];
FN <- confusion_matrix_ku_1["late", "early"];
sensitivity_ku_1 <- TP / (TP + FN);
specificity_ku_1 <- TN / (TN + FP);
# Compute ROC curves
ROC_pred_ku_1 <- prediction(attr(predict_values_ku_1, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf_ku_1 <- performance(ROC_pred_ku_1, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC_ku_1 <-as.numeric(performance(ROC_pred_ku_1, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/ku_1/", kern_funct, "_svm_plot_1D_ku.pdf", sep="");
pdf(profilePath, width=10, height=8);
#plot(svm_model_2D, MCM_Ku_training);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC_ku_1, digits = 2), sep = "");
plot(ROC_perf_ku_1, col= "BLUE", main = title);
dev.off();
}
# Create svm model from training set using just rpd3 data
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
Rpd3_training <- training_set[c("knott_update_Rpd3_WT_diff", "class")];
Rpd3_test <- test_set[c("knott_update_Rpd3_WT_diff")];
svm_model_rpd3_1 <- svm(class~., data= Rpd3_training, kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model_rpd3_1);
predict_values_rpd3_1 <- predict(svm_model_rpd3_1, Rpd3_test, probability = TRUE);
# Confusion matrix
confusion_matrix_rpd3_1 <- table(pred = predict_values_rpd3_1, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix_rpd3_1["early", "early"];
FP <- confusion_matrix_rpd3_1["early", "late"];
TN <- confusion_matrix_rpd3_1["late", "late"];
FN <- confusion_matrix_rpd3_1["late", "early"];
sensitivity_rpd3_1 <- TP / (TP + FN);
specificity_rpd3_1 <- TN / (TN + FP);
# Compute ROC curves
ROC_pred_rpd3_1 <- prediction(attr(predict_values_rpd3_1, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf_rpd3_1 <- performance(ROC_pred_rpd3_1, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC_rpd3_1 <-as.numeric(performance(ROC_pred_rpd3_1, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/rpd3_1/", kern_funct, "_svm_plot_1D_rpd3_1.pdf", sep="");
pdf(profilePath, width=10, height=8);
#plot(svm_model_2D, MCM_Ku_training);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC_rpd3_1, digits = 2), sep = "");
plot(ROC_perf_rpd3_1, col= "BLUE", main = title);
dev.off();
}
# Let's add the ku mutant difference in Trep data and train again!
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
#MCM_col = 82;
#Ku_col = 63;
#class_col = 93;
MCM_Ku_training <- training_set[c("macalpine_2_MCM_no_mult", "donaldson_WT_yku70_diff_plus3", "class")];
MCM_KU_test <- test_set[c("macalpine_2_MCM_no_mult", "donaldson_WT_yku70_diff_plus3")];
svm_model_2D <- svm(class~., data= MCM_Ku_training , kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model_2D);
#plot(svm_model_2D, MCM_Ku_training);
predict_values_2D <- predict(svm_model_2D, MCM_KU_test, probability = TRUE);
# Confusion matrix
confusion_matrix_2D <- table(pred = predict_values_2D, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix_2D["early", "early"];
FP <- confusion_matrix_2D["early", "late"];
TN <- confusion_matrix_2D["late", "late"];
FN <- confusion_matrix_2D["late", "early"];
sensitivity_2D <- TP / (TP + FN);
specificity_2D <- TN / (TN + FP);
# Compute ROC curves
ROC_pred_ku <- prediction(attr(predict_values_2D, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf_ku <- performance(ROC_pred_ku, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC_ku<-as.numeric(performance(ROC_pred_ku, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/ku/", kern_funct, "_svm_plot_2D_ku.pdf", sep="");
pdf(profilePath, width=10, height=8);
plot(svm_model_2D, MCM_Ku_training);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC_ku, digits = 2), sep = "");
plot(ROC_perf_ku, col= "BLUE", main = title);
dev.off();
}
# Let's try rpd3 data alone and train again!
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
#MCM_col = 82;
#Ku_col = 63;
#class_col = 93;
MCM_rpd3_training <- training_set[c("macalpine_2_MCM_no_mult", "knott_update_Rpd3_WT_diff", "class")];
MCM_rpd3_test <- test_set[c("macalpine_2_MCM_no_mult", "knott_update_Rpd3_WT_diff")];
svm_model_rpd3 <- svm(class~., data= MCM_rpd3_training , kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model_rpd3);
#plot(svm_model_2D, MCM_Ku_training);
predict_values_rpd3 <- predict(svm_model_rpd3, MCM_rpd3_test, probability = TRUE);
# Confusion matrix
confusion_matrix_rpd3 <- table(pred = predict_values_rpd3, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix_rpd3["early", "early"];
FP <- confusion_matrix_rpd3["early", "late"];
TN <- confusion_matrix_rpd3["late", "late"];
FN <- confusion_matrix_rpd3["late", "early"];
sensitivity_rpd3 <- TP / (TP + FN);
specificity_rpd3 <- TN / (TN + FP);
# Compute ROC curves
ROC_pred_rpd3 <- prediction(attr(predict_values_rpd3, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf_rpd3 <- performance(ROC_pred_rpd3, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC_rpd3 <-as.numeric(performance(ROC_pred_rpd3, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/rpd3/", kern_funct, "_svm_plot_2D_rpd3.pdf", sep="");
pdf(profilePath, width=10, height=8);
plot(svm_model_rpd3, MCM_rpd3_training);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC_rpd3, digits = 2), sep = "");
plot(ROC_perf_rpd3, col= "BLUE", main = title);
dev.off();
}
# Let's add the Rpd3 dependent data and ku and train again!
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
#MCM_col = 82;
#Ku_col = 63;
#Rpd3_col = 92;
#class_col = 93;
MCM_Ku_rpd3_training <- training_set[c("macalpine_2_MCM_no_mult", "donaldson_WT_yku70_diff_plus3","knott_update_Rpd3_WT_diff", "class")];
MCM_KU_rpd3_test <- test_set[c("macalpine_2_MCM_no_mult", "donaldson_WT_yku70_diff_plus3","knott_update_Rpd3_WT_diff")];
svm_model_ku_rpd3 <- svm(class~., data= MCM_Ku_rpd3_training , kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model_ku_rpd3);
#plot(svm_model_2D, MCM_Ku_training);
predict_values_ku_rpd3 <- predict(svm_model_ku_rpd3, MCM_KU_rpd3_test, probability = TRUE);
# Confusion matrix
confusion_matrix_ku_rpd3 <- table(pred = predict_values_ku_rpd3, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix_ku_rpd3["early", "early"];
FP <- confusion_matrix_ku_rpd3["early", "late"];
TN <- confusion_matrix_ku_rpd3["late", "late"];
FN <- confusion_matrix_ku_rpd3["late", "early"];
sensitivity_ku_rpd3 <- TP / (TP + FN);
specificity_ku_rpd3 <- TN / (TN + FP);
# Compute ROC curves
ROC_pred_ku_rpd3 <- prediction(attr(predict_values_ku_rpd3, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf_ku_rpd3 <- performance(ROC_pred_ku_rpd3, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC_ku_rpd3 <-as.numeric(performance(ROC_pred_ku_rpd3, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/ku_rpd3/", kern_funct, "_svm_plot_ku_rpd3.pdf", sep="");
pdf(profilePath, width=10, height=8);
plot(svm_model_ku_rpd3, MCM_Ku_rpd3_training, macalpine_2_MCM_no_mult ~ knott_update_Rpd3_WT_diff);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC_ku_rpd3, digits = 2), sep = "");
plot(ROC_perf_ku_rpd3, col= "BLUE", main = title);
dev.off();
}
# Let's add everything we got and train again!
functions <- c("linear", "polynomial", "radial", "sigmoid");
for (kern_funct in functions) {
#MCM_col = 82;
#Ku_col = 63;
#Rpd3_col = 92;
#class_col = 93;
MCM_8D_training <- training_set[c("macalpine_2_MCM_no_mult","dsSPD4a_MCM_no_mult", "dsSPD4b_1_MCM_no_mult", "dsSPD8a_MCM_no_mult", "dsSPD8b_MCM_no_mult", "macalpine_1_MCM_no_mult","donaldson_WT_yku70_diff_plus3","knott_update_Rpd3_WT_diff", "class")];
MCM_8D_test <- test_set[c("macalpine_2_MCM_no_mult", "dsSPD4a_MCM_no_mult", "dsSPD4b_1_MCM_no_mult", "dsSPD8a_MCM_no_mult", "dsSPD8b_MCM_no_mult", "macalpine_1_MCM_no_mult", "donaldson_WT_yku70_diff_plus3","knott_update_Rpd3_WT_diff")];
svm_model_8D <- svm(class~., data= MCM_8D_training , kernel = kern_funct, cost = 1, type = "C-classification", probability = TRUE);
summary(svm_model_8D);
#plot(svm_model_2D, MCM_Ku_training);
predict_values_8D <- predict(svm_model_8D, MCM_8D_test, probability = TRUE);
# Confusion matrix
confusion_matrix_8D <- table(pred = predict_values_8D, true = test_set$class);
  # Compute sensitivity = TP / (TP + FN) and specificity = TN / (TN + FP)
# such that Positive = early and Negative = late
TP <- confusion_matrix_8D["early", "early"];
FP <- confusion_matrix_8D["early", "late"];
TN <- confusion_matrix_8D["late", "late"];
FN <- confusion_matrix_8D["late", "early"];
sensitivity_8D <- TP / (TP + FN);
specificity_8D <- TN / (TN + FP);
# Compute ROC curves
ROC_pred_8D <- prediction(attr(predict_values_8D, "probabilities") [,"early"], test_set$class == "early" );
ROC_perf_8D <- performance(ROC_pred_8D, measure = "tpr", x.measure = "fpr");
#profilePath <- paste(begPath, "/plot.pdf", sep="");
#plot(ROC_perf,col="BLUE");
#dev.off();
ROC_AUC_8D <-as.numeric(performance(ROC_pred_8D, measure = "auc", x.measure
= "cutoff")@ y.values);
profilePath <- paste(begPath, "/Results/svm/8D/", kern_funct, "_svm_plot_8D.pdf", sep="");
pdf(profilePath, width=10, height=8);
plot(svm_model_8D, MCM_8D_training, macalpine_2_MCM_no_mult ~ knott_update_Rpd3_WT_diff);
title = paste(" ROC Plot: AUC = ", round(ROC_AUC_8D, digits = 2), sep = "");
plot(ROC_perf_8D, col= "BLUE", main = title);
dev.off();
}
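# Hedged refactoring sketch (not part of the original analysis): each kernel loop
# above repeats the same train/predict/evaluate steps, so they could be wrapped in
# one helper. Column names are taken from the script; the helper itself is hypothetical.
evaluate_svm <- function(features, training_set, test_set, kern_funct) {
  train <- training_set[c(features, "class")]
  test  <- test_set[features]
  model <- svm(class ~ ., data = train, kernel = kern_funct, cost = 1,
               type = "C-classification", probability = TRUE)
  pred  <- predict(model, test, probability = TRUE)
  cm    <- table(pred = pred, true = test_set$class)
  roc   <- prediction(attr(pred, "probabilities")[, "early"],
                      test_set$class == "early")
  auc   <- as.numeric(performance(roc, measure = "auc")@y.values)
  list(sensitivity = cm["early", "early"] / sum(cm[, "early"]),
       specificity = cm["late", "late"]  / sum(cm[, "late"]),
       auc = auc)
}
# Example call mirroring the MCM + Ku model above:
# evaluate_svm(c("macalpine_2_MCM_no_mult", "donaldson_WT_yku70_diff_plus3"),
#              training_set, test_set, "radial")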
|
64cc51a1ef8d48d11033c35dc7f7224035e74330
|
d373be2775975e19c92321809900453645663fc9
|
/R/landmarks.R
|
2cf2c3a128d84c3158d2e7252bf0aae51ce2b01c
|
[] |
no_license
|
spencerbell/tigris
|
84b4f532d957fca402df375da30fcaa2ec7f4f6d
|
8779398dc2203ad917c7c78035c99ea92f237195
|
refs/heads/master
| 2021-01-19T13:04:07.240755 | 2017-04-12T14:17:41 | 2017-04-12T14:17:41 | 88,059,990 | 0 | 0 | null | 2017-04-12T14:16:50 | 2017-04-12T14:16:49 | null |
UTF-8
|
R
| false | false | 4,472 |
r
|
landmarks.R
|
#' Download the Military Installation National Shapefile into R
#'
#' Description from the US Census Bureau: "The Census Bureau includes landmarks
#' such as military installations in the MAF/TIGER database for
#' locating special features and to help enumerators during field operations. The Census Bureau adds
#' landmark features to the database on an as-needed basis and does not attempt to ensure that all
#' instances of a particular feature are included. For additional information about area landmarks, please
#' see Section 3.12, Landmarks (Area and Point)."
#'
#' This file does not include the three point landmarks identified as military installation features in the
#' MAF/TIGER database. These point landmarks are included in the point landmark shapefile.
#' Although almost all military installations have assigned 8-character National Standard (GNIS) codes, the
#' Census Bureau has not loaded most of this data into the MAF/TIGER database. The 2015 military
#' shapefiles contain few values in the ANSICODE field.
#' @param ... arguments to be passed to the underlying `load_tiger` function, which is not exported.
#' Options include \code{refresh}, which specifies whether or not to re-download shapefiles
#' (defaults to \code{FALSE}), and \code{year}, the year for which you'd like to download data
#' (defaults to 2015).
#' @seealso \url{http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2015/TGRSHP2015_TechDoc_Ch3.pdf}
#' @export
military <- function(...) {
url <- "http://www2.census.gov/geo/tiger/TIGER2015/MIL/tl_2015_us_mil.zip"
return(load_tiger(url, tigris_type = "military", ...))
}
#' Download a point or area landmarks shapefile into R
#'
#' Description from the US Census Bureau:
#' "The Census Bureau includes landmarks in the MAF/TIGER database (MTDB) for locating special features
#' and to help enumerators during field operations. Some of the more common landmark types include area
#' landmarks such as airports, cemeteries, parks, and educational facilities and point landmarks such as
#' schools and churches."
#'
#' The Census Bureau adds landmark features to the database on an as-needed basis and makes no
#' attempt to ensure that all instances of a particular feature were included. The absence of a landmark
#' such as a hospital or prison does not mean that the living quarters associated with that landmark were
#' excluded from the 2010 Census enumeration. The landmarks were not used as the basis for building or
#' maintaining the address list used to conduct the 2010 Census.
#'
#' Area landmark and area water features can overlap; for example, a park or other special land-use feature
#' may include a lake or pond. In this case, the polygon covered by the lake or pond belongs to a water
#' feature and a park landmark feature. Other kinds of landmarks can overlap as well. Area landmarks can
#' contain point landmarks, but these features are not linked in the TIGER/Line Shapefiles.
#'
#' Landmarks may be identified by a MAF/TIGER feature class code only and may not have a name. Each
#' landmark has a unique area landmark identifier (AREAID) or point landmark identifier (POINTID) value.
#'
#' @seealso \url{http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2015/TGRSHP2015_TechDoc_Ch3.pdf}
#'
#' @param state The state for which you'd like to download the landmarks
#' @param type Whether you would like to download point landmarks (\code{"point"}) or area landmarks (\code{"area"}). Defaults to \code{"point"}.
#' @param ... arguments to be passed to the underlying `load_tiger` function, which is not exported.
#' Options include \code{refresh}, which specifies whether or not to re-download shapefiles
#' (defaults to \code{FALSE}), and \code{year}, the year for which you'd like to download data
#' (defaults to 2015).
#' @export
landmarks <- function(state, type = "point", ...) {
state <- validate_state(state)
if (type == "area") {
url <- paste0("http://www2.census.gov/geo/tiger/TIGER2015/AREALM/tl_2015_", state, "_arealm.zip")
return(load_tiger(url, tigris_type = "area_landmark", ...))
} else if (type == "point") {
url <- paste0("http://www2.census.gov/geo/tiger/TIGER2015/POINTLM/tl_2015_", state, "_pointlm.zip")
return(load_tiger(url, tigris_type = "point_landmark", ...))
} else {
stop('The argument supplied to type must be either "point" or "area"', call. = FALSE)
}
}
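# Hedged usage sketch (not part of the package source; requires an internet
# connection and the package's download machinery):
# bases     <- military()                        # national military installations
# or_points <- landmarks("OR", type = "point")   # point landmarks for Oregon
# or_areas  <- landmarks("OR", type = "area")    # area landmarks for Oregon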
|
71a1fc393ad2dd6db4ef4e5c470b88d3c4f4a6ef
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.servicecatalog/man/associate_service_action_with_provisioning_artifact.Rd
|
be3064ef5c4d3326864d452a6cf70f3c53b011e7
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
R
| false | true | 1,230 |
rd
|
associate_service_action_with_provisioning_artifact.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.servicecatalog_operations.R
\name{associate_service_action_with_provisioning_artifact}
\alias{associate_service_action_with_provisioning_artifact}
\title{Associates a self-service action with a provisioning artifact}
\usage{
associate_service_action_with_provisioning_artifact(ProductId,
ProvisioningArtifactId, ServiceActionId, AcceptLanguage = NULL)
}
\arguments{
\item{ProductId}{[required] The product identifier. For example, \code{prod-abcdzk7xy33qa}.}
\item{ProvisioningArtifactId}{[required] The identifier of the provisioning artifact. For example, \code{pa-4abcdjnxjj6ne}.}
\item{ServiceActionId}{[required] The self-service action identifier. For example, \code{act-fs7abcd89wxyz}.}
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
}
\description{
Associates a self-service action with a provisioning artifact.
}
\section{Accepted Parameters}{
\preformatted{associate_service_action_with_provisioning_artifact(
ProductId = "string",
ProvisioningArtifactId = "string",
ServiceActionId = "string",
AcceptLanguage = "string"
)
}
}
|