blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-327) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-91) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-134) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 46 values) | visit_date (timestamp[us], 2016-08-02 22:44:29 to 2023-09-06 08:39:28) | revision_date (timestamp[us], 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | committer_date (timestamp[us], 1977-08-08 00:00:00 to 2023-09-05 12:13:49) | github_id (int64, 19.4k to 671M, ⌀) | star_events_count (int64, 0 to 40k) | fork_events_count (int64, 0 to 32.4k) | gha_license_id (stringclasses, 14 values) | gha_event_created_at (timestamp[us], 2012-06-21 16:39:19 to 2023-09-14 21:52:42, ⌀) | gha_created_at (timestamp[us], 2008-05-25 01:21:32 to 2023-06-28 13:19:12, ⌀) | gha_language (stringclasses, 60 values) | src_encoding (stringclasses, 24 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7 to 9.18M) | extension (stringclasses, 20 values) | filename (stringlengths 1-141) | content (stringlengths 7-9.18M) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
53a8896e8ffd467a6930601f220ab40e66e9da9b | 2dd0350d1b7d79a54aa88a58e1e259b9f35f69de | /energy.R | 90e9c45b781edea714f29d85776a10e993ee82b7 | [] | no_license | hannyh/Time-Series | 75541bf508db0ae5e34a19b0233f30f686a78e19 | 3afedb974048aa92ebce075fb5f0ae6ffdce6aab | refs/heads/master | 2020-03-25T12:12:21.834685 | 2018-08-06T18:00:00 | 2018-08-06T18:00:00 | 143,761,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,024 | r | energy.R |
# US Residential energy consumption
#Data from site: https://www.eia.gov/totalenergy/data/browser/?tbl=T02.02#/?f=M
#accessed Jan 12, 2018
data1 <- read.csv(file="/Users/hannahward/Documents/School/Winter 2018/Regression/Time Series/energy.csv", header=TRUE)
head(data1)
#Subset to TERCBUS Total Energy Consumed by the Residential Sector
data2 <- subset(data1, MSN=="TERCBUS")
#Subset to my lifetime
data3 <- subset(data2, data2$YYYYMM > 199100)
#Removes yearly total (coded month 13)
data4 <- subset(data3, data3$YYYYMM%%100 != 13)
energy <- as.numeric(data4$Value)
head(energy)
tail(energy)
#EDA: plot data to look for monthly patterns
plot(energy~as.factor(data4$YYYYMM), pch=19)
#or
plot(energy, type="b")
#There appears to be a cyclical trend going on with the months. Some months of the year have high energy
#consumption, while other months have lower energy consumption. This business/economic trend could be
#caused by the weather in the United States. Many states are much colder in the winter, requiring more
#energy to keep a house warm. It is also very warm in the summer in some states, and many use air
# conditioning to keep their homes cool, which also increases energy use within the residential sector.
T <- length(energy)
#Analysis
#Fit Model ARIMA (1,1,1)x(1,1,1)12
# Features month to month correlation as well as year to year correlation
library(astsa)
energy.out <- sarima(energy,1,1,1,1,1,1,12)
#Report of Parameter Estimates and Standard Errors
energy.out$ttable
#Note ar1 is lower case phi, ma1 is lower case theta, sar1 is capital phi, and sma1 is capital theta
#Predictions for the next 24 Months
energy.future <- sarima.for(energy, n.ahead=24,1,1,1,1,1,1,12)
# compute 95% prediction intervals
L <- energy.future$pred - 2*energy.future$se
U <- energy.future$pred + 2*energy.future$se
#Table of predictions and prediction intervals
cbind(energy.future$pred, L, U)
#Graphic
plot(energy[290:T], type="b", xlim=c(0,T-290+24), main="24 Month Prediction of US Residential Energy Consumption",
ylab="Energy Consumption (in trillion Btu)", pch=19)
lines((T+1-290):(T-290+length(energy.future$pred)), energy.future$pred, col="red",type="b",pch=19)
lines((T+1-290):(T-290+length(energy.future$pred)), L, col="darkgray",lty=2)
lines((T+1-290):(T-290+length(energy.future$pred)), U ,col="darkgray",lty=2)
#Research Task: Predict US Residential energy consumption for the next 2 years (24 months)
#Data Features: Month to Month correlation as well as year over year correlation. We expect the
#pattern to continue for the next 24 months.
# Analysis Weaknesses:
# Doesn't account for long-term trends and changes, such as global warming.
# Doesn't account for changes in the demand for energy due to the increase of alternative energy
# sources that people can provide for themselves.
#Challenge
##Research Task: Predict the Daily High Temperature at the Salt Lake City Airport for the next year
##Data found at https://www.wunderground.com/history/airport/
|
1af050355212715518eca1a29ee7919ad0dd223f | 6334b663b9508cf0cda2d992f3efdffc4b4ec2cf | /man/predict.textmining.Rd | 4bcaf1fe710e217cbffbdef19aab1a5a66015bd3 | [] | no_license | cran/fdm2id | 40f7fb015f3ae231ca15ea7f5c5626187f753e1b | c55e577541b49e878f581b44dd2a8bae205779d0 | refs/heads/master | 2023-06-29T00:54:58.024554 | 2023-06-12T12:10:02 | 2023-06-12T12:10:02 | 209,820,600 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,016 | rd | predict.textmining.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text.R
\name{predict.textmining}
\alias{predict.textmining}
\title{Model predictions}
\usage{
\method{predict}{textmining}(object, test, fuzzy = FALSE, ...)
}
\arguments{
\item{object}{The classification model (of class \code{\link{textmining-class}}, created by \code{\link{TEXTMINING}}).}
\item{test}{The test set (a \code{data.frame})}
\item{fuzzy}{A boolean indicating whether fuzzy classification is used or not.}
\item{...}{Other parameters.}
}
\value{
A vector of predicted values (\code{factor}).
}
\description{
This function predicts values based upon a model trained for text mining.
}
\examples{
\dontrun{
require (text2vec)
data ("movie_review")
d = movie_review [, 2:3]
d [, 1] = factor (d [, 1])
d = splitdata (d, 1)
model = TEXTMINING (d$train.x, NB, labels = d$train.y, mincount = 50)
pred = predict (model, d$test.x)
evaluation (pred, d$test.y)
}
}
\seealso{
\code{\link{TEXTMINING}}, \code{\link{textmining-class}}
}
|
092ff7757e99e41937fd01c15a64a7eaa7098bf8 | 09d9809daccc4a15453bd699383e002780847c49 | /cachematrix.R | a60bccc2356e9da7c6b99fd4f1e942cef595af6b | [] | no_license | chadpmcd/ProgrammingAssignment2 | 6e5c1f51713d4040f724fdd52ad2745e807bdda8 | 9dceb616767d328ebda4ce2f945fb795f2acdb32 | refs/heads/master | 2021-01-12T19:24:21.040533 | 2015-06-21T02:04:11 | 2015-06-21T02:04:11 | 37,774,042 | 0 | 0 | null | 2015-06-20T15:13:53 | 2015-06-20T15:13:52 | null | UTF-8 | R | false | false | 1,483 | r | cachematrix.R | ## --------------
## The set of functions in this file allows one to get the inverse of a matrix using the
## solve function. The inverse matrix is cached in the makeCacheMatrix environment
## and can be updated and pulled from the cacheSolve function environment.
## --------------
## --------------
## makeCacheMatrix
## This function creates sub functions for setting a matrix, getting a matrix,
## setting the inverse of a matrix, and getting the inverse of a matrix.
## The inverse matrix is stored in memory for later use.
## --------------
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(solve) i <<- solve
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## --------------
## cacheSolve
## Using the functions defined in makeCacheMatrix, this checks whether the inverse
## has already been cached and returns it if so; otherwise it calculates the
## inverse matrix and stores it in the cache.
## --------------
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
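## ------------------------------------------------------------------
## Illustrative usage (a minimal sketch, not part of the assignment spec;
## the example matrix is arbitrary):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
## cacheSolve(m)   ## computes the inverse and caches it
## cacheSolve(m)   ## prints "getting cached data" and returns the cached inverse
## ------------------------------------------------------------------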
|
6dd4bc7c7457aaa7fbabc7b0454c291dcf6f38f9 | e1635e481a60c783abf3fc4575da2d81713dfd49 | /R/get_gap_index_raster.R | 2f6941bf237b5253370b1757cd52483ac2feffae | [
"MIT"
] | permissive | traitecoevo/mortality_bci | e5b7b7b7bf712458a42aac58258a5749c16268a4 | ff38c77011f36dcd27452f9e8fd84e5842e3e336 | refs/heads/master | 2021-03-27T18:23:54.635511 | 2019-01-07T23:36:43 | 2019-01-07T23:36:43 | 20,089,381 | 7 | 3 | null | 2018-08-26T23:28:47 | 2014-05-23T06:40:30 | R | UTF-8 | R | false | false | 1,526 | r | get_gap_index_raster.R | #' Creates gap index raster for BCI plot for censuses 1985 to 1990 and 1990 to 1995
#'
#' Creates gap index raster for BCI plot for censuses 1985 to 1990 and 1990 to 1995
#' @param canopy_data Dataframe containing canopy data.
#' @param weight_matrix Integer matrix. A weight to be applied to both focal cell and those surround it.
#' Default: matrix(c(1, 1, 1, 1, 8, 1, 1, 1, 1), 3, 3) weights the focal cell as the sum of all immediate neighbouring cells
#' @return List of rasters
#' @author James Camac (\email{[email protected]}) & Daniel Falster (\email{[email protected]})
#' @export
get_gap_index_raster <- function(canopy_data,weight_matrix = matrix(c(1, 1, 1, 1, 8, 1, 1, 1, 1), 3, 3)) {
`%>%` <- magrittr::`%>%`
gap_data(canopy_data) %>%
raster::as.data.frame(.) %>% # converts to df for sp package
{sp::coordinates(.) <- ~x+y; .} %>%
raster::shift(x=2.5, y=2.5) %>% # centers coordinates on cell mid point
sp::split(.$censusid) %>% # splits by census
lapply(function(x) { # Converts to raster
raster::rasterFromXYZ(raster::as.data.frame(x)[c('x','y','gap_index')], res = c(5,5))
}) %>% # calculates mean gap index value using weights
lapply(raster::focal, weight_matrix, mean) %>% # calculates mean gap index value using weights
lapply(setNames, 'gap_index') %>% # Names the gap index column
lapply(function(x) x/raster::maxValue(x)) %>% # Converts to binary scale 0 = no gap 1 = full gap.
{names(.) <- c("1985 to 1990", "1990 to 1995"); .}
} |
2050f47390f3cd4187507ffe435d47641a81bb0c | 5e42a668e417fd55fe28ecee719c759016f963b9 | /R/make_linter_from_regex.R | ed98441a3efb3a6b7f500b703d29b1b3c9f8ef9c | [
"MIT"
] | permissive | cordis-dev/lintr | 2120e22820e8499ca3066fa911572fd89c49d300 | cb694d5e4da927f56c88fa5d8972594a907be59a | refs/heads/main | 2023-08-05T08:50:42.679421 | 2023-07-25T13:21:29 | 2023-07-25T13:21:29 | 225,583,354 | 0 | 0 | NOASSERTION | 2019-12-03T09:41:30 | 2019-12-03T09:41:30 | null | UTF-8 | R | false | false | 3,388 | r | make_linter_from_regex.R | make_linter_from_regex <- function(regex,
lint_type,
lint_msg,
ignore_strings = TRUE) {
# If a regex-based linter is found, only flag those lints that occur within
# a relevant section of source code
.in_ignorable_position <- function(source_expression, line_number, match) {
ignore_strings && in_string(source_expression, line_number, match)
}
function() {
Linter(function(source_expression) {
if (!is_lint_level(source_expression, "expression")) {
return(list())
}
all_matches <- re_matches(
source_expression[["lines"]],
regex,
locations = TRUE,
global = TRUE
)
line_numbers <- as.integer(names(source_expression[["lines"]]))
lints <- Map(
function(line_matches, line_number) {
lapply(
split(line_matches, seq_len(nrow(line_matches))),
function(.match) {
if (
is.na(.match[["start"]]) ||
.in_ignorable_position(source_expression, line_number, .match)
) {
return()
}
start <- .match[["start"]]
end <- .match[["end"]]
Lint(
filename = source_expression[["filename"]],
line_number = line_number,
column_number = start,
type = lint_type,
message = lint_msg,
line = source_expression[["lines"]][[as.character(line_number)]],
ranges = list(c(start, end))
)
}
)
},
all_matches,
line_numbers
)
Filter(function(x) any(lengths(x) > 0L), lints)
})
}
}
#' Determine if a regex match is covered by an expression in a source_expression
#'
#' @param source_expression A source_expression
#' @param line_number,match The position where a regex match was observed.
#' match must have entries "start" and "end".
#' @param token_type Restrict analysis to tokens of this type, for example,
#' with token_type = "STR_CONST" you can check that a regex match occurs
#' within a string
#' @noRd
is_match_covered <- function(source_expression, line_number, match, token_type = NULL) {
pc <- source_expression[["parsed_content"]]
if (!is.null(token_type)) {
pc <- pc[pc[["token"]] == token_type, ]
}
covering_rows <- pc[["line1"]] <= line_number & pc[["line2"]] >= line_number
pc_cover <- pc[covering_rows, ]
any_single_line_covers <- function() {
x <- pc_cover[pc_cover[["line1"]] == pc_cover[["line2"]], ]
any(
x[["col1"]] <= match[["start"]] & x[["col2"]] >= match[["end"]]
)
}
any_multi_line_covers <- function() {
x <- pc_cover[pc_cover[["line1"]] < pc_cover[["line2"]], ]
any(
(x[["line1"]] < line_number & x[["line2"]] > line_number) |
(x[["line1"]] == line_number & x[["col1"]] <= match[["start"]]) |
(x[["line2"]] == line_number & x[["col2"]] >= match[["end"]])
)
}
any_single_line_covers() || any_multi_line_covers()
}
in_string <- function(source_expression, line_number, match) {
# do any of the strings in the parsed content contain the matched regex?
is_match_covered(source_expression, line_number, match, "STR_CONST")
}
|
9da9b2827e015a0d9645021f9a7d34f89d3458ed | 4970a3f8a4ca8a42a6fb22f454265691544f1810 | /man/ca125.Rd | 00764d84c8cfe6ed2158d36a2c919b466442dc61 | [] | no_license | Penncil/xmeta | d2ee5b14843d88f1b28c3e3755816269103cbbcd | 832b3f244648818cf2df2691ec5dd7bfa21bc810 | refs/heads/master | 2023-04-08T17:04:05.411553 | 2023-04-04T17:05:36 | 2023-04-04T17:05:36 | 249,091,838 | 4 | 1 | null | 2020-03-22T01:27:08 | 2020-03-22T01:27:07 | null | UTF-8 | R | false | false | 1,120 | rd | ca125.Rd | \name{ca125}
\alias{ca125}
\docType{data}
\title{Recurrent ovarian carcinoma study}
\description{A meta-analysis of 52 studies that were reported between January 1995 and November 2007.}
\format{
The data frame contains the following columns:
\describe{
\item{n}{total number of subjects}
\item{PiY}{disease prevalence}
\item{SeY}{true positive}
\item{n1}{subjects with disease}
\item{SpY}{true negative}
\item{n1}{healthy individuals}
}
}
\references{
Chen, Y., Liu, Y., Chu, H., Lee, M. and Schmid, C. (2017) A simple and robust method for multivariate meta-analysis of diagnostic test accuracy, Statistics in Medicine, 36, 105-121.
Gu P, Pan L, Wu S, Sun L, Huang G. CA 125, PET alone, PET-CT, CT and MRI in diagnosing recurrent ovarian carcinoma: a systematic review and meta-analysis. European journal of radiology 2009; 71(1):164-174.
}
\note{
The dataset \code{ca125} is used to conduct multivariate meta-analysis of diagnostic test accuracy.
}
\seealso{
\code{\link{mmeta}},
\code{\link{summary.mmeta}}
}
\examples{
data(ca125)
summary(ca125)
}
\keyword{datasets}
|
958c114dc04d6fa481e667ec4668e1e137915b18 | 192fd3dbc491d3c36bd9351f02cf9b5957ea56d1 | /R Packages/icdcoder/man/getChildrenICD10.Rd | ad9eb9ae010509c3ab5b4fb92858c2ea6bd38d37 | [] | no_license | ryerex/Research_and_Methods | d4d211defdbee83e47ecc72c59944c3f60a3bcca | 4010b75a5521c2c18ee624d48257ee99b29a7777 | refs/heads/master | 2023-05-26T01:54:17.048907 | 2020-08-05T16:14:29 | 2020-08-05T16:14:29 | 91,369,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 411 | rd | getChildrenICD10.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/utils.r
\name{getChildrenICD10}
\alias{getChildrenICD10}
\title{Gets all children for a single icd10 code (i.e., those under the
current hierarchy)}
\usage{
getChildrenICD10(icd10)
}
\arguments{
\item{icd10}{icd10 code}
}
\description{
Gets all children for a single icd10 code (i.e., those under the
current hierarchy)
}
|
1e4305d2a19b45010e456ff7c9dbbd5afca06a6b | 4f2e87dfbb407fc5f2510622ca048401de67adf3 | /diversification_analysis/mammals/orders/cetacea/run_TESS.R | 852af64299c987fa5aab3e4be3b6c662218883df | [] | no_license | naturalis/RiseAndFall | 2bd5d1f2c4afac54237ffafc5eefca87257c720b | fb612a80a2b68ee61ce228e3c6e47ced6a47458b | refs/heads/master | 2021-03-27T08:52:57.679485 | 2017-08-30T13:00:05 | 2017-08-30T13:00:05 | 52,791,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,622 | r | run_TESS.R | # Diversification rates using TESS
library(tools)
library(optparse)
library(TESS)
option_list <- list(
make_option("--i", type="integer", default=200000,
help=("Number of MCMC iterations to run [default %default]."),
metavar="Iterations"),
make_option("--rho", type="double", default=1,
help="Sampling fraction [default %default]",
metavar="rho"),
make_option("--E_shifts", type="integer", default=2,
help="Expected numebr of rate shifts [default %default]",
metavar="E_shifts"),
make_option("--E_me", type="integer", default=2,
help="Expected number of mass extinctions [default %default]",
metavar="E_me")
)
parser_object <- OptionParser(usage = "Usage: %prog [Options] [TREE]\n\n [TREE] Tree file with full path \n",
option_list=option_list, description="")
opt <- parse_args(parser_object, args = commandArgs(trailingOnly = TRUE), positional_arguments=TRUE)
setwd(dirname(opt$args[1])) # work from the directory containing the tree file
tree <- read.nexus(opt$args[1])
times <- as.numeric( branching.times(tree) )
#plot(tree,show.tip.label=FALSE)
#ltt.plot(tree,log="y")
# Sampling fraction
samplingFraction <- opt$options$rho
print( opt$options$i)
out_dir = sprintf("tess_empirical_hp_%s", file_path_sans_ext(basename(opt$args[1])))
out_file = sprintf("%s_RTT.pdf", file_path_sans_ext(basename(opt$args[1])))
pdf(file=out_file,width=0.6*20, height=0.6*20)
# Estimated hyper-priors
tess.analysis(tree,
empiricalHyperPriors = TRUE,
samplingProbability = samplingFraction,
numExpectedRateChanges = opt$options$E_shifts,
numExpectedMassExtinctions = opt$options$E_me,
pMassExtinctionPriorShape1 = 1,
pMassExtinctionPriorShape2 = 1,
MAX_ITERATIONS = opt$options$i,
dir = out_dir)
# Plot output
library(TESS)
output <- tess.process.output(out_dir,
numExpectedRateChanges = opt$options$E_shifts,
numExpectedMassExtinctions = opt$options$E_me)
layout.mat <- matrix(1:6,nrow=3,ncol=2,byrow=TRUE)
layout(layout.mat)
tess.plot.output(output,las=2,
fig.types = c("speciation rates",
"speciation shift times",
"extinction rates",
"extinction shift times",
"mass extinction Bayes factors",
"mass extinction times"))
n<- dev.off() |
305de3bcce250360d56324e910cd9bdd5bd993fc | 0839e73a99c2113c2fee5cce53101efbc65ad8bc | /tests/testthat/test-fitmethod.R | 1882afd248a1c59558a9291dfd6bdb684f599572 | [] | no_license | harobledo/fixest | cf7abd46e07f3c6113958bc9b8de414bf13d7813 | 5af3eb32e019768932fed8be07f110e9efcd90b8 | refs/heads/master | 2023-09-02T01:18:49.311081 | 2021-09-12T17:08:02 | 2021-09-12T17:08:02 | 390,754,772 | 0 | 1 | null | 2021-11-22T18:50:20 | 2021-07-29T14:40:33 | R | UTF-8 | R | false | false | 981 | r | test-fitmethod.R | fitmethod.cases <- fitmethod_cases()[-c(4:6), ] # Eliminating ols with fmly (makes no sense)
with_parameters_test_that("feols.fit works properly",
{
fmla <- paste(y_dep, "-1 + x1 + x2 + x3", sep = " ~ ")
res <- feols.fit(y = ev_par(paste0("base$", y_dep)), X = base[, 2:4])
res_bis <- feols(fml = as.formula(fmla), data = base)
expect_equal(coef(res), coef(res_bis))
},
.cases = fitmethod.cases[1:3, ]
)
with_parameters_test_that("feglm.fit works properly",
{
fmla <- paste(y_dep, "-1 + x1 + x2 + x3", sep = " ~ ")
if (isTRUE(with_fmly)) {
res <- feglm.fit(y = ev_par(paste0("base$", y_dep)), X = base[, 2:4], family = fmly)
res_bis <- feglm(fml = as.formula(fmla), data = base, family = fmly)
} else {
res <- feglm.fit(y = ev_par(paste0("base$", y_dep)), X = base[, 2:4])
res_bis <- feglm(fml = as.formula(fmla), data = base)
}
expect_equal(coef(res), coef(res_bis))
},
.cases = fitmethod.cases[4:6, ]
)
|
84c16e8912632bcec92d866939ea239fcf42a8a0 | f7fa230362cd752d4e114b0423385e2ec59e9e8b | /diamonds_dataset.R | 7b19e0257a28d7b4229d6dc08e5f992659dbf163 | [] | no_license | shinjjune/R-Visual-ML-DL | 6aa79721b1e136231547b401e02685da58f1ff1d | ed0feff66f823317f7d52e8e30ae9d4aeb9a6c93 | refs/heads/master | 2020-05-31T13:05:34.298856 | 2020-01-22T08:58:21 | 2020-01-22T08:58:21 | 190,295,112 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 316 | r | diamonds_dataset.R | library("ggplot2")
str(diamonds)
library("ggplot2")
library("ggthemes")
ggplot(diamonds, aes(x=x, y=price, color=clarity)) +geom_point(alpha=0.03)+
geom_hline(yintercept=mean(diamonds$price), color="turquoise3",alpha=.8)+
guides(color=guide_legend(override.aes=list(alpha=1)))+
xlim(3,9)+theme_solarized_2()
|
a77bbea16c6f9782cbf7ac1a766b2a1b47995a5a | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gFileHasUriScheme.Rd | 16b89ae10e5e261f6213d3b0b184a840a89915fe | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 623 | rd | gFileHasUriScheme.Rd | \alias{gFileHasUriScheme}
\name{gFileHasUriScheme}
\title{gFileHasUriScheme}
\description{Checks to see if a \code{\link{GFile}} has a given URI scheme.}
\usage{gFileHasUriScheme(object, uri.scheme)}
\arguments{
\item{\verb{object}}{input \code{\link{GFile}}.}
\item{\verb{uri.scheme}}{a string containing a URI scheme.}
}
\details{This call does no blocking i/o.}
\value{[logical] \code{TRUE} if \code{\link{GFile}}'s backend supports the
given URI scheme, \code{FALSE} if URI scheme is \code{NULL},
not supported, or \code{\link{GFile}} is invalid.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
dfed6dd5f442d508835a02ef19b8109c446afe99 | 331e7816d55b9d3de50253d1b096e8707859a11c | /R/calibration.R | d84021eab550f8ab3fe3e37e39c4816ed09f37ac | [] | no_license | haroine/icarus | e515732a69d82614bb248807f882559188d291a7 | bd51ecf29bc7f07111219534dbd401f78c1daa84 | refs/heads/master | 2023-06-09T19:41:26.432469 | 2023-05-27T15:42:26 | 2023-05-27T15:42:26 | 38,872,499 | 10 | 5 | null | null | null | null | UTF-8 | R | false | false | 12,885 | r | calibration.R | # copyright (C) 2014-2023 A.Rebecq
# This function executes easy calibration with just data and matrix of margins
#########
#' Calibration on margins
#' @description
#' Performs calibration on margins with several methods and customizable parameters
#' @param data The dataframe containing the survey data
#' @param marginMatrix The matrix giving the margins for each column variable included
#' in the calibration problem
#' @param colWeights The name of the column containing the initial weights in the survey
#' dataframe
#' @param method The method used to calibrate. Can be "linear", "raking", "logit"
#' @param bounds Two-element vector containing the lower and upper bounds for bounded methods
#' ("logit")
#' @param q Vector of q_k weights described in Deville and Sarndal (1992)
#' @param costs The penalized calibration method will be used, using costs defined by this
#' vector. Must match the number of rows of marginMatrix. Negative of non-finite costs are given
#' an infinite cost (coefficient of C^-1 matrix is 0)
#' @param gap Only useful for penalized calibration. Sets the maximum gap between max and min
#' calibrated weights / initial weights ratio (and thus is similar to the "bounds"
#' parameter used in regular calibration)
#' @param popTotal Specify the total population if margins are defined by relative values in
#' marginMatrix (percentages)
#' @param pct If TRUE, margins for categorical variables are considered to
#' be entered as percentages. popTotal must then be set. (FALSE by default)
#' @param scale If TRUE, stats (including bounds) on ratio calibrated weights / initial weights are
#' done on a vector multiplied by the weighted non-response ratio (ratio population total /
#' total of initial weights). Has same behavior as "ECHELLE=0" in Calmar.
#' @param description If TRUE, output stats about the calibration process as well as the
#' graph of the density of the ratio calibrated weights / initial weights
#' @param maxIter The maximum number of iterations before stopping
#' @param check performs a few check about the dataframe. TRUE by default
#' @param calibTolerance Tolerance for the distance to an exact solution.
#' Could be useful when there is a huge number of margins as the risk of
#' inadvertently setting incompatible constraints is higher. Set to 1e-06 by default.
#' @param uCostPenalized Unary cost by which every cost is "costs" column is multiplied
#' @param lambda The initial ridge lambda used in penalized calibration. By default, the initial
#' lambda is automatically chosen by the algorithm, but you can speed up the search for the optimum
#' if you already know a lambda close to the lambda_opt corresponding to the gap you set. Be careful,
#' the search zone is reduced when a lambda is set by the user, so the program may not converge
#' if the lambda set is too far from the lambda_opt.
#' @param precisionBounds Only used for calibration on minimum bounds. Desired precision
#' for lower and upper reweighting factor, both bounds being as close to 1 as possible
#' @param forceSimplex Only used for calibration on tight bounds.Bisection algorithm is used
#' for matrices whose size exceed 1e8. forceSimplex = TRUE forces the use of the simplex algorithm
#' whatever the size of the problem (you might want to set this parameter to TRUE if you
#' have a large memory size)
#' @param forceBisection Only used for calibration on tight bounds. Forces the use of the bisection
#' algorithm to solve calibration on tight bounds
#' @param colCalibratedWeights Deprecated. Only used in the scope of calibration function
#' @param exportDistributionImage File name to which the density plot shown when
#' description is TRUE is exported. Requires package "ggplot2"
#' @param exportDistributionTable File name to which the distribution table of before/after
#' weights shown when description is TRUE is exported. Requires package "xtable"
#'
#' @examples
#' N <- 300 ## population total
#' ## Horvitz Thompson estimator of the mean: 1.666667
#' weightedMean(data_employees$movies, data_employees$weight, N)
#' ## Enter calibration margins:
#' mar1 <- c("category",3,80,90,60)
#' mar2 <- c("sex",2,140,90,0)
#' mar3 <- c("department",2,100,130,0)
#' mar4 <- c("salary", 0, 470000,0,0)
#' margins <- rbind(mar1, mar2, mar3, mar4)
#' ## Compute calibrated weights with raking ratio method
#' wCal <- calibration(data=data_employees, marginMatrix=margins, colWeights="weight"
#' , method="raking", description=FALSE)
#' ## Calibrated estimate: 2.471917
#' weightedMean(data_employees$movies, wCal, N)
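#'
#' ## A further, purely illustrative sketch (not run; the cost and gap values
#' ## below are arbitrary placeholders): penalized calibration expects one cost
#' ## per row of `margins`, and `gap` caps the spread of the ratios of
#' ## calibrated to initial weights.
#' # wPen <- calibration(data=data_employees, marginMatrix=margins, colWeights="weight",
#' # costs=c(1,1,1,1), gap=2, popTotal=N, description=FALSE)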
#'
#' @references Deville, Jean-Claude, and Carl-Erik Sarndal. "Calibration estimators in survey sampling."
#' Journal of the American statistical Association 87.418 (1992): 376-382.
#' @references Bocci, J., and C. Beaumont. "Another look at ridge calibration."
#' Metron 66.1 (2008): 5-20.
#' @references Vanderhoeft, Camille. Generalised calibration at statistics Belgium: SPSS Module G-CALIB-S and current practices.
#' Inst. National de Statistique, 2001.
#' @references Le Guennec, Josiane, and Olivier Sautory. "Calmar 2: Une nouvelle version
#' de la macro calmar de redressement d'echantillon par calage." Journees de Methodologie Statistique,
#' Paris. INSEE (2002).
#' @return column containing the final calibrated weights
#'
#' @export
calibration = function(data, marginMatrix, colWeights, method="linear", bounds=NULL, q=NULL
, costs=NULL, gap=NULL, popTotal=NULL, pct=FALSE, scale=NULL, description=TRUE
, maxIter=2500, check=TRUE, calibTolerance=1e-06
, uCostPenalized=1, lambda=NULL
, precisionBounds=1e-4, forceSimplex=FALSE, forceBisection=FALSE
, colCalibratedWeights, exportDistributionImage=NULL, exportDistributionTable=NULL) {
## Deprecate an argument that is only used in the scope
## of this function
if (!missing(colCalibratedWeights)) {
warning("argument colCalibratedWeights is deprecated; now private
and defaults to 'calWeights'",
call. = FALSE)
}
colCalibratedWeights <- "calWeights"
# By default, scale is TRUE when popTotal is not NULL, false otherwise
if(is.null(popTotal)) {
scale <- FALSE
} else {
scale <- TRUE
}
if(check) {
## Check if all weights are not NA and greater than zero.
checkWeights <- as.numeric(data.matrix(data[colWeights]))
if( length(checkWeights) <= 0 ) {
stop("Weights column has length zero")
}
if( any((is.na(checkWeights)) ) ) {
stop("Some weights are NA")
}
if( any((checkWeights <= 0) ) ) {
stop("Some weights are negative or zero")
}
# Check NAs on calibration variables
matrixTestNA = missingValuesMargins(data, marginMatrix)
testNA = as.numeric(matrixTestNA[,2])
if(sum(testNA) > 0) {
print(matrixTestNA)
stop("NAs found in calibration variables")
}
# check if number of modalities in calibration variables matches marginMatrix
if(!checkNumberMargins(data, marginMatrix)) stop("Error in number of modalities.")
# check that method is specified
if(is.null(method)) {
warning('Method not specified, raking method selected by default')
method <- "raking"
}
## Basic checks on vector q:
if(!is.null(q)) {
if( length(q) != nrow(data) ) {
stop("Vector q must have same length as data")
}
if(!is.null(costs)) {
stop("q weights not supported with penalized calibration yet")
}
if(method == "min") {
stop("q weights not supported with calibration on min bounds yet")
}
}
}
marginCreation <- createFormattedMargins(data, marginMatrix, popTotal, pct)
matrixCal = marginCreation[[2]]
formattedMargins = marginCreation[[1]]
# Same rule as in "Calmar" for SAS : if scale is TRUE,
# calibration is done on weights adjusted for nonresponse
# (uniform adjustment)
weights <- as.numeric(data.matrix(data[colWeights]))
if(scale) {
if(is.null(popTotal)) {
stop("When scale is TRUE, popTotal cannot be NULL")
}
weights <- weights*(popTotal / sum(data.matrix(data[colWeights])) )
}
if(is.null(costs)) {
g <- NULL
if( (is.numeric(bounds)) || (method != "min") ) {
g <- calib(Xs=matrixCal, d=weights, total=formattedMargins, q=q,
method=method, bounds=bounds, maxIter=maxIter, calibTolerance=calibTolerance)
} else {
if( (any(identical(bounds,"min"))) || (method == "min")) {
g <- minBoundsCalib(Xs=matrixCal, d=weights, total=formattedMargins
, q=rep(1,nrow(matrixCal)), maxIter=maxIter, description=description, precisionBounds=precisionBounds, forceSimplex=forceSimplex, forceBisection=forceBisection)
}
}
if(is.null(g)) {
stop(paste("No convergence in", maxIter, "iterations."))
}
data[colCalibratedWeights] = g*weights
} else {
# Forbid popTotal null when gap is selected
if(!is.null(gap) && is.null(popTotal)) {
warning("popTotal NULL when gap is selected is a risky setting !")
}
## Format costs
costsFormatted <- formatCosts(costs, marginMatrix, popTotal)
wCal = penalizedCalib(Xs=matrixCal, d=weights, total=formattedMargins, method=method
, bounds=bounds, costs=costsFormatted, uCostPenalized=uCostPenalized
, maxIter=maxIter, lambda=lambda, gap=gap)
data[colCalibratedWeights] = data.matrix(wCal)
g = wCal / weights
}
if(description) {
writeLines("")
writeLines("################### Summary of before/after weight ratios ###################")
}
# popTotalComp is popTotal computed from sum of calibrated weights
popTotalComp <- sum(data[colCalibratedWeights])
weightsRatio = g
if(description) {
writeLines(paste("Calibration method : ",method, sep=""))
if(! (method %in% c("linear","raking")) && ! is.null(bounds) ) {
if(is.numeric(bounds)) {
writeLines(paste("\t L bound : ",bounds[1], sep=""))
writeLines(paste("\t U bound : ",bounds[2], sep=""))
}
if( (any(identical(bounds,"min"))) || (method == "min") ) {
writeLines(paste("\t L bound : ",round(min(g),4), sep=""))
writeLines(paste("\t U bound : ",round(max(g),4), sep=""))
}
}
writeLines(paste("Mean : ",round(mean(weightsRatio),4), sep=""))
quantileRatio <- round(stats::quantile(weightsRatio, probs=c(0,0.01,0.1,0.25,0.5,0.75,0.9,0.99,1)),4)
print(quantileRatio)
}
## Export in TeX
if(!is.null(exportDistributionTable)) {
if (!requireNamespace("xtable", quietly = TRUE)) {
stop("Package xtable needed for exportDistributionTable to work. Please install it.",
call. = FALSE)
}
# Linear or raking ratio
if(is.null(bounds)) {
newNames <- names(quantileRatio)
newNames <- c(newNames,"Mean")
statsRatio <- c(quantileRatio,mean(quantileRatio))
names(statsRatio) <- newNames
} else {
newNames <- names(quantileRatio)
newNames <- c("L",newNames,"U","Mean")
statsRatio <- c(bounds[1],quantileRatio,bounds[2],mean(quantileRatio))
names(statsRatio) <- newNames
}
latexQuantiles <- xtable::xtable(as.data.frame(t(statsRatio)))
# Notice that there is one extra column in align(latexQuantiles)
# since we haven't specified yet to exclide rownames
if(is.null(bounds)) {
xtable::align(latexQuantiles) <- "|c|ccccccccc||c|"
} else {
xtable::align(latexQuantiles) <- "|c|c|ccccccccc|c||c|"
}
print(latexQuantiles, include.rownames = FALSE, include.colnames = TRUE,
floating = FALSE, file=exportDistributionTable)
}
if(description) {
writeLines("")
writeLines("################### Comparison Margins Before/After calibration ###################")
print(calibrationMarginStats(data=data, marginMatrix=marginMatrix, popTotal=popTotal, pct=pct, colWeights=colWeights, colCalibratedWeights=colCalibratedWeights))
}
# Plot density of weights ratio
if(description) {
if(requireNamespace("ggplot2")) {
densityPlot = ggplot2::ggplot(data.frame(weightsRatio), ggplot2::aes(x=weightsRatio)) + ggplot2::geom_density(alpha=0.5, fill="#FF6666", size=1.25, adjust=2) + ggplot2::theme_bw()
print(densityPlot)
if(!is.null(exportDistributionImage)) {
ggplot2::ggsave(densityPlot, file=exportDistributionImage)
}
} else {
warning("Require package ggplot2 to plot weights ratio")
}
}
return(g*weights)
}
|
c955ec533fdcdb36da8b39bf4b3f85a71af02caa | 8e044458ebb6dcb51a711c51c33a9b54bbf9fd8e | /R/cox.ipw.r | 82a8d54f8928a76101d35c9f86ef1540cb8b86d4 | [] | no_license | scheike/timereg | a051e085423d3a3fd93db239c33210f60270d290 | 5807b130fbbda218e23d5c80ddc845973cae9dfc | refs/heads/master | 2023-01-28T19:38:18.528022 | 2023-01-17T06:28:35 | 2023-01-17T06:28:35 | 35,535,708 | 28 | 4 | null | null | null | null | UTF-8 | R | false | false | 4,523 | r | cox.ipw.r | #' Missing data IPW Cox
#'
#' Fits a Cox-Aalen survival model with missing data, with glm specification
#' of probability of missingness.
#'
#' Taylor expansion of Cox's partial likelihood in direction of glm parameters
#' using num-deriv and iid expansion of Cox and glm parameters (lava).
#'
#' @aliases cox.ipw summary.cox.ipw print.cox.ipw coef.cox.ipw
#' @param survformula a formula object with the response on the left of a '~'
#' operator, and the independent terms on the right as regressors. The response
#' must be a survival object as returned by the `Surv' function.
#'
#' Adds the prop() wrapper internally for using cox.aalen function for fitting
#' Cox model.
#' @param glmformula formula for "being" observed, that is not missing.
#' @param d data frame.
#' @param max.clust number of clusters in iid approximation. Default is all.
#' @param ipw.se if TRUE computes standard errors based on iid decomposition of
#' cox and glm model, thus should be asymptotically correct.
#' @param tie.seed if there are ties these are broken, and to get same break
#' the seed must be the same. Recommend to break them prior to entering the
#' program.
#' @return returns an object of type "cox.ipw" with the following elements:
#' \item{iid}{iid decomposition.} \item{coef}{missing data estimates for
#' weighted cox. } \item{var}{robust pointwise variance estimates. }
#' \item{se}{robust pointwise standard error estimates. } \item{se.naive}{estimate
#' of parametric components of model. } \item{ties}{list of ties and times
#' with random noise to break ties.} \item{cox}{output from weighted cox
#' model.}
#' @author Thomas Scheike
#' @references Paik et al.
#' @keywords survival
#' @examples
#'
#'
#' ### fit <- cox.ipw(Surv(time,status)~X+Z,obs~Z+X+time+status,data=d,ipw.se=TRUE)
#' ### summary(fit)
#'
#'
##' @export
cox.ipw <- function(survformula,glmformula,d=parent.frame(),max.clust=NULL,ipw.se=FALSE,tie.seed=100)
{ ## {{{
ggl <- glm(glmformula,family='binomial',data=d)
mat <- model.matrix(glmformula,data=d);
glmcovs <- attr(ggl$terms,"term.labels")
d$ppp <- predict(ggl,type='response')
### d1 <- d[,survcovs]
### dcc <- na.omit(d)
## {{{ checking and breaking ties
ties <- FALSE
survtimes <- all.vars(update(survformula,.~0))
if (length(survtimes)==2) {itime <- 1; time2 <- d[,survtimes[1]]; status <- d[,survtimes[2]]; }
if (length(survtimes)==3) {itime <- 2; time2 <- d[,survtimes[2]]; status <- d[,survtimes[3]]; }
jtimes <- time2[status==1];
dupli <- duplicated(jtimes)
if (sum(dupli)>0) {
set.seed(tie.seed)
jtimes[dupli] <- jtimes[dupli]+runif(sum(dupli))*0.01
time2[status==1] <- jtimes
d[,survtimes[itime]] <- time2
ties <- TRUE
}
## }}}
dcc <- d[ggl$y==1,]
ppp <- dcc$ppp
timeregsurvformula <- timereg.formula(survformula)
udca <- cox.aalen(timeregsurvformula,data=dcc,weights=1/ppp,n.sim=0,max.clust=max.clust)
### iid of beta for Cox model
coxiid <- udca$gamma.iid
if (ipw.se==TRUE) { ## {{{
###requireNamespace("lava"); requireNamespace("NumDeriv");
glmiid <- lava::iid(ggl)
mat <- mat[ggl$y==1,]
par <- coef(ggl)
coxalpha <- function(par)
{ ## {{{
rr <- mat %*% par
pw <- c(exp(rr)/(1+exp(rr)))
assign("pw",pw,envir=environment(survformula))
### if (coxph==FALSE)
ud <- cox.aalen(timeregsurvformula,data=dcc,weights=1/pw,beta=udca$gamma,Nit=1,n.sim=0,robust=0)
### else { ud <- coxph(survformula,data=dcc,weights=1/pw,iter.max=1,init=udca$gamma)
### ud <- coxph.detail(ud,data=dcc)
### }
ud$score
} ## }}}
DU <- numDeriv::jacobian(coxalpha, par)
IDU <- udca$D2linv %*% DU
alphaiid <-t( IDU %*% t(glmiid))
###
iidfull <- alphaiid
###
iidfull[ggl$y==1,] <- coxiid + alphaiid[ggl$y==1,]
###
var2 <- t(iidfull) %*% iidfull
se <- cbind(diag(var2)^.5); colnames(se) <- "se"
} else { iidfull <- NULL; var2 <- NULL; se <- NULL} ## }}}
se.naive=coef(udca)[,3,drop=FALSE]; colnames(se.naive) <- "se.naive"
res <- list(iid=iidfull,coef=udca$gamma,var=var2,se=se,se.naive=se.naive,ties=list(ties=ties,time2=time2,cox=udca))
class(res) <- "cox.ipw"
return(res)
} ## }}}
##' @export
summary.cox.ipw <- function(object,digits=3,...)
{
tval <- object$coef/object$se
pval <- 2*(1-pnorm(abs(tval)))
res <- cbind(object$coef,object$se,object$se.naive,pval)
colnames(res) <- c("coef","se","se.naive","pval")
return(res)
}
##' @export
coef.cox.ipw<- function(object,digits=3,...)
{
summary.cox.ipw(object)
}
##' @export
print.cox.ipw <- function(x,...)
{
summary.cox.ipw(x)
}
|
ec641a0dd6725f44b1099decbe6b758fac2844b9 | c857e04d82512de09d7541bd99b6a8bd990a23f9 | /plot2.R | aff7ef905211cfcf6f50ec4093d93a26b67c8fb7 | [] | no_license | bisybackson/ExData_Plotting1 | cdfca1db79cf24b3a1f19d97d7a26b882ea24835 | 60e4a2057d13f4dce970839960b84e3d6d2f5c7e | refs/heads/master | 2021-06-05T12:19:59.256589 | 2016-10-31T16:21:55 | 2016-10-31T16:21:55 | 72,365,019 | 0 | 0 | null | 2016-10-30T17:43:27 | 2016-10-30T17:43:25 | null | UTF-8 | R | false | false | 557 | r | plot2.R | library(data.table)
library(dplyr)
library(lubridate)
consumption <- fread("household_power_consumption.txt",sep=";",header=TRUE,na.strings = "?",stringsAsFactors = FALSE)
consumption$DateTime <- dmy_hms(paste(consumption$Date, consumption$Time))
consumed <- filter(consumption, date(consumption$DateTime) == "2007-02-01" | date(consumption$DateTime) == "2007-02-02")
rm(consumption)
plot(Global_active_power ~ DateTime, consumed, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.copy(png, file="plot2.png", width = 480, height=480)
dev.off()
|
f231e496c6ebf214a3048c09e988f616a0ed96e5 | f4e2f6a4bd24753ce2522a19da2bc4d870d71a67 | /man/qsmoothData.Rd | a0319579315e518287390f60e0fbf70466cb3503 | [] | no_license | Feigeliudan01/qsmooth | 2be0fbfe65769a0fc0746a363b89542fa4c10ad0 | 58f23c44ef6a63fb40080231d4177e6eb74e62f2 | refs/heads/master | 2020-04-15T04:58:44.359789 | 2017-02-15T19:42:13 | 2017-02-15T19:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 550 | rd | qsmoothData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods.R
\docType{methods}
\name{qsmoothData}
\alias{qsmoothData}
\alias{qsmoothData,qsmooth-method}
\alias{qsmoothData.qsmooth}
\title{Accessors for the 'qsmoothData' slot of a qsmooth object.}
\usage{
qsmoothData(object)
\S4method{qsmoothData}{qsmooth}(object)
\S4method{qsmoothData}{qsmooth}(object)
}
\arguments{
\item{object}{a \code{qsmooth} object}
\item{...}{other}
}
\description{
Accessors for the 'qsmoothData' slot of a qsmooth object.
}
|
fd5a4612e500ecb7adde336520cfcdfa6fe08a98 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609959020-test.R | 362751728cf381d1f4e381cd973cba6c6506cc25 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 400 | r | 1609959020-test.R | testlist <- list(x = structure(c(5.04442971419527e+180, 3.1111403385324e+174, 1.51741194999287e-152, 2.71034819479614e-164, 1.93112249337219e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 6L)))
result <- do.call(bravo:::colSumSq_matrix,testlist)
str(result) |
b5f935643c4d4b0ea19e2b6933884ea768d2268b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/psych/examples/omega.graph.Rd.R | 1b3acd2e5f6b444f86cd288a37b65aab22ceb118 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | omega.graph.Rd.R | library(psych)
### Name: omega.graph
### Title: Graph hierarchical factor structures
### Aliases: omega.diagram omega.graph
### Keywords: multivariate
### ** Examples
#24 mental tests from Holzinger-Swineford-Harman
if(require(GPArotation) ) {om24 <- omega(Harman74.cor$cov,4) } #run omega
#
#example hierarchical structure from Jensen and Weng
if(require(GPArotation) ) {jen.omega <- omega(make.hierarchical())}
|
37c7bafae236854145fd87f88865ba0caabfb30e | f58d0680a57f8c62d0d10431f6a747e4b81eae19 | /R/add_tech_intensity.R | 6b951d6bd612dd4ccdfe64895a156f07d743ee96 | [] | no_license | awekim/WIODnet | fc99cb1cf1272bd73ac2ee5b90e44a2901b1a638 | 07966a9e856820667ef069be4a1f19592d7be3ae | refs/heads/master | 2020-08-13T15:25:17.406570 | 2019-10-14T12:31:47 | 2019-10-14T12:31:47 | 214,992,045 | 0 | 0 | null | 2019-10-14T08:42:33 | 2019-10-14T08:42:33 | null | UTF-8 | R | false | false | 815 | r | add_tech_intensity.R | #' Join the tech intensity data frame to the yearly WIOD
#'
#' @description Join the created technology intensity data frame to the
#' yearly raw WIOD wide table so that similar country and technology
#' intensity manufacturing industries can be aggregated.
#'
#' @param yearly.raw yearly raw data from the downloaded zip such as WIOT2011_October16_ROW.RData
#'
#' @return data frame
#'
#' @import dplyr
#'
#' @import magrittr
#'
addTechIntensity <- function(yearly.raw) {
industry.RNr <<- getTechIntensity(yearly.raw)
## changing the IndustryCode column within the main df
yearly.raw %<>% left_join(industry.RNr, yearly.raw, by = c("RNr"))
## cleaning the data frame
yearly.raw %<>% select(-IndustryCode) %>% rename(IndustryCode = NewIndustryCode)
return(yearly.raw)
}
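## Illustrative usage (a sketch; assumes the yearly WIOT table, e.g. the object
## loaded from WIOT2011_October16_ROW.RData, is available as `wiot.raw`):
## wiot.tech <- addTechIntensity(wiot.raw)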
|
5929f02a9fd62ae39788f35ba10e2164035088b2 | 669cdf8cabbe9269122c8a2e012df7d4b06bd895 | /R/oauth-has-expired.r | 35fc3118898ba8b0843b3942cf189c262e8aa470 | [] | no_license | jdeboer/httr | 660e85f54bd29ccbb4431370eed344fd24809762 | eb27bd57decc5fb37de51d52466a4b229a11f793 | refs/heads/master | 2021-01-18T12:10:22.328699 | 2013-06-01T16:15:25 | 2013-06-01T16:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 424 | r | oauth-has-expired.r | #' Check if an OAuth 2.0 access token has past its use by.
#'
#' @param access_token the access token to check use_by of
#' @param margin the number of seconds before use_by to use as the expiration threshold (default of 30 seconds)
#' @family OAuth
#' @export
oauth2.0_has_expired <- function(access_token, margin = 30) {
stopifnot(
length(access_token$use_by) == 1
)
(access_token$use_by - margin) < Sys.time()
}
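## Illustrative sketch (hypothetical token: only the `use_by` field that this
## function reads is included; real tokens carry more fields):
## tok <- list(use_by = Sys.time() + 3600)
## oauth2.0_has_expired(tok) # FALSE until about 30 seconds before use_by
## oauth2.0_has_expired(tok, margin = 7200) # TRUE, since margin exceeds the hour left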
|
37b793b06bfa4716c877a8595f899d6f6457d150 | f6912c71c408619f65692f4824300e6be46c1b2f | /my-variation.R | 9e8a75f3d8acad27fe7fb8351d2251698e5dcfb1 | [] | no_license | Vassar-COGS282-2017/3-B1ngB0ng | 1c428572a28e391abd7e5bfb652d0d4d61977985 | 7b83a9888edf30fbb353333b66ac2ba888cd7c66 | refs/heads/master | 2021-07-11T12:01:21.963678 | 2017-10-06T02:07:55 | 2017-10-06T02:07:55 | 103,571,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,753 | r | my-variation.R | ######################################################
#DISCUSSION
######################################################
#My variation of the Schelling model adds property cost
#and income inequality to the mix. The model assumes that
#property in the center of the city is most expensive, and
#that cost decreases as you move outwards. It introduces
#several new parameters, but most of these exist to ensure
#there will be equilibrium, and have been tuned accordingly.
#For the most part, the inequality.ratio and the min.similarity
#are the parameters worth tweaking.
#What is most interesting about this model is that even a slight
#income advantage given to one color causes it to dominate the center
#assuming a min.similarity of 3/8. This model can help explain the way
#that income interacts with bias to enforce segregation and suppress
#social mobility.
######################################################
#PARAMETERS
######################################################
rows <- 50 #number of rows in matrix
cols <- 50 #number of columns in matrix
#a distribution that determines how sharply property prices
#decay as you expand outward from the center of the map
property.dist <- function(x) {
return(x^(0.6))
}
proportion.group.1 <- .5 # proportion of red agents
empty <- .2 # proportion of grid that will be empty space
min.similarity <- 3/8 # minimum proportion of neighbors that are the same type to not move
inequality.ratio <- c(3,2) #a simple metric to set the income inequality.
#Keep the first digit higher than the second to give red a higher income
#income distribution of the red population
distgrp1 <- quote(rbeta(1, inequality.ratio[1], inequality.ratio[2]))
#income distribution of the blue population
distgrp2 <- quote(rbeta(1, inequality.ratio[2], inequality.ratio[1]))
overprice.tolerance <- 0.2 #number between 0 and 1, determines how far above someone's means
# they are willing to live
underprice.tolerance <- 0.5 #number between 0 and 1, determines how far below someone's means
# they are willing to live
######################################################
#FUNCTIONS
######################################################
#_________create.grid_________###
# generates a rows x column matrix and randomly places the initial population
# values in the matrix are either 0, 1, or 2
# if 0, the space is empty
# 1 and 2 represent the two different groups
create.grid <- function(rows, cols, proportion.group.1, empty){
pop.size.group.1 <- (rows*cols)*(1-empty)*proportion.group.1
pop.size.group.2 <- (rows*cols)*(1-empty)*(1-proportion.group.1)
initial.population <- sample(c(
rep(1, pop.size.group.1),
rep(2, pop.size.group.2),
rep(0, (rows*cols)-pop.size.group.1-pop.size.group.2)
))
grid <- matrix(initial.population, nrow=rows, ncol=cols)
}
#_________visualize.grid_________####
# outputs a visualization of the grid, with red squares representing group 1,
# blue squares group 2, and black squares empty locations.
visualize.grid <- function(grid){
image(grid, col=c('black','red','blue'), xaxs=NULL, yaxs=NULL, xaxt='n', yaxt='n')
}
#_________empty.locations_________####
# returns all the locations in the grid that are empty
# output is an N x 2 array, with N equal to the number of empty locations
# the 2 columns contain the row and column of the empty location.
empty.locations <- function(grid){
return(which(grid==0, arr.ind=T))
}
#_________similarity.to.center_________####
# takes a grid and the center.val of that grid and returns
# the proportion of cells that are the same as the center,
# ignoring empty cells. the center.val must be specified
# manually in case the grid has an even number of rows or
# columns
similarity.to.center <- function(grid.subset, center.val){
if(center.val == 0){ return(NA) }
same <- sum(grid.subset==center.val) - 1
not.same <- sum(grid.subset!=center.val) - sum(grid.subset==0)
return(same/(same+not.same))
}
#_________segregation_________####
# computes the proportion of neighbors who are from the same group
segregation <- function(grid){
same.count <- 0
diff.count <- 0
for(row in 1:(nrow(grid)-1)){
for(col in 1:(ncol(grid)-1)){
if(grid[row,col] != 0 && grid[row+1,col+1] != 0){
if(grid[row,col] != grid[row+1,col+1]){
diff.count <- diff.count + 1
} else {
same.count <- same.count + 1
}
}
}
}
return(same.count / (same.count + diff.count))
}
#_________match.means_________####
#This is a function that determines whether or not an individual
#will want to move because they are living either too far above
#or too far below their means
match.means <- function(row, col) {
return((property.grid[row,col] - overprice.tolerance < wealth.grid[row,col])&(wealth.grid[row,col] < property.grid[row,col] + underprice.tolerance))
}
#_________unhappy.agents_________####
# takes a grid and a minimum similarity threshold and computes
# a list of all of the agents that are unhappy with their
# current location. the output is N x 2, with N equal to the
# number of unhappy agents and the columns representing the
# location (row, col) of the unhappy agent in the grid
unhappy.agents <- function(grid, min.similarity){
grid.copy <- grid
for(row in 1:rows){
for(col in 1:cols){
similarity.score <- similarity.to.center(grid[max(0, row-1):min(rows,row+1), max(0,col-1):min(cols,col+1)], grid[row,col])
if(is.na(similarity.score)){
grid.copy[row,col] <- NA
} else {
#also call the match.means function to see if they need to move
grid.copy[row,col] <- similarity.score >= min.similarity & match.means(row,col)
}
}
}
return(which(grid.copy==FALSE, arr.ind = T))
}
#_________Assign property cost_________###
#This function creates the spread of property
#values assuming the property in the center is more
#expensive than the property on the outskirts
assign.property.cost <- function(rows, cols, current.index, dist) {
#calculate the center
center <- c((rows%/%2),(cols%/%2))
rowDif <- abs((center[1] - current.index[1]))
colDif <- abs((center[2] - current.index[2]))
unscalledDif <- rowDif + colDif
scalledDif <- 1- (dist(unscalledDif)/(dist(cols)))
#return function of the total difference
return(scalledDif)
}
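# Rough sense of the scaling (hand-computed, approximate): with dist = x^0.6 and
# cols = 50, the center cell gets cost 1, while a cell at Manhattan distance 10
# from the center gets about 1 - 10^0.6/50^0.6, i.e. roughly 0.62.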
#_________Create property matrix_________###
#assigns property cost to every square and stores
#it in a matrix
create.property.matrix <- function(rows, cols) {
#initialize the property grid
property.grid <<- matrix(nrow=rows, ncol=cols)
#mutate it
for(i in 1:rows) {
for(j in 1:cols) {
property.grid[i,j] <<- assign.property.cost(rows, cols, c(i,j), property.dist)
}
}
}
#Create a function to assign the disposable income based
#on the income distribution of the group
assign.wealth <- function(distgrp1, distgrp2, type) {
if(type==0){return(0)}
if(type==1){return(distgrp1)}
if(type==2){return(distgrp2)}
}
#Create income matrix
create.income.matrix <- function(distgrp1, distgrp2, matrix) {
#initialize the wealth grid
wealth.grid <<- matrix
for(i in 1:rows) {
for(j in 1:cols) {
wealth.grid[i,j] <<- assign.wealth(eval(distgrp1), eval(distgrp2), matrix[i,j])
}
}
}
#_________one.round_________####
# runs a single round of the simulation. the round starts by finding
# all of the unhappy agents and empty spaces. then unhappy agents are randomly
# assigned to a new empty location. a new grid is generated to reflect all of
# the moves that took place.
one.round <- function(grid, min.similarity){
#find all the empty spaces
empty.spaces <- empty.locations(grid)
#find all the unhappy agents
unhappy <- unhappy.agents(grid, min.similarity)
#shuffle empty spaces to create random assignment
empty.spaces <- empty.spaces[sample(1:nrow(empty.spaces)), ]
#go through the empty spaces list and assign an agent to each
for(i in 1:nrow(empty.spaces)) {
#if we run out of unhappy dudes we wanna end the for loop then
if(i > nrow(unhappy)){
break;
}
#make the switch by copying an unhappy index from grid to the corresponding
#index in empty spaces.
grid[empty.spaces[i, 1], empty.spaces[i,2]] <- grid[unhappy[i,1], unhappy[i,2]]
#make sure the wealth grid stays synced with the grid (sloppy code...mutate the global envrt)
#would have done differently if I were starting this now
wealth.grid[empty.spaces[i, 1], empty.spaces[i,2]] <<- wealth.grid[unhappy[i,1], unhappy[i,2]]
#
grid[unhappy[i,1], unhappy[i,2]] <- 0
#keep wealth grid synced
wealth.grid[unhappy[i,1], unhappy[i,2]] <<- 0
}
return(grid)
}
######################################################
#RUNNING THE SIMULATION
######################################################
done <- FALSE # a variable to keep track of whether the simulation is complete
grid <- create.grid(rows, cols, proportion.group.1, empty)
seg.tracker <- c(segregation(grid)) # keeping a running tally of the segregation scores for each round
create.property.matrix(rows, cols)
create.income.matrix(distgrp1, distgrp2, grid)
while(!done){
new.grid <- one.round(grid, min.similarity) # run one round of the simulation, and store output in new.grid
seg.tracker <- c(seg.tracker, segregation(grid)) # calculate segregation score and add to running list
if(all(new.grid == grid)){ # check if the new.grid is identical to the last grid
done <- TRUE # if it is, simulation is over -- no agents want to move
} else {
grid <- new.grid # otherwise, replace grid with new.grid, and loop again.
}
}
layout(1:2) # change graphics device to have two plots
visualize.grid(grid) # show resulting grid
plot(seg.tracker) # plot segregation over time
|
99a2e84ef992df3b7a9b251f219b08c1644c1993 | bc4c037c476201ec33a36d5476d5ce759bb60afe | /corr_and_LM.r | da487ad26c7f1a4f2859d40445768dd9ffcf9128 | [] | no_license | marlonglopes/RTests | f006e9996f0085de778c1b25c45dc10f27e78cf9 | 571cede14a857b745e196624d10941790b0585cc | refs/heads/master | 2020-05-29T15:41:39.256246 | 2016-11-07T09:48:29 | 2016-11-07T09:48:29 | 61,497,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,776 | r | corr_and_LM.r | # The vectors `A` and `B` have already been loaded
# Take a quick peek at both vectors
A = c(1,2,4)
B = c(3,6,7)
N = length(A)-1
# Save the differences of each vector element with the mean in a new variable
diff_A <- A - mean(A)
diff_B <- B - mean(B)
# Sum the products of the differences and divide by n-1 (stored above as N) to obtain the sample covariance between the two vectors
cov <- sum(diff_A*diff_B) / N
cov
cov(A,B) #R function
# Your workspace still contains the results of the previous exercise
# Square the differences that were found in the previous step
sq_diff_A <- diff_A^2
sq_diff_B <- diff_B^2
# Sum the squared differences, divide by n-1 (stored above as N) and take the square root to obtain the sample standard deviations
sd_A <- sqrt(sum(sq_diff_A)/(N))
sd_B <- sqrt(sum(sq_diff_B)/(N))
# Your workspace still contains the results of the previous exercise
# Combine all the pieces of the puzzle
correlation <- cov / (sd_A * sd_B)
correlation
# Check the validity of your result with the cor() command
cor(A,B) #R function
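# Hand-computed check (approximate values for these particular vectors):
# cov ≈ 2.833, sd_A ≈ 1.528, sd_B ≈ 2.082, so correlation ≈ 0.891,
# which should match the cor(A, B) result above.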
# Regression: y = m + b*x + e
#   y = linear function of x
#   m = intercept
#   b = slope
#   e = error (residual)
# Regression (alternative notation): Y = B0 + B1*X1 + e
#   Y  = linear function of X1
#   B0 = intercept = regression CONSTANT
#   B1 = slope = regression COEFFICIENT
#   e  = error (residual)
###################################
#build regression equation
#For our simple regression model we will take Symptom Score (sym2) as our dependent variable
#and Impulse Control (ic2) as our independent variable. These are variables from the impact dataset, which is still loaded in your workspace.
#The regression equation would then be: sym2 = B_0 + B_1 * ic2 + e
# The dataset `impact` is already loaded.
# Calculate the required means, standard deviations and correlation coefficient
mean_sym2 <- mean(impact$sym2)
mean_ic2 <- mean(impact$ic2)
sd_sym2 <- sd(impact$sym2)
sd_ic2 <- sd(impact$ic2)
r <- cor(impact$ic2,impact$sym2)
# Calculate the slope
B_1 <- r * (sd_sym2 )/( sd_ic2 )
# Calculate the intercept
B_0 <- mean_sym2 - B_1 * mean_ic2
# Plot of ic2 against sym2
plot(impact$ic2,impact$sym2, main = "Scatterplot", ylab = "Symptoms", xlab = "Impulse Control")
# Add the regression line
abline(B_0, B_1, col = "red")
######Equivalent###############################################################################################
# The dataset impact is still loaded
# Construct the regression model
model_1 <- lm(sym2 ~ ic2, data = impact)
# Look at the results of the regression by using the summary function
summary(model_1)
# Create a scatter plot of Impulse Control against Symptom Score
plot(impact$sym2 ~ impact$ic2, main = "Scatterplot", ylab = "Symptoms", xlab = "Impulse Control")
# Add a regression line
abline(model_1, col = "red")
##################################################
# The impact dataset is already loaded
# Multiple Regression
model_2 <- lm(impact$sym2 ~ impact$ic2 + impact$vermem2)
# Examine the results of the regression
summary(model_2)
# Extract the predicted values
predicted <- fitted(model_2)
# Plotting predicted scores against observed scores
plot(predicted ~ impact$sym2, main = "Scatterplot", ylab = "Predicted Scores", xlab = "Observed Scores")
abline(lm(predicted ~ impact$sym2), col = "green")
##########################################
# The `impact` dataset is already loaded
# Create a linear regression with `ic2` and `vismem2` as regressors
model_1 <- lm(impact$sym2 ~ impact$ic2 + impact$vismem2)
# Extract the predicted values
predicted_1 <- fitted(model_1)
# Calculate the squared deviation of the predicted values from the observed values
deviation_1 <- (impact$sym2 - predicted_1)^2
# Sum the squared deviations
SSR_1 <- sum(deviation_1)
# Create a linear regression with `ic2` and `vermem2` as regressors
model_2 <- lm(impact$sym2 ~ impact$ic2 + impact$vermem2)
# Extract the predicted values
predicted_2 <- fitted(model_2)
# Calculate the squared deviation of the predicted values from the observed values
deviation_2 <- (impact$sym2 - predicted_2)^2
# Sum the squared deviations
SSR_2 <- sum(deviation_2)
#Compare the sum of squared residuals of these two models
SSR_1
SSR_2
######
# Assumptions: linear regression assumes, among other things, homoscedasticity
# (constant variance of the residuals). Anscombe's quartet is the classic reminder of
# why such checks matter.
# Test: save the residuals and plot them against the predicted (or original x) values;
# if homoscedasticity holds there should be no visible relationship.
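# (Added illustration, not part of the original exercise) R's built-in `anscombe` data
# shows why these checks matter: the four x/y pairs have nearly identical correlations
# but very different scatterplots and residual patterns.
sapply(1:4, function(i) cor(anscombe[[paste0("x", i)]], anscombe[[paste0("y", i)]]))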
# Extract the residuals from the model
residual <- resid(model_2)
# Draw a histogram of the residuals
hist(residual)
# Extract the predicted symptom scores from the model
predicted <- fitted(model_2)
# Plot the predicted symptom scores against the residuals
plot(residual ~ predicted, main = "Scatterplot", xlab = "Model 2 Predicted Scores", ylab = "Model 2 Residuals")
abline(lm(residual ~ predicted), col="red")
|
f6f8b075bab024aca70beefa5d27b4adcc6b96c0 | 907af44f17d7246e7fb2b967adddb937aa021efb | /man/fslfill2.Rd | 22d0f372185dd661f030ff4c45d1444431c02868 | [] | no_license | muschellij2/fslr | 7a011ee50cfda346f44ef0167a0cb52420f67e59 | 53276dfb7920de666b4846d9d8fb05f05aad4704 | refs/heads/master | 2022-09-21T07:20:18.002654 | 2022-08-25T14:45:12 | 2022-08-25T14:45:12 | 18,305,477 | 38 | 23 | null | 2019-01-10T20:57:47 | 2014-03-31T19:35:03 | R | UTF-8 | R | false | true | 1,262 | rd | fslfill2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslfill2.R
\name{fslfill2}
\alias{fslfill2}
\title{Fill image holes with dilation then erosion}
\usage{
fslfill2(
file,
outfile = NULL,
kopts = "",
remove.ends = TRUE,
refill = TRUE,
retimg = TRUE,
reorient = FALSE,
intern = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{file}{(character) filename of image to be filled}
\item{outfile}{(character) name of resultant filled file}
\item{kopts}{(character) Options passed for kernel before erosion/dilation}
\item{remove.ends}{(logical) Remove top and bottom dilation.}
\item{refill}{(logical) Run \code{\link{fslfill}} after dilation/erosion.}
\item{retimg}{(logical) return image of class nifti}
\item{reorient}{(logical) If retimg, should file be reoriented when read in?
Passed to \code{\link{readnii}}.}
\item{intern}{(logical) pass to \code{\link{system}}}
\item{verbose}{(logical) print out command before running}
\item{...}{additional arguments passed to \code{\link{readnii}}.}
}
\value{
character or logical depending on intern
}
\description{
This function calls \code{fslmaths} to dilate an image, then calls
it again to erode it.
}
\note{
This function binarizes the image before running.
}
|
e9751467e93ce01a2dde0a7c617cb01126fc3561 | 59004c819451c7f552159ec5b2ce500fa365d70d | /R/QRNLMM.R | f01f5d7008943421c662c28a9d11faf7703bab38 | [] | no_license | cran/qrNLMM | 168ca22144c733fa192ef9bda53e93917a46279b | ac1d52d97a4f81205151895054cd553a3c5fd608 | refs/heads/master | 2022-09-05T00:17:55.503771 | 2022-08-18T11:40:05 | 2022-08-18T11:40:05 | 30,884,909 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 22,114 | r | QRNLMM.R | QRNLMM = function(y,x,groups,initial,exprNL,covar=NA,p=0.5,
precision=0.0001,MaxIter=500,M=20,cp=0.25,
beta=NA,sigma=NA,Psi=NA,
show.convergence=TRUE,CI=95,
verbose=TRUE)
{
if(any(is.na(groups)==TRUE)) stop("There are some NA's values in groups")
if(length(y) != length(groups)) stop("groups does not match with the provided data. (length(y) != length(groups))")
resexp = validate_str(exprNL)
if(nchar(resexp)>0)
{
cat('\n')
cat('Some defined variables or maths expressions not recognized:\n')
cat('\n')
cat(resexp,'\n')
cat('\n')
cat('* For the NL function just "x", "covar", "fixed" and "random" can be defined.\n')
cat('\n')
cat('* For derivating the deriv R function recognizes the arithmetic operators +, -, *, / and ^, and the single-variable functions exp, log, sin, cos, tan, sinh, cosh, sqrt, pnorm, dnorm, asin, acos, atan, gamma, lgamma, digamma and trigamma, as well as psigamma for one or two arguments (but derivative only with respect to the first).')
stop(paste("Expression/s \"",resexp,"\" do/es not defined. More details above.",sep=""))
}
nj = c(as.data.frame(table(groups))[,2])
dqnc = countall(exprNL)
d = dqnc[1]
q = dqnc[2]
nc = dqnc[3]
if(all(is.na(covar)==FALSE)){
if(any(is.na(covar)==TRUE)) stop("There are some NA's values in covar")
covar = as.matrix(covar)
if(nc != dim(covar)[2]){stop("The number of declared covariates in exprNL must coincide with the column number of covar.")}
}
if(length(p)==1)
{
## Verify error at parameters specification
#No data
if( (length(x) == 0) | (length(y) == 0)) stop("All parameters must be provided.")
#Validating if exists NA's
if(sum(y[is.na(y)==TRUE]) > 0) stop("There are some NA's values in y")
if(sum(y[is.na(x)==TRUE]) > 0) stop("There are some NA's values in x")
if(any(is.na(covar)==TRUE) && all(is.na(covar))==FALSE) stop("There are some NA's values in covar")
#Validating dims data set
if(ncol(as.matrix(y)) > 1) stop("y must have just one column")
if( length(y) != sum(nj) ) stop("nj does not match with the provided data. (length(y) != sum(nj))")
if( length(y) != nrow(as.matrix(x)) ) stop("x variable does not have the same number of lines than y")
if(length(y) != nrow(as.matrix(covar)) && is.na(covar)==FALSE) stop("covar variable does not have the same number of lines than y")
if(is.character(exprNL)==FALSE && is.expression(exprNL)==FALSE) stop("exprNL must be of class expression or character.")
if(length(initial) != d) stop("The vector of initial parameter must have dimensions equal to the number of fixed effects declared in exprNL.")
if(is.numeric(initial) == FALSE) stop("The vector of initial parameter must be of class numeric.")
#Validating supports
if(p > 1 | p < 0) stop("p must be a real number in (0,1)")
if(precision <= 0) stop("precision must be a positive value (suggested to be small)")
if(MaxIter <= 0 |MaxIter%%1!=0) stop("MaxIter must be a positive integer value")
if(M <= 0 |M%%1!=0) stop("M must be a positive integer value >= 10")
if(cp > 1 | cp < 0) stop("cp must be a real number in [0,1]")
if(is.logical(show.convergence) == FALSE) stop("show.convergence must be TRUE or FALSE.")
#Matrix column labels
namesz <- ('b1')
if(q>1){
for(i in 2:q){namesz <- c(namesz, paste("b",i,sep=""))}
}
#No data
if(is.na(beta) == FALSE)
{if(length(beta) != d) stop("beta must have dimensions equal to the number of fixed effects declared in exprNL.")}
if(is.na(sigma) == FALSE)
{if(sigma <= 0 | length(sigma)!=1) stop("sigma must be a positive real number")}
if(is.na(Psi) == FALSE)
{
if(ncol(as.matrix(Psi)) != q | nrow(as.matrix(Psi)) != q) stop("Psi must be a square matrix of dims equal to the number of random effects declared in exprNL.")
      if(det(Psi)<=0) stop("Psi must be a symmetric positive definite matrix.")
}
exprNL0 = exprNL
inter = paste("function(x,fixed,random,covar=NA){resp = ",exprNL0,";return(resp)}",sep="")
nlmodel0 = nlmodel = eval(parse(text = inter))
if(nc > 0){
exprNL = gsub('covar\\[','covar\\[,',as.character(exprNL))
inter = paste("function(x,fixed,random,covar){resp = ",
exprNL,";return(resp)}",sep="")
nlmodel = eval(parse(text = inter))
}
    #initial values
if(is.na(beta) == TRUE && is.na(sigma) == TRUE)
{
OPT = optim(par = initial,fn = minbeta,y=y,x=x,covar=covar,p=p,q=q,nlmodel=nlmodel)
beta = OPT$par
sigmae = (1/length(y))*OPT$value
}
if(is.na(beta) == TRUE && is.na(sigma) == FALSE)
{
OPT = optim(par = initial,fn = minbeta,y=y,x=x,covar=covar,p=p,q=q,nlmodel=nlmodel)
beta = OPT$par
}
if(is.na(beta) == FALSE && is.na(sigma) == TRUE)
{
OPT = optim(par = beta,fn = minbeta,y=y,x=x,covar=covar,p=p,q=q,nlmodel=nlmodel)
sigmae = (1/length(y))*OPT$value
}
if(is.na(Psi) == TRUE){Psi = diag(q)}
#Running the algorithm
out <- QSAEM_NL(y = y,x = x,nj = nj,initial = initial,exprNL = exprNL0,covar = covar,p = p,precision = precision,M=M,pc=cp,MaxIter=MaxIter,beta = beta,sigmae = sigmae,D=Psi,nlmodel=nlmodel0,d=d,q=q)
if(verbose){
cat('\n')
cat('---------------------------------------------------\n')
cat('Quantile Regression for Nonlinear Mixed Model\n')
cat('---------------------------------------------------\n')
cat('\n')
cat("Quantile =",p)
cat('\n')
cat("Subjects =",length(nj),";",'Observations =',sum(nj),
ifelse(sum(nj==nj[1])==length(nj),'; Balanced =',""),
ifelse(sum(nj==nj[1])==length(nj),nj[1],""))
cat('\n')
cat('\n')
cat('- Nonlinear function \n')
cat('\n')
cat('nlmodel = \n')
cat(as.character(inter))
cat('\n')
cat("return(resp)}")
cat('\n')
cat('\n')
cat('-----------\n')
cat('Estimates\n')
cat('-----------\n')
cat('\n')
cat('- Fixed effects \n')
cat('\n')
print(round(out$res$table,5))
cat('\n')
cat('sigma =',round(out$res$sigmae,5),'\n')
cat('\n')
cat('Random effects \n')
cat('\n')
cat('i) Weights \n')
print(head(round(out$res$weights,5)))
cat('\n')
cat('ii) Variance-Covariance Matrix \n')
dimnames(out$res$D) <- list(namesz,namesz)
print(round(out$res$D,5))
cat('\n')
cat('------------------------\n')
cat('Model selection criteria\n')
cat('------------------------\n')
cat('\n')
critFin <- c(out$res$loglik, out$res$AIC, out$res$BIC, out$res$HQ)
critFin <- round(t(as.matrix(critFin)),digits=3)
dimnames(critFin) <- list(c("Value"),c("Loglik", "AIC", "BIC","HQ"))
print(critFin)
cat('\n')
cat('-------\n')
cat('Details\n')
cat('-------\n')
cat('\n')
cat("Convergence reached? =",(out$res$iter < MaxIter))
cat('\n')
cat('Iterations =',out$res$iter,"/",MaxIter)
cat('\n')
cat('Criteria =',round(out$res$criterio,5))
cat('\n')
cat('MC sample =',M)
cat('\n')
cat('Cut point =',cp)
cat('\n')
cat("Processing time =",out$res$time,units(out$res$time))
}
if(show.convergence == TRUE)
{
cpl = cp*MaxIter
ndiag = (q*(1+q)/2)
npar = d+1+ndiag
labels = list()
for(i in 1:d){labels[[i]] = bquote(beta[.(i)])}
labels[[d+1]] = bquote(sigma)
for(i in 1:ndiag){labels[[i+d+1]] = bquote(psi[.(i)])}
par(mar=c(4, 4.5, 1, 0.5))
op <- par(mfrow=c(ifelse(npar%%3==0,npar%/%3,(npar%/%3)+1),3))
for(i in 1:npar)
{
plot.ts(out$conv$teta[i,],xlab="Iteration",ylab=labels[[i]])
abline(v=cpl,lty=2)
}
}
par(mfrow=c(1,1))
par(mar= c(5, 4, 4, 2) + 0.1)
fitted.values = rep(NA,sum(nj))
if(nc == 0){
for (j in 1:length(nj)){
pos = (sum(nj[1:j-1])+1):(sum(nj[1:j]))
rand = as.matrix(out$res$weights)[j,]
fitted.values[pos] = nlmodel(x = x[pos],
fixed = out$res$beta,
random = rand)
}
}else{
covar = as.matrix(covar)
for (j in 1:length(nj)){
pos = (sum(nj[1:j-1])+1):(sum(nj[1:j]))
rand = as.matrix(out$res$weights)[j,]
fitted.values[pos] = nlmodel(x = x[pos],
fixed = out$res$beta,
random = rand,
covar = covar[pos,,drop = FALSE])
}
}
res = list(p = p,
iter = out$res$iter,
criteria = out$res$criterio,
nlmodel = nlmodel,
beta = out$res$beta,
weights = out$res$weights,
sigma= out$res$sigmae,
Psi = out$res$D,
SE=out$res$EP,
table = out$res$table,
loglik=out$res$loglik,
AIC=out$res$AIC,
BIC=out$res$BIC,
HQ=out$res$HQ,
fitted.values = fitted.values,
residuals = fitted.values - y,
time = out$res$time)
par(mfrow=c(1,1))
par(mar= c(5, 4, 4, 2) + 0.1)
obj.out = list(conv=out$conv,res = res)
class(obj.out) = "QRNLMM"
return(obj.out)
}
else
{
p = sort(unique(p))
obj.out = vector("list", length(p))
## Verify error at parameters specification
#No data
if( (length(x) == 0) | (length(y) == 0)) stop("All parameters must be provided.")
    #Validate that there are no NA values
    if(any(is.na(y))) stop("There are some NA's values in y")
    if(any(is.na(x))) stop("There are some NA's values in x")
if(any(is.na(covar)==TRUE) && all(is.na(covar))==FALSE) stop("There are some NA's values in covar")
#Validating dims data set
if(ncol(as.matrix(y)) > 1) stop("y must have just one column")
if( length(y) != sum(nj) ) stop("nj does not match with the provided data. (length(y) != sum(nj))")
if( length(y) != nrow(as.matrix(x)) ) stop("x variable does not have the same number of lines than y")
if(length(y) != nrow(as.matrix(covar)) && is.na(covar)==FALSE) stop("covar variable does not have the same number of lines than y")
if(is.character(exprNL)==FALSE && is.expression(exprNL)==FALSE) stop("exprNL must be of class expression or character.")
if(length(initial) != d) stop("The vector of initial parameter must have dimensions equal to the number of fixed effects declared in exprNL.")
if(is.numeric(initial) == FALSE) stop("The vector of initial parameter must be of class numeric.")
#Validating supports
    if(!all(p > 0 & p < 1)) stop("p vector must contain real values in (0,1)")
if(precision <= 0) stop("precision must be a positive value (suggested to be small)")
if(MaxIter <= 0 |MaxIter%%1!=0) stop("MaxIter must be a positive integer value")
if(M <= 0 |M%%1!=0) stop("M must be a positive integer value >= 10")
if(cp > 1 | cp < 0) stop("cp must be a real number in [0,1]")
if(is.logical(show.convergence) == FALSE) stop("show.convergence must be TRUE or FALSE.")
#Matrix column labels
namesz <- ('b1')
if(q>1){
for(i in 2:q){namesz <- c(namesz, paste("b",i,sep=""))}
}
#pb2 = tkProgressBar(title = "QRNLMM for several quantiles",
# min = 0,max = length(p), width = 300)
cat("\n")
pb2 <- progress_bar$new(
format = ":what [:bar] :percent eta: :eta \n",
total = length(p),
clear = TRUE,
width= 60,
show_after = 0)
#pb2$tick(len = 0,tokens = list(what = "QRNLMM: Preparing"))
#No data
if(is.na(beta) == FALSE)
{if(length(beta) != d) stop("beta must have dimensions equal to the number of fixed effects declared in exprNL.")}
if(is.na(sigma) == FALSE)
{if(sigma <= 0 | length(sigma)!=1) stop("sigma must be a positive real number")}
#Load required libraries
if(is.na(Psi) == FALSE)
{
if(ncol(as.matrix(Psi)) != q | nrow(as.matrix(Psi)) != q) stop("Psi must be a square matrix of dims equal to the number of random effects declared in exprNL.")
      if(det(Psi)<=0) stop("Psi must be a symmetric positive definite matrix.")
}
exprNL0 = exprNL
inter = paste("function(x,fixed,random,covar=NA){resp = ",exprNL0,";return(resp)}",sep="")
nlmodel0 = nlmodel = eval(parse(text = inter))
if(nc > 0){
exprNL = gsub('covar\\[','covar\\[,',as.character(exprNL))
inter = paste("function(x,fixed,random,covar){resp = ",
exprNL,";return(resp)}",sep="")
nlmodel = eval(parse(text = inter))
}
#nlmodel = eval(parse(text = paste("function(x,fixed,random,covar=NA){resp = ",exprNL,";return(resp)}",sep="")))
    #initial values
if(is.na(beta) == TRUE && is.na(sigma) == TRUE)
{
OPT = optim(par = initial,fn = minbeta,y=y,x=x,covar=covar,p=p[1],q=q,nlmodel=nlmodel)
beta = OPT$par
sigmae = (1/length(y))*OPT$value
}
if(is.na(beta) == TRUE && is.na(sigma) == FALSE)
{
OPT = optim(par = initial,fn = minbeta,y=y,x=x,covar=covar,p=p[1],q=q,nlmodel=nlmodel)
beta = OPT$par
}
if(is.na(beta) == FALSE && is.na(sigma) == TRUE)
{
OPT = optim(par = beta,fn = minbeta,y=y,x=x,covar=covar,p=p[1],q=q,nlmodel=nlmodel)
sigmae = (1/length(y))*OPT$value
}
if(is.na(Psi) == TRUE){Psi = diag(q)}
for(k in 1:length(p))
{
#setTkProgressBar(pb2, k-1, label=paste("Running quantile ",p[k]," - ",k-1,"/",length(p),sep = ""))
#Running the algorithm
pb2$tick(k-1,tokens = list(what = "QRNLMM: Total progress "))
out <- QSAEM_NL(y = y,x = x,nj = nj,initial = initial,exprNL = exprNL0,covar = covar,p = p[k],precision = precision,M=M,pc=cp,MaxIter=MaxIter,beta = beta,sigmae = sigmae,D=Psi,nlmodel=nlmodel0,d=d,q=q)
if(verbose){
cat('\n')
cat('---------------------------------------------------\n')
cat('Quantile Regression for Nonlinear Mixed Model\n')
cat('---------------------------------------------------\n')
cat('\n')
cat("Quantile =",p[k])
cat('\n')
cat("Subjects =",length(nj),";",'Observations =',sum(nj),
ifelse(sum(nj==nj[1])==length(nj),'; Balanced =',""),
ifelse(sum(nj==nj[1])==length(nj),nj[1],""))
cat('\n')
cat('\n')
cat('- Nonlinear function \n')
cat('\n')
cat('nlmodel = \n')
cat(as.character(inter))
cat('\n')
cat("return(resp)}")
cat('\n')
cat('\n')
cat('-----------\n')
cat('Estimates\n')
cat('-----------\n')
cat('\n')
cat('- Fixed effects \n')
cat('\n')
print(round(out$res$table,5))
cat('\n')
cat('sigma =',round(out$res$sigmae,5),'\n')
cat('\n')
cat('Random effects \n')
cat('\n')
cat('i) Weights \n')
print(head(round(out$res$weights,5)))
cat('\n')
cat('ii) Variance-Covariance Matrix \n')
dimnames(out$res$D) <- list(namesz,namesz)
print(round(out$res$D,5))
cat('\n')
cat('------------------------\n')
cat('Model selection criteria\n')
cat('------------------------\n')
cat('\n')
critFin <- c(out$res$loglik, out$res$AIC, out$res$BIC, out$res$HQ)
critFin <- round(t(as.matrix(critFin)),digits=3)
dimnames(critFin) <- list(c("Value"),c("Loglik", "AIC", "BIC","HQ"))
print(critFin)
cat('\n')
cat('-------\n')
cat('Details\n')
cat('-------\n')
cat('\n')
cat("Convergence reached? =",(out$res$iter < MaxIter))
cat('\n')
cat('Iterations =',out$res$iter,"/",MaxIter)
cat('\n')
cat('Criteria =',round(out$res$criterio,5))
cat('\n')
cat('MC sample =',M)
cat('\n')
cat('Cut point =',cp)
cat('\n')
cat("Processing time =",out$res$time,units(out$res$time))
cat('\n')
}
fitted.values = rep(NA,sum(nj))
if(nc == 0){
for (j in 1:length(nj)){
pos = (sum(nj[1:j-1])+1):(sum(nj[1:j]))
rand = as.matrix(out$res$weights)[j,]
fitted.values[pos] = nlmodel(x = x[pos],
fixed = out$res$beta,
random = rand)
}
}else{
covar = as.matrix(covar)
for (j in 1:length(nj)){
pos = (sum(nj[1:j-1])+1):(sum(nj[1:j]))
rand = as.matrix(out$res$weights)[j,]
fitted.values[pos] = nlmodel(x = x[pos],
fixed = out$res$beta,
random = rand,
covar = covar[pos,,drop = FALSE])
}
}
res = list(p = p[k],
iter = out$res$iter,
criteria = out$res$criterio,
nlmodel = nlmodel,
beta = out$res$beta,
weights = out$res$weights,
sigma= out$res$sigmae,
Psi = out$res$D,
SE=out$res$EP,
table = out$res$table,
loglik=out$res$loglik,
AIC=out$res$AIC,
BIC=out$res$BIC,
HQ=out$res$HQ,
fitted.values = fitted.values,
residuals = fitted.values - y,
time = out$res$time)
obj.outk = list(conv=out$conv,res = res)
obj.out[[k]] = obj.outk
beta = out$res$beta
sigmae = out$res$sigmae
Psi = out$res$D
}
pb2$terminate()
#close(pb2)
par(mfrow=c(1,1))
betas = eps = matrix(NA,length(p),d+1)
for (i in 1:length(p))
{
j = p[i]
betas[i,] = rbind(obj.out[[i]]$res$beta,obj.out[[i]]$res$sigma)
eps[i,] = obj.out[[i]]$res$SE[1:(d+1)]
}
LIMSUP = t(betas + qnorm(1-((1-(CI/100))/2))*eps)
LIMINF = t(betas - qnorm(1-((1-(CI/100))/2))*eps)
labels = list()
for(i in 1:d){labels[[i]] = bquote(beta[.(i)])}
labels[[d+1]] = bquote(sigma)
par(mar=c(4, 4.5, 1, 0.5))
op <- par(mfrow=c(ifelse((d+1)%%2==0,(d+1)%/%2,((d+1)%/%2)+1),2),oma=c(0,0,2,0))
for(i in 1:(d+1)){
if(length(p)<4)
{
ymin = min(betas[,i],LIMSUP[i,],LIMINF[i,])
ymax = max(betas[,i],LIMSUP[i,],LIMINF[i,])
plot(p,betas[,i],ylim=c(ymin-2*mean(eps[,i]),ymax+2*mean(eps[,i])),xaxt='n', type='n',xlab='quantiles',ylab=labels[[i]])
axis(side=1, at=p)
polygon(c(p,rev(p)),c(LIMSUP[i,],rev(LIMINF[i,])), col = "gray50", border = NA)
lines(p,betas[,i])
lines(p,LIMSUP[i,])
lines(p,LIMINF[i,])
}
else
{
smoothingSpline = smooth.spline(p, betas[,i], spar=0.1)
smoothingSplineU = smooth.spline(p, betas[,i]+(qnorm(1-((1-(CI/100))/2)))*eps[,i], spar=0.1)
smoothingSplineL = smooth.spline(p, betas[,i]-(qnorm(1-((1-(CI/100))/2)))*eps[,i], spar=0.1)
plot(p, betas[,i], type='n',xaxt='n',xlab='quantiles',lwd=2,ylim=c(min(smoothingSplineL$y)-2*mean(eps[,i]),max(smoothingSplineU$y)+2*mean(eps[,i])),ylab=labels[[i]])
axis(side=1, at=p)
#create filled polygon in between the lines
polygon(c(smoothingSplineL$x,rev(smoothingSplineU$x)),c(smoothingSplineU$y,rev(smoothingSplineL$y)), col = "gray50", border = NA)
#plot lines for high and low range
lines(p, betas[,i], type='l',lwd=1)
lines(smoothingSplineU,lwd=1)
lines(smoothingSplineL,lwd=1)
}
}
title("Point estimative and 95% CI for model parameters", outer=TRUE)
if(show.convergence=="TRUE")
{
cpl = cp*MaxIter
ndiag = (q*(1+q)/2)
npar = d+1+ndiag
labels = list()
for(i in 1:d){labels[[i]] = bquote(beta[.(i)])}
labels[[d+1]] = bquote(sigma)
for(i in 1:ndiag){labels[[i+d+1]] = bquote(psi[.(i)])}
for(k in 1:length(p))
{
cat('\n')
cat('-------------------------------\n')
cat("Press [ENTER] to see next plot:")
line <- readline()
par(mar=c(4, 4.5, 1, 0.5))
op <- par(mfrow=c(ifelse(npar%%3==0,npar%/%3,(npar%/%3)+1),3),oma=c(0,0,2,0))
for(i in 1:npar)
{
plot.ts(obj.out[[k]]$conv$teta[i,],xlab="Iteration",ylab=labels[[i]])
abline(v=cpl,lty=2)
}
title(paste("Convergence plots for quantile",p[k]), outer=TRUE)
}
}
par(mfrow=c(1,1))
par(mar= c(5, 4, 4, 2) + 0.1)
class(obj.out) = "QRNLMM"
return(obj.out)
}
par(mfrow=c(1,1))
par(mar= c(5, 4, 4, 2) + 0.1)
}
|
6c596e132d33148ba2b4f06f7394912bf6dbe3e2 | e7cd1e33a146924f27531249060780ea160b1b1e | /variant_analysis/LOH_analysis.R | b99ee52bbd0d634de9df2ea520565e848ead190c | [] | no_license | rj67/germVar | a92abbf86f4f7d6118202735e33e88f8c689c83a | 612d90a480eac286644e9ddd9626cfca2036aada | refs/heads/master | 2016-09-10T14:34:04.348001 | 2015-04-05T16:35:09 | 2015-04-05T16:35:09 | 16,005,171 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,301 | r | LOH_analysis.R | ASCAT_stat <- read.table("Results/ASCAT_out/ploidy_purity.txt", fill=T)
colnames(ASCAT_stat) <- c("Patient", "ploidy", "purity")
ASCAT_stat <- subset(ASCAT_stat, !is.na(purity))
seq_lengths<- data.frame(CHROM = c(seq(1:22), "x"), length=seqlengths(seqinfo(nonsyn_GT))[1:23])
parse_ascat <- function(Patient){
ascat_file <- paste("Results/ASCAT_out/", Patient, ".segments_withSize.txt", sep="")
ascat_df <- read.csv(ascat_file, strip.white = T)
# some file have the "Different chromosome boundaries!" error message, remove them
ascat_df <- subset(ascat_df, !grepl("Different", Segment..))
ascat_grange <- GRanges( seqnames = Rle(ascat_df$chromosome),
ranges = IRanges(start = ascat_df$startBP, end = ascat_df$endBP, names = ascat_df$Segment..))
return(list(df=ascat_df, grange=ascat_grange))
}
ASCAT <-lapply(ASCAT_stat$Patient, parse_ascat)
tmp<-subset(nsSNP_GT, uid %in% subset(nsSNP_rv, pred_patho=="tier1")$uid)
tmp<-plyr::join(tmp, nsSNP_vr[c("uid", "aa_uid")])
tmp$uid <- as.character(levels(tmp$uid)[tmp$uid])
tmp2<-mapply(query_ascat, tmp$Patient, tmp$uid)
tmp2<-as.data.frame(t(tmp2))
df<-ASCAT[[25]]$df
plot_ASCAT <- function(df){
library(grid)
# reorder chrmomosome level
df$chromosome <- factor(df$chromosome, levels=c(seq(1, 22), "X"))
# for small segments, extend both end to achieve at least size_thresh
size_thresh <- 500000
df$startBP[df$size<size_thresh] <- df$startBP[df$size<size_thresh] - (size_thresh -df$size[df$size<size_thresh])/2
df$endBP[df$size<size_thresh] <- df$endBP[df$size<size_thresh] + (size_thresh -df$size[df$size<size_thresh])/2
# cap nA at threshold
nA_thresh <- 5
# offset nA nB for plotting
nAB_offset <- 0.09
df$nA[df$nA>nA_thresh] <- nA_thresh
df$nA <- df$nA - nAB_offset
df$nB <- df$nB + nAB_offset
p <- ggplot(df) + geom_segment(aes(x=startBP, xend=endBP, y=nA, yend=nA), size=5, color="#d7191c")
p <- p + geom_segment(aes(x=startBP, xend=endBP, y=nB, yend=nB), size=5, color="#2c7bb6")
p <- p + facet_grid(.~chromosome, scales="free_x", space="free")
p <- p + theme_few() + ylab("") + xlab("") + theme(axis.text.x=element_blank())
p <- p + scale_y_continuous(breaks=seq(0, 5)) + scale_x_continuous(breaks=NULL)
p
}
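# (Added sketch) draw the allele-specific copy-number profile for the sample extracted
# above (df comes from ASCAT[[25]]), so that the ggsave() call below has a plot to save.
plot_ASCAT(df)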
ggsave(filename="tmp.png", width=10, height=5)
|
37a8a24705eb6726e724a8e21ca7f98a41946d3f | 0306af700c92c891df42b645e24270f86c950681 | /man/plot_lof.Rd | 66e57900b1b249589b522bb2f51bed0c1203f17e | [] | no_license | ngwalton/iec | 0fb0f1a0a8d1b293b32edab4eda103f95d50c33b | 8c84b9e260c298745ded8f2f89a6baf8c836e2b6 | refs/heads/master | 2020-05-21T00:07:00.050610 | 2015-11-09T23:29:55 | 2015-11-09T23:29:55 | 25,713,988 | 1 | 1 | null | 2016-11-02T22:00:40 | 2014-10-25T00:13:27 | R | UTF-8 | R | false | false | 723 | rd | plot_lof.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plot_functions.R
\name{plot_lof}
\alias{plot_lof}
\title{Plot BRC lack-of-fit (LOF).}
\usage{
plot_lof(brc, min_lof = 1)
}
\arguments{
\item{brc}{BRC data frame generated by \code{\link{est_brc}}.}
\item{min_lof}{numeric value indicating minimum LOF to label.}
}
\description{
\code{plot_lof} plots the LOF of for each taxon in a BRC data frame.
}
\details{
The LOF for each taxon in a BRC data frame (generated by
\code{\link{est_brc}}) is plotted on a single frame by index. This can be
helpful for spotting taxa with large LOF relative to other taxa. This is
intended as a heuristic plot only.
}
\seealso{
\code{\link{est_brc}}
}
|
b8e27799e260a4a83adf3a02173abf5add33a447 | 235f2010472122f049f0d6c044f5dd574d493c2b | /rveg/tests/testthat.R | 000281a2086c53d7cddbf9c7176fb450ac919bf5 | [
"MIT"
] | permissive | chenzheng1996/monitoring-ecosystem-resilience | 11aff1aeb6f469be61db397b50ae94e8e8cb7b95 | 387b038fa8d8ac5f0c4541afa7478db9d18701c5 | refs/heads/master | 2023-08-11T23:32:22.115685 | 2021-09-28T09:45:22 | 2021-09-28T09:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 73 | r | testthat.R | library(testthat)
#devtools::load_all()
library(rveg)
test_check("rveg")
|
88af398900e12475a9fd8df52d4049bdfe3ab1ac | 0bc2798ed0bd2279da3d84e1f992ec602eb04dca | /Tree Models/HR Analytics/hr_analytics.R | b9bd1e64992b6225cfb7d049fcd18c0e6ee830c8 | [] | no_license | ramkrishnaa32/PGDDS-Projects | 39dec0ba6af05ff7b693507c2b8cfb39f359274c | bbb996fbccd1e3e5c783594996ea83f306d613f3 | refs/heads/master | 2021-05-01T09:44:24.105661 | 2018-02-13T08:26:00 | 2018-02-13T08:26:00 | 121,098,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,262 | r | hr_analytics.R |
# File importing
hr_analytics <- read.csv("hr_analytics.csv")
str(hr_analytics)
# baseline accuracy
prop.table(table(hr_analytics$salary))
# divide into train and test set
set.seed(123)
split.indices <- sample(nrow(hr_analytics), nrow(hr_analytics)*0.8, replace = F)
train <- hr_analytics[split.indices, ]
test <- hr_analytics[-split.indices, ]
# Classification Trees
library(rpart)
library(rpart.plot)
library(caret)
#1 build tree model- default hyperparameters
tree.model <- rpart(salary ~ ., # formula
data = train, # training data
method = "class") # classification or regression
# display decision tree
prp(tree.model)
# make predictions on the test set
tree.predict <- predict(tree.model, test, type = "class")
# evaluate the results
confusionMatrix(tree.predict, test$salary)
#2 Changing the algorithm to "information gain" instead of default "gini"
tree.model <- rpart(salary ~ ., # formula
data = train, # training data
method = "class", # classification or regression
parms = list(split = "information")
)
# display decision tree
prp(tree.model)
# make predictions on the test set
tree.predict <- predict(tree.model, test, type = "class")
# evaluate the results
confusionMatrix(tree.predict, test$salary)
#3 Tune the hyperparameters ----------------------------------------------------------
tree.model <- rpart(salary ~ ., # formula
data = train, # training data
method = "class", # classification or regression
control = rpart.control(minsplit = 1000, # min observations for node
minbucket = 1000, # min observations for leaf node
cp = 0.05)) # complexity parameter
# display decision tree
prp(tree.model)
# make predictions on the test set
tree.predict <- predict(tree.model, test, type = "class")
# evaluate the results
confusionMatrix(tree.predict, test$salary)
#4 A more complex tree -----------------------------------------------------------------
tree.model <- rpart(salary ~ ., # formula
data = train, # training data
method = "class", # classification or regression
control = rpart.control(minsplit = 1, # min observations for node
minbucket = 1, # min observations for leaf node
cp = 0.001)) # complexity parameter
# display decision tree
prp(tree.model)
# make predictions on the test set
tree.predict <- predict(tree.model, test, type = "class")
# evaluate the results
confusionMatrix(tree.predict, test$salary)
#5 Cross-validation to choose CP ------------------------------------------------------
library(caret)
# set the number of folds in cross-validation to 5
tree.control = trainControl(method = "cv", number = 5)
# set the search space for CP
tree.grid = expand.grid(cp = seq(0, 0.02, 0.0025))
# train model
tree.model <- train(salary ~ .,
data = train,
method = "rpart",
metric = "Accuracy",
trControl = tree.control,
tuneGrid = tree.grid,
control = rpart.control(minsplit = 50,
minbucket = 20))
# cross validated model results
tree.model
# look at best value of hyperparameter
tree.model$bestTune
# make predictions on test set
tree.predict <- predict.train(tree.model, test)
# accuracy
confusionMatrix(tree.predict, test$salary)
# plot CP vs Accuracy
library(ggplot2)
accuracy_graph <- data.frame(tree.model$results)
ggplot(data = accuracy_graph, aes(x = cp, y = Accuracy*100)) +
geom_line() +
geom_point() +
labs(x = "Complexity Parameter (CP)", y = "Accuracy", title = "CP vs Accuracy")
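# (Added sketch, not part of the original exercise) a common follow-up: refit a single
# rpart tree and prune it at the CP value selected by cross-validation above.
pruned.tree <- prune(rpart(salary ~ ., data = train, method = "class",
                           control = rpart.control(cp = 0, minsplit = 50, minbucket = 20)),
                     cp = tree.model$bestTune$cp)
prp(pruned.tree)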
|
5b44eef8366dd98c6a207a075eaa07d499c137e2 | b99559c092f9a112435087bfc67e199aede2e469 | /clustering1.R | d481ebc5db88076519903e2e99118359fa57f601 | [] | no_license | luaburto/machine-learning | 26d17317e07e713ad16c1fe4c12d7648ce91b685 | a4ffe1f2525ca82f15135ec24d1ae98404db5f97 | refs/heads/master | 2022-01-16T07:30:14.777662 | 2019-07-23T09:40:37 | 2019-07-23T09:40:37 | 198,096,501 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,897 | r | clustering1.R | n <- 3 # no. of centroids
set.seed(1415) # set seed for reproducibility
M1 <- matrix(round(runif(100, 1, 5), 1), ncol = 2)
M2 <- matrix(round(runif(100, 7, 12), 1), ncol = 2)
M3 <- matrix(round(runif(100, 20, 25), 1), ncol = 2)
M <- rbind(M1, M2, M3)
C <- M[1:n, ] # define centroids as first n objects
obs <- length(M) / 2
A <- sample(1:n, obs, replace = TRUE) # assign objects to centroids at random
colors <- seq(10, 200, 25)
clusterplot <- function(M, C, txt) {
plot(M, main = txt, xlab = "", ylab = "")
for(i in 1:n) {
points(C[i, , drop = FALSE], pch = 23, lwd = 3, col = colors[i])
points(M[A == i, , drop = FALSE], col = colors[i])
}
}
clusterplot(M, C, "Initialization")
repeat {
# calculate Euclidean distance between objects and centroids
D <- matrix(data = NA, nrow = n, ncol = obs)
for(i in 1:n) {
for(j in 1:obs) {
D[i, j] <- sqrt((M[j, 1] - C[i, 1])^2 + (M[j, 2] - C[i, 2])^2)
}
}
O <- A
## E-step: parameters are fixed, distributions are optimized
A <- max.col(t(-D)) # assign objects to centroids
if(all(O == A)) break # if no change stop
clusterplot(M, C, "E-step")
## M-step: distributions are fixed, parameters are optimized
# determine new centroids based on mean of assigned objects
for(i in 1:n) {
C[i, ] <- apply(M[A == i, , drop = FALSE], 2, mean)
}
clusterplot(M, C, "M-step")
}
cl <- kmeans(M, n)
clusterplot(M, cl$centers, "Base R")
(custom <- C[order(C[ , 1]), ])
## [,1] [,2]
## [1,] 3.008 2.740
## [2,] 9.518 9.326
## [3,] 22.754 22.396
(base <- cl$centers[order(cl$centers[ , 1]), ])
## [,1] [,2]
## 2 3.008 2.740
# from http://blog.ephorie.de/learning-data-science-understanding-and-using-k-means-clustering
## 1 9.518 9.326
## 3 22.754 22.396
round(base - custom, 13)
## [,1] [,2]
## 2 0 0
## 1 0 0
## 3 0 0
data <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00292/Wholesale customers data.csv", header = TRUE)
head(data)
## Channel Region Fresh Milk Grocery Frozen Detergents_Paper Delicassen
## 1 2 3 12669 9656 7561 214 2674 1338
## 2 2 3 7057 9810 9568 1762 3293 1776
## 3 2 3 6353 8808 7684 2405 3516 7844
## 4 1 3 13265 1196 4221 6404 507 1788
## 5 2 3 22615 5410 7198 3915 1777 5185
## 6 2 3 9413 8259 5126 666 1795 1451
set.seed(123)
k <- kmeans(data[ , -c(1, 2)], centers = 4) # remove columns 1 and 2, create 4 clusters
(centers <- k$centers) # display cluster centers
## Fresh Milk Grocery Frozen Detergents_Paper Delicassen
## 1 8149.837 18715.857 27756.592 2034.714 12523.020 2282.143
## 2 20598.389 3789.425 5027.274 3993.540 1120.142 1638.398
## 3 48777.375 6607.375 6197.792 9462.792 932.125 4435.333
## 4 5442.969 4120.071 5597.087 2258.157 1989.299 1053.272
round(prop.table(centers, 2) * 100) # percentage of sales per category
## Fresh Milk Grocery Frozen Detergents_Paper Delicassen
## 1 10 56 62 11 76 24
## 2 25 11 11 22 7 17
## 3 59 20 14 53 6 47
## 4 7 12 13 13 12 11
table(k$cluster) # number of customers per cluster
##
## 1 2 3 4
## 49 113 24 254
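# (Added sketch, not from the original post) a quick "elbow" check for choosing the number
# of clusters: total within-cluster sum of squares for k = 1 to 10.
wss <- sapply(1:10, function(k) kmeans(data[ , -c(1, 2)], centers = k, nstart = 10)$tot.withinss)
plot(1:10, wss, type = "b", xlab = "Number of clusters k", ylab = "Total within-cluster SS")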
|
bfa06425856db566dd5b9c9511f6d57fb72a4774 | f6a1375e6453107cba75567ec0c3ba23a5ac7958 | /R/aggregate_list.R | cfce3dd15247db3f06d9824ac8e1f7de48912459 | [] | no_license | UW-GAC/analysis_pipeline | 7c04b61c9cafa2bcf9ed1b25c47c089f4aec0646 | df9f8ca64ddc9995f7aef118987553b3c31301a1 | refs/heads/master | 2023-04-07T03:13:52.185334 | 2022-03-23T21:15:46 | 2022-03-23T21:15:46 | 57,252,920 | 42 | 30 | null | 2023-03-23T20:13:40 | 2016-04-27T22:25:56 | R | UTF-8 | R | false | false | 1,955 | r | aggregate_list.R | library(argparser)
library(TopmedPipeline)
library(SeqVarTools)
sessionInfo()
argp <- arg_parser("Parse table of variants of regions to a list")
argp <- add_argument(argp, "config", help="path to config file")
argp <- add_argument(argp, "--chromosome", help="chromosome (1-24 or X,Y)", type="character")
argp <- add_argument(argp, "--version", help="pipeline version number")
argv <- parse_args(argp)
cat(">>> TopmedPipeline version ", argv$version, "\n")
config <- readConfig(argv$config)
chr <- intToChr(argv$chromosome)
required <- c("variant_group_file")
optional <- c("aggregate_type"="allele",
"group_id"="group_id",
"out_file"="aggregate_list.RData")
config <- setConfigDefaults(config, required, optional)
print(config)
writeConfig(config, paste0(basename(argv$config), ".aggregate_list.params"))
## file can have two parts split by chromosome identifier
outfile <- config["out_file"]
varfile <- config["variant_group_file"]
if (!is.na(chr)) {
outfile <- insertChromString(outfile, chr, err="out_file")
varfile <- insertChromString(varfile, chr)
}
groups <- getobj(varfile)
## rename columns if necessary
names(groups)[names(groups) %in% config["group_id"]] <- "group_id"
names(groups)[names(groups) %in% c("chromosome", "CHROM")] <- "chr"
names(groups)[names(groups) %in% c("position", "POS")] <- "pos"
names(groups)[names(groups) %in% "REF"] <- "ref"
names(groups)[names(groups) %in% "ALT"] <- "alt"
## subset groups by chromosome
groups <- groups[groups$chr == chr,]
if (!is.character(groups$chr)) groups$chr <- as.character(groups$chr)
if (config["aggregate_type"] == "allele") {
aggVarList <- aggregateGRangesList(groups)
} else if (config["aggregate_type"] == "position") {
aggVarList <- aggregateGRanges(groups)
} else {
stop("aggregrate_type must be 'allele' or 'position'")
}
save(aggVarList, file=outfile)
# mem stats
ms <- gc()
cat(">>> Max memory: ", ms[1,6]+ms[2,6], " MB\n")
|
61a0631573295a9fe2823d5145f0ff21252e6ac9 | b1e812ebd76a7d344340c65ddebc52758745b95b | /man/getDataPSQL.Rd | a482dc6dd47409e43e5194690a0e8dcbc5031d4a | [] | no_license | DevProgress/RVertica | 71a03ffbedc13e7f1f6cdeef135c6cb25f37ebf6 | 30d2f6d60b22018e68d6ca1d61ea25ccdf9ada93 | refs/heads/master | 2020-04-06T06:49:07.788645 | 2016-08-25T21:47:04 | 2016-08-25T21:47:04 | 64,185,049 | 2 | 3 | null | 2016-08-25T21:47:04 | 2016-07-26T02:58:15 | R | UTF-8 | R | false | true | 461 | rd | getDataPSQL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/queries.R
\name{getDataPSQL}
\alias{getDataPSQL}
\title{Run Sample Query via \code{psql}}
\usage{
getDataPSQL(sqlquery)
}
\arguments{
\item{sqlquery}{A SQL query as character string}
}
\value{
The dataset corresponding to the query.
}
\description{
Run a sample query against Vertica using \code{psql}.
}
\details{
The \code{psql} binary is required.
}
\author{
Dirk Eddelbuettel
}
|
f3bbc19989e67f9660937a64705a6530edcd3f72 | 0a907d48703647ba34d5d01cb2652fe974bc155e | /man/ncc.ci.sr.Rd | 55165b2d188760e3a45a4481c167049d1a590eb9 | [] | no_license | simonvandekar/Reproducible | b111b60ea1c86c008a037bbdc1f5bb3513f5ca58 | 3f2d88d70c8ec33ec416c5c2ad38f1351526871c | refs/heads/master | 2021-07-11T11:36:04.702407 | 2020-11-16T19:14:56 | 2020-11-16T19:14:56 | 219,840,544 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 494 | rd | ncc.ci.sr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncc.R
\name{ncc.ci.sr}
\alias{ncc.ci.sr}
\title{Symmetric range confidence interval for chi-square noncentrality parameter.}
\usage{
ncc.ci.sr(y, p, alpha = 0.05)
}
\arguments{
\item{y}{numeric, value of chi random variable.}
\item{p}{integer, degrees of freedom.}
\item{alpha}{probability for confidence interval.}
}
\description{
Code taken by SNV from https://www1.maths.leeds.ac.uk/~john/software/ncc/ncc.r.
}
|
f19c1d139fe73d4f98a15b0b3fb1c19d41c31040 | c54f712c4ddfe909de08dd0b52639d1af1a5f0e4 | /exercise-6-code (1).R | 2d4480bd3f8323767dc3f36afa69603a682e659e | [] | no_license | JEMunson/PS811-Excercises | 61a1cbd0249c414934888e8c87de72e8052833aa | 42ff0b8cb2162383ddd486837445fdb2939889f1 | refs/heads/master | 2023-01-19T22:26:52.670835 | 2020-12-03T22:16:27 | 2020-12-03T22:16:27 | 294,452,738 | 0 | 2 | null | 2020-11-15T00:45:06 | 2020-09-10T15:43:40 | TeX | UTF-8 | R | false | false | 3,872 | r | exercise-6-code (1).R | ---
title: 'Exercise 6: Base R vs. Tidyverse'
author: "Jessie Munson"
date: "10/22/2020"
output: pdf_document
---
# load packages
library("here")
library("haven")
library("magrittr")
library("tidyverse")
library("tidyr")
library("dplyr")
# setup folders and directories
here("data")
here("code")
#read in data
victual <- read_csv(here("data", "food_coded.csv"))
#make data frame just in case
as.data.frame(victual)
#view for reference
head(victual)
#extract the first 95 rows, named extracted data frame "victuals"
victuals<- victual[1:95,]
#look at variables
view(victuals$GPA)
view(victuals$calories_chicken)
view(victuals$drink)
view(victuals$fav_cuisine)
view(victuals$father_profession)
view(victuals$mother_profession)
#Create a new variable for how healthy each person feels but convert the scale from 1 to 10 to 1 to 100.
victuals$healthy_feeling2 <- victuals$healthy_feeling * 10 # assumes the original 1-10 column is named `healthy_feeling`
print(victuals$healthy_feeling2)
#Filter to students who are female and have GPAs that are above 3.0.
fem3<- filter(victuals, GPA > 3.0, Gender == 1)
fem3
#arrange favorite cuisine alphabetically
fem3c<- arrange(fem3, fem3$fav_cuisine)
fem3c
fem3cdta<- tibble(fem3c)
#Find the mean and standard deviation for the following variables, and summarize them in a data frame.
mean(fem3c$calories_chicken)
mean(fem3c$tortilla_calories)
mean(fem3c$turkey_calories)
mean(fem3c$waffle_calories)
sd(fem3c$calories_chicken)
sd(fem3c$tortilla_calories)
sd(fem3c$turkey_calories)
sd(fem3c$waffle_calories)
#summarize in new data frame
fem3_data <- tibble(c( mean(fem3c$calories_chicken),
mean(fem3c$tortilla_calories),
mean(fem3c$turkey_calories),
mean(fem3c$waffle_calories),
sd(fem3c$calories_chicken),
sd(fem3c$tortilla_calories),
sd(fem3c$turkey_calories),
sd(fem3c$waffle_calories)))
#summarize GPA -> I'm not sure this is right
summary(fem3cdta[, c("GPA", "weight")])
#now to tidyverse stuff
#read in the csv
veracity<- read.csv(here("data", "facebook-fact-check.csv"))
#extract the last 500 rows
veracity2 <- veracity %>% tail(500)
#Look at the even-numbered column indices only. Identify them by name.
#I don't understand this
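# (Added hint, not the student's answer) one way to list the even-numbered columns by name:
# names(veracity2)[seq(2, ncol(veracity2), by = 2)]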
#make new coded post type variable
post_type_coded <- mutate(veracity2,
Post.Type = case_when(
Post.Type == "link" ~ 1,
Post.Type == "photo" ~ 2,
Post.Type == "text" ~ 3,
Post.Type == "video" ~ 4))
#page names in reverse order
arrange(veracity2, desc(Page))
#Find the mean and standard deviation for the following variables, and summarize them.
summarise(veracity2,
share_count.mean = mean(share_count, na.rm = TRUE),
share_count.sd = sd(share_count, na.rm = TRUE),
reaction_count.mean = mean(reaction_count),
reaction_count.sd = sd(reaction_count),
comment_count.mean = mean(comment_count),
comment_count.sd = sd(comment_count))
#na.rm removed the NAs I was getting in the summary.
#Summarize the mean and standard deviations in Question 7 with the
#"mainstream" values in the `category` variable.
veracity2 %>%
  filter(Category == "mainstream") %>%
  summarise(share_count.mean = mean(share_count, na.rm = TRUE),
            share_count.sd = sd(share_count, na.rm = TRUE),
            reaction_count.mean = mean(reaction_count),
            reaction_count.sd = sd(reaction_count),
            comment_count.mean = mean(comment_count),
            comment_count.sd = sd(comment_count))
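# (Added note) a pipe just forwards the left-hand result as the first argument of the
# next call, so the pipeline above is the same as the nested form
# summarise(filter(veracity2, Category == "mainstream"), share_count.mean = mean(share_count, na.rm = TRUE), ...)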
#Jess helped me with the last two as I don't understand the tidyverse yet.
#Do you know of any good tidyverse primers I could refer to? Especially one that talks
#about piping? |
5c2e0b5047e96d9eacd8c841b0bf02c9491d1ea8 | 2d662871298fadd5de7dd030257c8d6614369d9e | /Yapay sinir ağları.R | bdf2dca7ceaa8662378e8a18c01062b6b4409495 | [] | no_license | CemRoot/R-Language | 64091cee31346f92ed951f989f06fc267975353f | 62c1dca0bae747eca09874a34e2b7b6131ec158d | refs/heads/main | 2023-04-11T02:38:54.744005 | 2021-04-12T19:28:43 | 2021-04-12T19:28:43 | 353,489,620 | 1 | 0 | null | null | null | null | ISO-8859-9 | R | false | false | 2,205 | r | Yapay sinir ağları.R | #YAPAY SİNİR AĞLARI
# for the Boston data set
install.packages("MASS")
library(MASS)
# for the neuralnet function
install.packages("neuralnet")
library(neuralnet)
# to use neural networks we need to scale our data.
# the variable we are going to predict has to be pulled to lie exactly between 0 and 1.
# to bring all of its values between 0 and 1 we subtract the minimum
# and divide by the difference between the maximum and the minimum.
# the other variables are centered on their means and divided by their standard deviations.
Boston.scaled <- as.data.frame(scale(Boston))
min.medv <- min(Boston$medv)
max.medv <- max(Boston$medv)
Boston.scaled$medv <- scale(Boston$medv
, center = min.medv
, scale = max.medv - min.medv)
# the medv variable, which we are going to predict, has been scaled in this way.
# Train-test split
Boston.train.scaled <- Boston.scaled[1:400, ]
Boston.test.scaled <- Boston.scaled[401:506, ]
# when calling the neuralnet function we need to write out all of the variables one by one.
# !!! WHEN USING NEURAL NETWORKS ALL VARIABLES MUST BE NUMERIC !!!!
# if you have a variable with more than one category, use the model.matrix function.
# you can type ?model.matrix to inspect the details.
Boston.nn.5.3 <- neuralnet(medv~crim+zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+black+lstat
, data=Boston.train.scaled
, hidden=c(5,3)
, linear.output=TRUE)
plot(Boston.nn.5.3)
# with the plot function we can easily visualize the neural network we just created.
# the same predictors can also be written once as a formula object and reused:
Boston.nn.fmla <- medv~crim+zn+indus+chas+nox+rm+age+dis+rad+tax+ptratio+black+lstat
Boston.nn.8 <- neuralnet(Boston.nn.fmla
, data=Boston.train.scaled
, hidden=8
, linear.output=TRUE)
plot(Boston.nn.8)
# we have now built two different neural networks.
# to see the results on the original scale we only need to invert the scaling of medv.
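# (Added sketch, not part of the original script) for example, for the first network the
# test-set predictions can be mapped back to the original medv scale like this:
pred.scaled <- neuralnet::compute(Boston.nn.5.3, Boston.test.scaled[, 1:13])$net.result
pred.medv <- pred.scaled * (max.medv - min.medv) + min.medv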
|
5acfe332648e5f68e198c70659abd32aa15166bb | 0d74c6026340636cb7a73da2b53fe9a80cd4d5a5 | /simsem/man/SimSem-class.Rd | 0a56092fadf51f4fa42062761d9d0d2f6e6d8c39 | [] | no_license | simsem/simsem | 941875bec2bbb898f7e90914dc04b3da146954b9 | f2038cca482158ec854a248fa2c54043b1320dc7 | refs/heads/master | 2023-05-27T07:13:55.754257 | 2023-05-12T11:56:45 | 2023-05-12T11:56:45 | 4,298,998 | 42 | 23 | null | 2015-06-02T03:50:52 | 2012-05-11T16:11:35 | R | UTF-8 | R | false | false | 1,615 | rd | SimSem-class.Rd | \name{SimSem-class}
\Rdversion{1.1}
\docType{class}
\alias{SimSem-class}
\alias{summary,SimSem-method}
\title{Class \code{"SimSem"}}
\description{
The template containing data-generation and data-analysis specification
}
\section{Objects from the Class}{
Objects can be created by \code{\link{model}}.
}
\section{Slots}{
\describe{
\item{\code{pt}:}{ Parameter table used in data analysis }
\item{\code{dgen}:}{ Data generation template }
\item{\code{modelType}:}{ Type of models (CFA, Path, or SEM) contained in this object }
\item{\code{groupLab}:}{ The label of grouping variable }
\item{\code{con}:}{ The list of defined parameters, equality constraints, or inequality constraints specified in the model }
}
}
\section{Methods}{
\describe{
\item{summary}{Get the summary of model specification }
}
}
\author{
Patrick Miller (University of Notre Dame; \email{[email protected]}),
Sunthud Pornprasertmanit (\email{[email protected]})
}
\seealso{
\itemize{
\item Create an object this class by CFA, Path Analysis, or SEM model by \code{\link{model}}.
}
}
\examples{
showClass("SimSem")
loading <- matrix(0, 6, 2)
loading[1:3, 1] <- NA
loading[4:6, 2] <- NA
loadingValues <- matrix(0, 6, 2)
loadingValues[1:3, 1] <- 0.7
loadingValues[4:6, 2] <- 0.7
LY <- bind(loading, loadingValues)
summary(LY)
latent.cor <- matrix(NA, 2, 2)
diag(latent.cor) <- 1
RPS <- binds(latent.cor, 0.5)
# Error Correlation Object
error.cor <- matrix(0, 6, 6)
diag(error.cor) <- 1
RTE <- binds(error.cor)
CFA.Model <- model(LY = LY, RPS = RPS, RTE = RTE, modelType="CFA")
summary(CFA.Model)
}
|
b90c97eef2a6ead6cc14de575d86463a72490795 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rpf/examples/rpf.logprob.Rd.R | f0ff09048c8f68c34ae56145e7b7d24ec49b79fb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 599 | r | rpf.logprob.Rd.R | library(rpf)
### Name: rpf.logprob
### Title: Map an item model, item parameters, and person trait score into
### a probability vector
### Aliases: rpf.logprob rpf.logprob,rpf.1dim,numeric,numeric-method
### rpf.logprob,rpf.1dim,numeric,matrix-method
### rpf.logprob,rpf.mdim,numeric,matrix-method
### rpf.logprob,rpf.mdim,numeric,numeric-method
### rpf.logprob,rpf.mdim,numeric,NULL-method rpf_logprob_wrapper
### ** Examples
i1 <- rpf.drm()
i1.p <- rpf.rparam(i1)
rpf.logprob(i1, c(i1.p), -1) # low trait score
rpf.logprob(i1, c(i1.p), c(0,1)) # average and high trait score
|
a60bd783b633ce96bfc46b4fcb5c585fabc69e2f | b400255589a974e4fb8a7c468f7a967649c10c25 | /man/pkg_dev.Rd | 9da87900be9aa0e56086280d48da8f1d61dd8489 | [
"MIT"
] | permissive | cmil/ghactions | 701730602d785a7e313e9550dde5ee68dff69f1a | 10d3c3a41b2de651589aed91d06388579b6a3d2a | refs/heads/master | 2020-12-05T17:59:58.736614 | 2020-01-06T22:56:10 | 2020-01-06T22:56:10 | 232,199,293 | 0 | 0 | MIT | 2020-01-06T22:49:33 | 2020-01-06T22:49:32 | null | UTF-8 | R | false | true | 2,029 | rd | pkg_dev.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/steps.R
\name{pkg_dev}
\alias{pkg_dev}
\alias{rcmd_check}
\alias{covr}
\title{CI/CD steps for a package at the repository root}
\usage{
rcmd_check(name = "Check Package")
covr(name = "Run Code Coverage")
}
\arguments{
\item{name}{\verb{[character(1)]}
giving additional options for the step.
Multiline strings are not supported.
Defaults to \code{NULL}.}
\item{...}{
Arguments passed on to \code{\link[=step]{step}}
\describe{
\item{\code{id}}{\verb{[character(1)]}
giving additional options for the step.
Multiline strings are not supported.
Defaults to \code{NULL}.}
\item{\code{if}}{\verb{[character(1)]}
giving additional options for the step.
Multiline strings are not supported.
Defaults to \code{NULL}.}
\item{\code{shell}}{\verb{[character(1)]}
giving additional options for the step.
Multiline strings are not supported.
Defaults to \code{NULL}.}
\item{\code{with}}{\verb{[list()]}
giving a named list of additional parameters.
Defaults to \code{NULL}.}
\item{\code{env}}{\verb{[list()]}
giving a named list of additional parameters.
Defaults to \code{NULL}.}
\item{\code{working-directory}}{\verb{[character(1)]}
giving the default working directory.
Defaults to \code{NULL}.}
\item{\code{continue-on-error}}{\verb{[logical(1)]}
giving whether to allow a job to pass when this step fails.
Defaults to \code{NULL}.}
\item{\code{timeout-minutes}}{\verb{[integer(1)]}
giving the maximum number of minutes to run the step before killing the process.
Defaults to \code{NULL}.}
}}
}
\description{
CI/CD steps for a package at the repository root
}
\section{Functions}{
\itemize{
\item \code{rcmd_check}: \code{\link[rcmdcheck:rcmdcheck]{rcmdcheck::rcmdcheck()}}
\item \code{covr}: \code{\link[covr:codecov]{covr::codecov()}}
}}
\seealso{
Other steps:
\code{\link{checkout}()},
\code{\link{deploy_static}},
\code{\link{install_deps}()},
\code{\link{rscript}()}
}
\concept{pkg_development}
\concept{steps}
|
6d7ef664e7b44a149556c7b81516dde4070bc65c | bfb23bde5d451fdbe5b66482083656afe311510d | /tests/testthat/test_compile_report.R | 8a2747fd54c93a07c2ff692b1a6b2e66f2a904c1 | [] | no_license | vfulco/reportfactory | b96cb9961ff16c819bcb20fcb2c117c1df451347 | 1cca5b9e6241469377c4a9047a5a7580185551b9 | refs/heads/master | 2020-03-22T08:05:55.466548 | 2018-06-29T15:55:14 | 2018-06-29T15:55:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 543 | r | test_compile_report.R | context("Test report compilation")
test_that("Compilation can handle multiple outputs", {
skip_on_cran()
setwd(tempdir())
random_factory()
compile_report(list_reports(pattern = "foo")[1], quiet = TRUE)
outputs <- sub("([[:alnum:]_-]+/){2}", "",
list_outputs())
outputs <- sort(outputs)
ref <- c("figures/boxplots-1.pdf", "figures/boxplots-1.png",
"figures/violins-1.pdf", "figures/violins-1.png",
"foo_2018-06-29.html", "outputs_base.csv" )
expect_identical(ref, outputs)
})
|
99c551547fd77c37277e0108ec39f21cca0043e3 | cbb37354c93299164fc2b88e5a74901a48ae9551 | /R/tablefilter.R | f1aa7319bf12c114333e103152b7c874cf30f9fb | [] | no_license | skranz/shinyEventsUI | 2de099b12a05e313f94df5919a32ffc44cde4565 | e05f1eb202f15174f054be914aed9920aa60148b | refs/heads/master | 2021-07-12T05:23:36.408108 | 2021-04-01T11:18:40 | 2021-04-01T11:18:40 | 53,415,508 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,001 | r | tablefilter.R | example.table.filter = function() {
library(SeminarMatching)
script.dir = "D:/libraries/shinyEventsUI/shinyEventsUI/inst/www"
app = eventsApp()
n = 10
df = data.frame(a = sample(1:2,n, replace = TRUE), x = runif(n))
html = html.table(df,id="mytab")
app$ui = bootstrapPage(
HTML(html),
add.table.filter("mytab", filter.type="select", num.cols=5)
)
viewApp(app)
}
table.filter.header = function() {
addShinyEventsUIRessourcePath()
singleton(tags$head(tags$script(src="shinyEventsUI/tablefilter_all_min.js")))
}
add.table.filter = function(table.id, filter.type = "select", num.cols=10, add.header=TRUE) {
restore.point("add.table.filter")
inner = paste0("col_",seq_len(num.cols)-1,': "', filter.type,'"', collapse=",\n" )
js = paste0('
var myfilter = {
',inner,',
};
var tf = setFilterGrid("',table.id,'",myfilter);
')
if (add.header) {
return(tagList(
table.filter.header(),
tags$script(HTML(js))
))
}
tags$script(HTML(js))
}
|
8ec94acca5dc1bd6495d21eb66429d580ba3cc3d | 3803bb7b319c8b37910d0fa42e20b73d4f40e031 | /data.R | 2d4703b6a4d1662efe250b26bec1c7a484010fa7 | [] | no_license | kartbilon/csv | d79a4e70f32bea52ff993e60c824ac07c4040ca7 | 2920c4569510e71285e159ef2363b8c1c80b788b | refs/heads/master | 2020-12-03T23:07:15.359026 | 2020-01-03T05:14:03 | 2020-01-03T05:14:03 | 231,516,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,191 | r | data.R | library(httr)
library(rvest)
library(RSelenium)
# code that pauses for 3.7 seconds
# testit <- function(x)
# {
# p1 <- proc.time()
# Sys.sleep(x)
# proc.time() - p1 # The cpu usage should be negligible
# }
# testit(3.7)
remD <- remoteDriver(port = 4445, # port number
                     browserName = "safari") # browser to use
remD$open()
remD$navigate("https://www.youtube.com/user/bokyemtv/videos")
remD$executeScript("window.scrollTo(0,100)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(100,200)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(200,300)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(300,400)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(400,500)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(500,600)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(600,700)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(700,800)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(800,900)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(900,1000)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(1000,1100)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(1100,1200)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(2400,2500)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(2600,2700)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(2800,2900)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
remD$executeScript("window.scrollTo(3000,3100)")
testit <- function(x)
{
p1 <- proc.time()
Sys.sleep(x)
proc.time() - p1 # The cpu usage should be negligible
}
testit(0.5)
html <- remD$getPageSource()[[1]]
html <- read_html(html)
youtube_title = c()
youtube_thumbnail = c()
youtube_url = c()
youtubevideo = "youtube.com"
title_css = "div#items a#video-title"
thumbnail_css ="div#items img#img"
title_node = html_nodes(html,title_css)
thumbnail_node = html_nodes(html, thumbnail_css)
title_part = html_text(title_node)
thumbnail_part = html_attr(thumbnail_node, "src")
url_part = html_attr(title_node, "href")
url_part2= paste0(youtubevideo,url_part )
title_part = title_part [1:100]
thumbnail_part = thumbnail_part [1:100]
url_part2 = url_part2 [1:100]
youtube_title = c(youtube_title,title_part)
youtube_thumbnail=c(youtube_thumbnail,thumbnail_part)
youtube_url = c(youtube_url,url_part2)
earloflemongrab31 = cbind(youtube_title, youtube_thumbnail,youtube_url)
write.csv (earloflemongrab31,"/Users/hwanghyeon-u/Desktop/bk.csv", fileEncoding = "utf8")
|
b229d10e9bda7aca52d0e5bdf176762bb85b8438 | b47d0dee49601b05b9b7fa01b10717d1cd9564ee | /_site/tests/test-pairs.R | 257cce51def7805ff9aaa950c3bd130a41649fc5 | [
"MIT"
] | permissive | dchudz/predcomps | 445760dd4c33b0795ea97d8c219c12e1dbf8ab46 | cc3bf1155cc01f496da231af4facfc47d986d046 | refs/heads/master | 2021-01-02T09:26:13.537651 | 2018-06-26T12:31:36 | 2018-06-26T12:31:36 | 14,263,610 | 16 | 11 | MIT | 2018-06-26T12:31:37 | 2013-11-09T19:35:23 | R | UTF-8 | R | false | false | 1,022 | r | test-pairs.R | MakeComparable <- function(df) {
df <- round(df, digits = 5)
return(df[do.call(order, df), ])
}
test_that("GetPairs works right in a small example", {
df <- data.frame(X = rep(c(1,2),2),
Y = rep(c(2,4),2))
pairsActual <- GetPairs(df, "X", "Y")
pairsExpected <- data.frame(OriginalRowNumber = c(1L, 1L, 1L, 2L, 2L, 2L, 3L, 3L, 3L, 4L, 4L, 4L),
X = c(1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2),
Y = c(2, 2, 2, 4, 4, 4, 2, 2, 2, 4, 4, 4),
X.B = c(2, 1, 2, 1, 1, 2, 1, 2, 2, 1, 2, 1),
Weight = c(0.166666666666667, 0.666666666666667, 0.166666666666667, 0.166666666666667, 0.166666666666667, 0.666666666666667, 0.666666666666667, 0.166666666666667, 0.166666666666667, 0.166666666666667, 0.666666666666667, 0.166666666666667))
pairsActual <- pairsActual[names(pairsExpected)]
expect_that(all.equal(MakeComparable(pairsActual), MakeComparable(pairsExpected)), is_true())
})
|
c08e529005e721b9f5b33cf3dd2b1fad49d31ac5 | 7d840154a12fc1012ea72cdba3032bcdd2ebfeee | /man/wbw.Rd | a22a76986cdca657faf4a24eef916f787b6e55df | [
"MIT"
] | permissive | bandyopd/cenROC | b875d2fb1b4dcae8e8eb682d71dba699b4393ff3 | 98bf995accc633fd6e5990abee65936233ea9ba2 | refs/heads/master | 2022-04-12T18:09:17.015754 | 2020-03-31T14:12:20 | 2020-03-31T14:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,405 | rd | wbw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bw.R
\name{wbw}
\alias{wbw}
\title{Function to select the bandwidth parameter needed for smoothing the time-dependent ROC curve.}
\usage{
wbw(X, wt, bw = "NR", ktype = "normal")
}
\arguments{
\item{X}{The numeric data vector.}
\item{wt}{The non-negative weight vector.}
\item{bw}{A character string specifying the bandwidth selection method. The possible options are "\code{NR}" for the normal reference, the plug-in "\code{PI}" and cross-validation "\code{CV}".}
\item{ktype}{A character string indicating the type of kernel function: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}". Default is "\code{normal}" kernel.}
}
\value{
Returns the estimated value for the bandwidth parameter.
}
\description{
This function computes the data-driven bandwidth value for smoothing the ROC curve.
It contains three methods: the normal reference, the plug-in and the cross-validation methods.
}
\references{
Beyene, K. M. and El Ghouch A. (2019). Smoothed time-dependent ROC curves for right-censored survival data. <\url{https://dial.uclouvain.be/pr/boreal/object/boreal:219643}>.
}
\author{
Kassu Mehari Beyene, Catholic University of Louvain. \code{<[email protected]>}
Anouar El Ghouch, Catholic University of Louvain. \code{<[email protected]>}
}
\keyword{internal}
|
80701ef07afa5c867844b9ce8c9df44a2373a839 | 19c861d31f78661a83c38a133edd8c4f6eac0336 | /man/revgray.Rd | 66cf3e5964c49ee4c0eceda0bcef162804d96dfa | [] | no_license | cran/broman | 8db38ff459ffda1645c01cb145b15aa4ea8e3647 | 90ae16237e25ee75600b31d61757f09edf72ad91 | refs/heads/master | 2022-07-30T16:46:40.198509 | 2022-07-08T14:30:09 | 2022-07-08T14:30:09 | 17,694,892 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 693 | rd | revgray.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/revgray.R
\name{revgray}
\alias{revgray}
\title{Create vector of colors from white to black}
\usage{
revgray(n = 256, ...)
}
\arguments{
\item{n}{Number of colors.}
\item{...}{Passed to \code{\link[grDevices:gray]{grDevices::gray()}}.}
}
\value{
Vector of colors, from white to black
}
\description{
Calls \code{\link[grDevices:gray]{grDevices::gray()}} then \code{\link[base:rev]{base::rev()}}
}
\details{
There's not much to this. It's just \verb{gray((n:0)/n)}
}
\examples{
x <- matrix(rnorm(100), ncol=10)
image(x, col=revgray())
}
\seealso{
\code{\link[grDevices:gray]{grDevices::gray()}}
}
\keyword{color}
|
11e36eea63a1862524f20e488d674ddd166fc940 | 7d42b047a927c159e9e0a417eefdea85860c80c6 | /project/p3.R | 5c82beb2f15581096d680c79b6f42aa809d04198 | [] | no_license | gdwangh/coursera-dataScientists-8-Practical-Machine-Learning | b465ae76846b95a6639e45078377215665797d86 | f682e79d509b07f0669f8ada1d9a5c8af81c55b0 | refs/heads/master | 2020-05-20T05:54:00.153360 | 2014-12-07T15:25:15 | 2014-12-07T15:25:15 | 26,563,358 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,895 | r | p3.R | library(caret)
library(randomForest)
setwd("./project")
tmp_ds<-read.csv("pml-training.csv", na.strings=c("NA","","#DIV/0!"))
test_ds<-read.csv("pml-testing.csv", na.strings=c("NA","","#DIV/0!"))
set.seed(12345)
inTrain = createDataPartition(tmp_ds$classe, p = 0.75)[[1]]
train_ds = tmp_ds[ inTrain, ]
valid_ds = tmp_ds[-inTrain, ]
var_list<-c(8:11,37:45,46:49,60:68,84:86,102,113:121,122:124,140,151:159)
train_classe<-train_ds[,160]
train_ds<-train_ds[,var_list]
valid_classe<-valid_ds[,160]
valid_ds<-valid_ds[,var_list]
# First remove near-zero-variance (almost constant) variables
zerovar <- nearZeroVar(train_ds)
# Then remove predictors that are too highly correlated with one another
highCorr<-findCorrelation(cor(train_ds), 0.90)
train_ds<-train_ds[,-highCorr]
# Data preprocessing (centering/scaling and missing-value handling)
Process <- preProcess(train_ds)
trainPC <- predict(Process, train_ds)
# tuning the parameter
trCtrl=trainControl(method = "cv", returnResamp="all", repeats=3)
fit<-train(train_ds, train_classe, method = "rf", trControl=trCtrl)
# cross-validation, 10-fold which is the default
ctrl<-rfeControl(functions=rfFuncs, returnResamp="all", method="cv")
rfProfile <- rfe(trainPC, train_classe, sizes=c(1:ncol(trainPC)), rfeControl=ctrl)
rfProfile
# get a text string of variable names that were picked in the final model
predictors(rfProfile)
# predict
pred<-predict(rfProfile, valid_ds)
# Calculates performance
postResample(pred, valid_classe)
confusionMatrix(pred$pred,valid_classe)
# produces the performance profile across different subset sizes
trellis.par.set(caretTheme())
plot(rfProfile,type=c('o','g'))
xyplot(rfProfile, type = c("g", "p", "smooth"))
# Reference: http://topepo.github.io/caret/featureselection.html
train_1<-train_ds[,c("yaw_belt", "magnet_dumbbell_z", "pitch_belt", "magnet_dumbbell_y", "pitch_forearm")]
rffit<-randomForest(train_classe~., train_1)
pred<-predict(rffit, valid_ds)
confusionMatrix(pred, valid_classe)
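# If only the headline accuracy of this reduced 5-variable model is needed, it can be pulled
# out of the confusionMatrix object (a small added illustration):
cm<-confusionMatrix(pred, valid_classe)
cm$overall["Accuracy"]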
|
88d7ad05fc76c61a30e176d61c1c810f969b7385 | b1dfc3a819693c0cd4e840b741fb70a312383a44 | /animate_circadian_4cseq_again.R | 14069393a9b2adbbe11797683259147dad565542 | [] | no_license | jakeyeung/Circadian4Cseq | 36db093c7c1965bd86525b11d09afff0b749aa22 | 65d190e488d2e1720560b10b4991c36bd2e85524 | refs/heads/master | 2020-09-22T01:39:36.746885 | 2020-08-07T17:08:47 | 2020-08-07T17:08:47 | 225,005,564 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,200 | r | animate_circadian_4cseq_again.R | # Jake Yeung
# Date of Creation: 2018-10-28
# File: ~/projects/4c_seq/scripts/primetime_animations/animate_around_the_clock.R
# Animate batch 3 around the clock
# Stolen from: ~/projects/4c_seq/scripts/batch3_4cseq_analysis/load_robj_plot_summary.batch3.WT_vs_Cry1intronKO.R
rm(list=ls())
jstart <- Sys.time()
library(here)
library(ggplot2)
library(gganimate)
library(tweenr)
library(transformr)
library(dplyr)
library(PhaseHSV)
library(Circadian4Cseq)
MakeAnimation <- function(jsub.orig, nf = 20, ks = 0, jfps = 20, vline = -28000,
plot.base = "/Users/yeung/projects/4c_seq_analysis/WT_plot"){
plot.out <- paste0(plot.base, ".fixed.pdf")
anim.out <- paste0(plot.base, ".fps.", jfps, ".fixed_rotated.11.5.gif")
jsub <- split(jsub.orig, jsub.orig$time)
# Here comes tweenr
jsub_tween <- jsub$ZT00 %>%
tween_state(jsub$ZT04, ease = 'linear', nframes = nf) %>%
keep_state(ks) %>%
tween_state(jsub$ZT08, ease = 'linear', nframes = nf) %>%
keep_state(ks) %>%
tween_state(jsub$ZT12, ease = 'linear', nframes = nf) %>%
keep_state(ks) %>%
tween_state(jsub$ZT16, ease = 'linear', nframes = nf) %>%
keep_state(ks) %>%
tween_state(jsub$ZT20, ease = 'linear', nframes = nf) %>%
keep_state(ks) %>%
tween_state(jsub$ZT00, ease = 'linear', nframes = nf)
# plot
ltype <- "solid"; jtitle <- "ZT"
jaspect.ratio <- 0.25
zts <- seq(0, 20, 4)
rotate.hrs <- -8
zts.rotated <- RotatePhase(zts, rotate.hr = rotate.hrs)
hex.cols <- hsv(PhaseToHsv(zts.rotated, 0, 24), 1, 1)
# change yellow to darker yellow
zt11.rotated <- RotatePhase(11.5, rotate.hr = rotate.hrs)
zt11.col <- hsv(PhaseToHsv(zt11.rotated, 0, 24), 1, 1)
# hex.cols[which(hex.cols == "#FFFF00")] <- "#FFD500"
hex.cols[which(hex.cols == "#FFFF00")] <- zt11.col
p.orig <- ggplot(jsub.orig, aes(x = pos, y = A, colour = time)) +
geom_line(data = subset(jsub.orig, LR == "Left"), aes(x = pos, y = A, colour = time), linetype = ltype) +
geom_line(data = subset(jsub.orig, LR == "Right"), aes(x = pos, y = A, colour = time), linetype = ltype) +
theme(aspect.ratio = 0.25) + geom_hline(aes(yintercept=0)) +
theme_bw() + ggtitle(jtitle) + geom_vline(aes(xintercept=vline), linetype="dotted") +
theme(legend.position = "bottom") +
xlab("Position relative to bait") +
ylab("Log10 Signal") +
theme(aspect.ratio = jaspect.ratio, strip.background = element_blank(),strip.text.y = element_blank()) +
scale_color_manual(values = hex.cols) +
scale_x_continuous(labels=bToKb()) +
labs(x = "Position relative to bait [kb]",
y = expression('4C signal [log'[10]*']')) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggsave(filename = plot.out)
jsub_tween <- jsub_tween %>%
mutate(.ZT = signif(.frame * ((2 * pi) / 120) * (24 / (2 * pi)), digits = 2))
# hex.cols.blue <- rep("#0000ff", 6)
p <- ggplot(jsub_tween, aes(x = pos, y = A, colour = time)) +
geom_line(data = subset(jsub_tween, LR == "Left"), aes(x = pos, y = A, colour = time), linetype = ltype) +
geom_line(data = subset(jsub_tween, LR == "Right"), aes(x = pos, y = A, colour = time), linetype = ltype) +
theme(aspect.ratio = 0.25) + geom_hline(aes(yintercept=0)) +
theme_bw() + ggtitle(jtitle) + geom_vline(aes(xintercept=-28000), linetype="dotted") +
theme(legend.position = "bottom") +
xlab("Position relative to bait") +
ylab("Log10 Signal") +
theme(aspect.ratio = jaspect.ratio, strip.background = element_blank(),strip.text.y = element_blank()) +
scale_color_manual(values = hex.cols) +
scale_x_continuous(labels=bToKb()) +
labs(x = "Position relative to bait [kb]",
y = expression('4C signal [log'[10]*']'),
# title = "ZT: {closest_state}") +
title = "ZT: {frame_time}") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
transition_time(.ZT) +
# transition_states(.ZT, transition_length = 1, state_length = 0) +
ease_aes('linear')
animate(p, renderer = gifski_renderer(), fps = jfps)
anim_save(filename = anim.out)
return(p)
}
# Try to animate ----------------------------------------------------------
# load("/Users/yeung/projects/4c_seq_analysis/counts.long.merged.Robj", v=T)
data(counts.long.merged.Robj)
jsig <- 2500
pos.max <- 1e5
jsub.orig <- subset(counts.long.merged, sig == jsig & genotype == "Liver_WT" & abs(pos) < pos.max)
nf <- 20
ks <- 0
jfps <- 22
outdir <- "gifs_outputs_test"
dir.create(outdir)
p.WT <- MakeAnimation(subset(counts.long.merged, sig == jsig & genotype == "Liver_WT" & abs(pos) < pos.max),
nf = nf, ks = ks, jfps = jfps,
plot.base = file.path(outdir, "WT_plot"))
p.KO <- MakeAnimation(subset(counts.long.merged, sig == jsig & genotype == "Liver_Cry1intronKO" & abs(pos) < pos.max),
nf = nf, ks = ks, jfps = jfps,
plot.base = file.path(outdir, "KO_plot"))
# print(p.WT)
# print(p.KO)
# animate(p.WT, fps = 50, renderer = gifski_renderer())
# animate(p.KO, fps = 50, renderer = gifski_renderer())
|
eb7d279731de129c703bab4ebe3dca032aa8b2c2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/broom/examples/tidy.kde.Rd.R | 93c6ec366289dd822ec17353440f34c79a874b0b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 488 | r | tidy.kde.Rd.R | library(broom)
### Name: tidy.kde
### Title: Tidy a(n) kde object
### Aliases: tidy.kde kde_tidiers ks_tidiers
### ** Examples
if (requireNamespace("ks", quietly = TRUE)) {
library(ks)
dat <- replicate(2, rnorm(100))
k <- kde(dat)
td <- tidy(k)
td
library(ggplot2)
ggplot(td, aes(x1, x2, fill = estimate)) +
geom_tile() +
theme_void()
# also works with 3 dimensions
dat3 <- replicate(3, rnorm(100))
k3 <- kde(dat3)
td3 <- tidy(k3)
td3
}
|
99855d150e23192ca801ed45f051c46655b72b79 | 085377a522b1a43fe6cbb073ca61bcc8f0eadef6 | /R_course_updated.R | c56854feb26f8f5be13622e0fbee92b25f75beef | [] | no_license | aojgbenga/plymouth_r_machine_learning | 78c6fb89b86977fccad9e06cc96d34d91f75ad95 | aa93d29ca073b96d5d102182c7e98b2eb97dd0a5 | refs/heads/master | 2022-05-30T20:41:27.448597 | 2020-05-03T10:54:32 | 2020-05-03T10:54:32 | 260,251,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,936 | r | R_course_updated.R | library(dplyr)
library(ggplot2)
library(caret)
library(e1071)
library(class)
library(randomForest)
library(tree)
orchid <- read.table(url("https://gist.githubusercontent.com/CptnCrumble/e01af3b83ffc463f4bb5776d0213f14b/raw/5382eee21b6e5b796541fd8053c5f733fd6eb9c7/orchids.txt"))
attach(orchid)
#Plotting the boxplots of the data
boxplot(X1 ~ loc,
xlab = "Location",
ylab = "Petal length",
col = c("#f54242", "#d4f542", "#42f56f"))
boxplot(X2 ~ loc,
xlab = "Location",
ylab = "Leaf width",
col = c("#f54242", "#d4f542", "#42f56f"))
boxplot(X3 ~ loc,
xlab = "Location",
ylab = "Petal width",
col = c("#f54242", "#d4f542", "#42f56f"))
aggregate(X1 ~ loc, data = orchid, FUN = mean)
# The mean petal length (X1) shows a clear difference between the three locations
aggregate(X2 ~ loc, data = orchid, FUN = mean)
# The mean leaf width (X2) also shows a difference between the three locations
aggregate(X3 ~ loc, data = orchid, FUN = mean)
# The mean petal widths (X3) are very similar and cannot be used to differentiate
# between the locations
# From the data provided, only the petal length (X1) and leaf width (X2) would be
# reliable for differentiating between the locations
# Orchids bivariate scatter plots
ggplot(orchid, aes( x = X1, y = X2)) +
geom_point(aes (color = factor(loc)), shape = 16, size = 2)+
theme_classic()+
labs(title ="Orchids graph", x = "Petal length (mm)", y = "Leaf width (mm)", color = "Orchids Location\n")
# Creating Training data and test data
set.seed(1)
data.subset <- sample(270, 210)
model.train <- orchid[data.subset,]
model.test <- orchid[-data.subset,]
set.seed(1)
##########################################################################
# KNN method
set.seed(1)
model.knn <- train(loc~.-X3,
data = model.train,
method = "knn",
trControl = trainControl(method = "LOOCV"),
preProcess = c("center", "scale"), # Normalize the data
tuneLength = 10) # Number of possible K values to evaluate
plot(model.knn)
model.knn$bestTune
predict.knn <- model.knn %>% predict(model.test)
# Plotting the KNN graph
pl = seq(min(model.test$X1), max(model.test$X1), by=0.1)
pw = seq(min(model.test$X2), max(model.test$X2), by=0.1)
# generates the boundaries for the graph
lgrid <- expand.grid(X1=pl, X2=pw, X3=19.73)
knnPredGrid <- predict(model.knn, newdata=lgrid)
knnPredGrid <- model.knn %>% predict(lgrid)
knnPredGrid = as.numeric(knnPredGrid)
predict.knn <- as.numeric(predict.knn)
model.test$loc <- predict.knn
probs <- matrix(knnPredGrid,
length(pl),
length(pw))
contour(pl, pw, probs, labels="",
xlab="Petal length (mm)", ylab="leaf width (mm)",
main="K-Nearest Neighbor", axes=T)
gd <- expand.grid(x=pl, y=pw)
points(gd, pch=3, col=probs, cex = 0.1)
# add the test points to the graph
points(model.test$X1, model.test$X2, col= model.test$loc, cex= 2, pch = 20)
####################################################################################
# Random forest Bagging method
set.seed(1)
bag.tree <- randomForest(loc ~ . -X3, data = orchid, subset = data.subset,
mtry = 2, importance = TRUE)
round(importance(bag.tree), 2)
varImpPlot(bag.tree)
bag_predict <- predict(bag.tree, model.test, type = "class")
# Creating plot for Bagging method using base R plot
lgrid <- expand.grid(X1=pl, X2=pw, X3=19.73)
bagPredGrid <- predict(bag.tree, newdata=lgrid)
bagPredGrid <- bag.tree %>% predict(lgrid)
bagPredGrid = as.numeric(bagPredGrid)
predict.bag <- as.numeric(bag_predict)
model.test$loc <- predict.bag
probs <- matrix(bagPredGrid, length(pl), length(pw))
contour(pl, pw, probs, labels="",
xlab="Petal length (mm)", ylab="leaf width (mm)",
main="Random forest Bagging method", axes=T)
gd <- expand.grid(x=pl, y=pw)
points(gd, pch=3, col=probs)
# add the test points to the graph
points(model.test$X1, model.test$X2, col= model.test$loc, cex= 2, pch = 20)
######################################################################
# Support vector machine
set.seed(1)
tune.out = tune(svm, loc ~ X1 + X2, data = orchid[data.subset,],
kernel ="linear",
ranges = list(cost = seq(from = 0.01,to = 2, length = 40) ))
plot(tune.out$performances$cost, tune.out$performances$error)
summary(tune.out)
tune.out$best.model$cost
bestmod = tune.out$best.model
summary(bestmod)
plot(bestmod, data = model.test, X2~X1)
ypred_linear = predict(bestmod, model.test)
# Support vector machine polynomial kernels
set.seed(1)
tune.out_poly = tune(svm, loc ~ X1 + X2, data = orchid[data.subset,],
kernel ="polynomial",
ranges = list(cost = seq(from = 0.01,to = 3, length = 30)))
plot(tune.out_poly$performances$cost, tune.out_poly$performances$error)
summary(tune.out_poly)
bestmod_poly = tune.out_poly$best.model
summary(bestmod_poly)
tune.out_poly$best.model$cost
plot(bestmod_poly, data = model.test, X2~X1)
ypred = predict(bestmod_poly, model.test)
# Which kernel is more suitable?
# Calculating test-set accuracy (1 - misclassification rate) for each model
# KNN method
tab <- table(predict.knn,model.test$loc)
tab
1-(sum(tab) - sum(diag(tab))) / sum(tab)
# Random forest bagging method
tab.bag <- table(bag_predict,model.test$loc)
tab.bag
1-(sum(tab.bag) - sum(diag(tab.bag))) / sum(tab.bag)
# Support vector machine Linear
tab.linear <- table(ypred_linear,model.test$loc)
tab.linear
1-(sum(tab.linear) - sum(diag(tab.linear))) / sum(tab.linear)
# Support vector machine Polynomial kernel
tab.poly <- table(ypred,model.test$loc)
tab.poly
1-(sum(tab.poly) - sum(diag(tab.poly))) / sum(tab.poly)
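# Optional sketch: collect the four accuracies computed above into one table for a
# side-by-side comparison.
accuracy.summary <- data.frame(
  model = c("KNN", "Random forest (bagging)", "SVM linear", "SVM polynomial"),
  accuracy = c(1-(sum(tab) - sum(diag(tab))) / sum(tab),
               1-(sum(tab.bag) - sum(diag(tab.bag))) / sum(tab.bag),
               1-(sum(tab.linear) - sum(diag(tab.linear))) / sum(tab.linear),
               1-(sum(tab.poly) - sum(diag(tab.poly))) / sum(tab.poly)))
accuracy.summary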
|
483712ce9ec283e43ec9bbc55551b7955bb6b3f9 | a94308678716ab60f03956e503f554a767e73733 | /ExampleCode/Chapter 2 Observed Score Methods/08_DIF Logistic Regression Polytomous.R | 22a91b48c9deb64858f86381f78eb00348a6dbf1 | [] | no_license | cswells1/MeasInv | 9f9cb20da68b695cc1f65fc5c80f92ea31b030e7 | b74acffcf8ec0d6886f7081882aa3965306eb4af | refs/heads/master | 2023-07-14T21:35:32.915150 | 2021-09-12T22:50:49 | 2021-09-12T22:50:49 | 405,707,567 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,815 | r | 08_DIF Logistic Regression Polytomous.R | #################################################################################################
# The following code uses Logistic Regression to test for DIF using the DIF.Logistic #
# function. #
#################################################################################################
# Load rms package: It performs logistic regression (binary and ordinal), which is used by   #
# the DIF.Logistic function. #
library(rms)
# Read data: The data are in a csv file in which the first 30 columns                        #
# represent item responses and the last column contains the grouping #
# variable. #
myfile <- system.file("extdata", "LikertData.csv", package = "MeasInv")
Likert.data <- read.csv(myfile, sep=",", header=T)
Likert.data$group <- factor(Likert.data$group) # Convert the grouping variable, "group", to a factor #
# which means R treats it as an unordered-categorical #
# (i.e., grouping) variable. #
# Use logistic regression to test for DIF using the lrm function which is in the rms package. #
raw.score <- apply(X = Likert.data[,1:12], MARGIN = 1, FUN = sum) # Compute raw score #
grp <- Likert.data$group
lrm(Likert.data[,1] ~ raw.score + grp + raw.score*grp) # Perform logistic regression: Model 3 #
lrm(Likert.data[,1] ~ raw.score) # Perform logistic regression: Model 1 #
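# A sketch (added for illustration) of formalizing the Model 3 vs. Model 1 comparison with a  #
# likelihood-ratio test on the group and interaction terms; the object names below are new    #
# and not part of the original script.                                                        #
mod3 <- lrm(Likert.data[,1] ~ raw.score + grp + raw.score*grp)
mod1 <- lrm(Likert.data[,1] ~ raw.score)
lr.chisq <- mod3$stats["Model L.R."] - mod1$stats["Model L.R."]   # change in model chi-square
lr.df <- mod3$stats["d.f."] - mod1$stats["d.f."]                  # change in degrees of freedom
pchisq(lr.chisq, df = lr.df, lower.tail = FALSE)                  # p-value for uniform + nonuniform DIF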
# Perform DIF using logistic regression via the DIF.Logistic function. #
Logistic.results <- DIF.Logistic(data = Likert.data[,1:12], group = Likert.data$group, sig.level = .05, purify = TRUE,
output.filename = "LogReg Output")
|
141b87962a14d7d43cf619567b58c8bb9fb7ffeb | 48155a2d7a1a7614aff7b2d123a5347398590e03 | /docs/maps_for_pubs/map_for_pub.R | ecad688d2160c6efa30a48058d5deeb990624390 | [
"MIT"
] | permissive | remi-daigle/MarxanConnect | 6e5faee774c4adaf9444266a39cbe9ae00be05ca | c88658413beaeebff03a8a4831f1fddd2cc0c618 | refs/heads/master | 2021-12-22T18:06:51.292239 | 2021-10-20T13:29:47 | 2021-10-20T13:29:47 | 96,797,881 | 9 | 6 | MIT | 2020-10-27T02:01:06 | 2017-07-10T16:19:25 | Python | UTF-8 | R | false | false | 11,782 | r | map_for_pub.R | library(sf)
library(raster)
library(gridExtra)
library(tidyverse)
library(magick)
BIORE <- read.csv("maps_for_pubs/input/spec.dat") %>%
select(name) %>%
filter(grepl("BIORE_",name)) %>%
mutate(name=gsub("BIORE_","",name)) %>%
unlist() %>%
as.numeric()
proj <- "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"
bioregions <- st_read("maps_for_pubs/bioregion_short.shp",stringsAsFactors = FALSE) %>%
filter(BIOREGI %in% BIORE) %>%
mutate(BIOREGI=paste0("BIORE_",BIOREGI)) %>%
st_transform(proj)
pu <- read.csv("maps_for_pubs/output/pu_no_connect.csv") %>%
mutate(geometry=st_as_sfc(geometry,"+proj=longlat +datum=WGS84"),
best_solution = as.logical(best_solution)) %>%
st_as_sf()%>%
st_transform(proj) %>%
select(pu_id)
# st_write(bioregions,"maps_for_pubs/bioregion_filtered.shp")
# intersection <- st_intersection(pu,bioregions)
# intersection$row <- 1:nrow(intersection)
# distances <- st_distance(intersection) %>%
# data.frame(check.names = FALSE) %>%
# mutate(row=row.names(.)) %>%
# gather(key="column",value="value",-row)
#
# distances$row <- as.numeric(distances$row)
# distances$column <- as.numeric(distances$column)
# # con_list <- distances
#
# con_list <- distances %>%
# left_join(data.frame(intersection),by=c("row"="row")) %>%
# mutate(id1=as.numeric(pu_id),
# from_BIO=BIOREGI) %>%
# select(row,column,id1,from_BIO,value) %>%
# left_join(data.frame(intersection),by=c("column"="row")) %>%
# mutate(id2=as.numeric(pu_id),
# to_BIO=BIOREGI) %>%
# select(id1,from_BIO,id2,to_BIO,value) %>%
# filter(from_BIO==to_BIO,
# as.numeric(value)>0) %>%
# mutate(habitat=to_BIO) %>%
# group_by(habitat,id1,id2) %>%
# summarize(value=1/mean(as.numeric(value))^2) %>%
# group_by(habitat,id1) %>%
# mutate(value=as.numeric(value)/sum(as.numeric(value)))
#
# write.csv(con_list,"maps_for_pubs/IsolationByDistance.csv",quote=FALSE,row.names = FALSE)
#
# con_list_mean <- con_list %>%
# group_by(id1,id2) %>%
# summarize(value=mean(value)) %>%
# mutate(habitat="mean") %>%
# as.data.frame() %>%
# complete(habitat,id1,id2,fill=list(value = 0)) %>%
# as.data.frame()
#
# write.csv(con_list_mean,"maps_for_pubs/IsolationByDistance_mean.csv",quote=FALSE,row.names = FALSE)
# CF_landscape maps for publication
# set default projection for leaflet
proj <- "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"
proj_rotated <- "+proj=omerc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +alpha=-73.00000"
spec <- read.csv("maps_for_pubs/input/spec.dat")
puvspr <- read.csv("maps_for_pubs/input/puvspr2.dat")
pu <- read.csv("maps_for_pubs/input/pu.dat")
puvspr_wide <- puvspr %>%
left_join(select(spec,"id","name"),
by=c("species"="id")) %>%
select(-species) %>%
spread(key="name",value="amount")
# planning units with output
output <- read.csv("maps_for_pubs/output/pu_no_connect.csv") %>%
mutate(geometry=st_as_sfc(geometry,"+proj=longlat +datum=WGS84"),
best_solution = as.logical(best_solution)) %>%
st_as_sf() %>%
left_join(puvspr_wide,by=c("pu_id"="pu"))%>%
st_transform(proj)
output_rotated <- st_transform(output,proj_rotated)
output_connect <- read.csv("maps_for_pubs/output/pu_connect.csv") %>%
mutate(geometry=st_as_sfc(geometry,"+proj=longlat +datum=WGS84"),
best_solution = as.logical(best_solution)) %>%
st_as_sf() %>%
left_join(puvspr_wide,by=c("pu_id"="pu"))%>%
st_transform(proj)
bioregions <- st_read("maps_for_pubs/bioregion_short.shp",stringsAsFactors = FALSE) %>%
st_transform(proj) %>%
mutate(BIOREGI=paste0("BIORE_",BIOREGI)) %>%
filter(BIOREGI %in% spec$name) %>%
group_by(BIOREGI,SHORT_D,ALT_LAB) %>%
summarize() %>%
data.frame() %>%
mutate(BIOREGI=as.character(BIOREGI),
SHORT_D=unlist(lapply(lapply(strsplit(SHORT_D," "),tail,-1),paste,collapse=" "))) %>%
st_as_sf(crs=proj)
# bioregions[nrow(bioregions)+1,] <- st_as_sf(data.frame(BIOREGI="Planning Units",
# SHORT_D="Planning Units",
# geometry=st_combine(output)))
# bioregions$BIOREGI[nrow(bioregions)]="Planning Units"
# get basemap outline of AUS
bb <- output %>%
st_bbox() %>%
st_as_sfc() %>%
st_transform(proj)
AUS <- getData(country="AUS",level=0) %>%
st_as_sf() %>%
st_transform(proj) %>%
st_intersection(st_buffer(bb,100000))
#### 3 panel #####
p1 <- ggplot(output_connect)+
geom_sf(data=AUS,fill="grey",colour="darkgrey")+
geom_sf(aes(fill=between_cent_land_pu_mean),colour="transparent",size=0.25)+
scale_fill_distiller("Betweeness\nCentrality",palette="Oranges",direction = 1)+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = "bottom",
legend.direction = "vertical",
legend.background = element_rect(fill="transparent"),
plot.title = element_text(size=10)) +
ggtitle("A) Connect. Feature")
p2 <- ggplot(output)+
geom_sf(data=AUS,fill="grey",colour="darkgrey")+
geom_sf(aes(fill=select_freq),colour="transparent",size=0.25)+
scale_fill_distiller("Selection\nFrequency",palette="Purples",direction = 1,limits=c(0,100))+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = "bottom",
legend.direction = "vertical",
legend.background = element_rect(fill="transparent"),
plot.title = element_text(size=10))+
ggtitle("B) Without Connectivity")
p3 <- ggplot(output_connect)+
geom_sf(data=AUS,fill="grey",colour="darkgrey")+
geom_sf(aes(fill=select_freq),colour="transparent",size=0.25)+
scale_fill_distiller("Selection\nFrequency",palette="Purples",direction = 1,limits=c(0,100))+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = "bottom",
legend.direction = "vertical",
legend.background = element_rect(fill="transparent"),
plot.title = element_text(size=10))+
ggtitle("C) With Connectivity")
ggsave("maps_for_pubs/maps.png", grid.arrange(p1, p2, p3, ncol=3),width=7.25,height=8,dpi=600)
#### difference ####
output_connect$diff <- output_connect$select_freq-output$select_freq
output_connect$rep_select_freq <- output$select_freq
oc_greater90 <- output_connect %>%
filter(rep_select_freq>quantile(rep_select_freq,probs=0.9),
select_freq>quantile(select_freq,probs=0.9))
oc_bottom5 <- output_connect %>%
filter(rep_select_freq<5,
select_freq<5)
oc_sfcon <- output_connect %>%
filter(diff>5)
oc_sfrep <- output_connect %>%
filter(diff<(-5))
base <- ggplot(oc_sfrep)+
geom_sf(data=AUS,fill="grey",colour="darkgrey")+
geom_sf(aes(fill=diff),colour="transparent")+
scale_fill_distiller("Selection frequency\nhigher without\nconnectivity",palette=c("Purples"),limits=c(-100,-5),
labels=c("100","75","50", "25", "5"))+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = c(1.64,0.2),
legend.direction = "vertical",
plot.margin = margin(t=5.5,b=5.5,l=0,r=140,unit = "pt"),
legend.title = element_text(size=10))+
guides(fill = guide_legend(title.position = "right",
reverse = TRUE))
ggsave("maps_for_pubs/base.png",base,height=6,width=4,dpi=600)
connect <- ggplot(oc_sfcon)+
geom_sf(aes(fill=diff),colour="transparent")+
scale_fill_distiller("Selection frequency\nhigher with\nconnectivity",palette=c("Oranges"),direction=1,limits=c(5,100))+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = c(1.64,0.8),
legend.direction = "vertical",
plot.margin = margin(t=5.5,b=5.5,l=0,r=140,unit = "pt"),
legend.title = element_text(size=10),
legend.background = element_rect(fill="transparent"),
rect = element_rect(fill = "transparent"),
panel.grid = element_line(colour="transparent"),
panel.background = element_rect("transparent"),
axis.ticks = element_line("transparent"),
axis.text.x = element_text(colour="transparent"),
axis.text.y = element_text(colour="transparent"))+
guides(fill = guide_legend(title.position = "right",
reverse = TRUE))
ggsave("maps_for_pubs/connect.png",connect,bg="transparent",height=6,width=4,dpi=600)
categorical <- ggplot()+
geom_sf(data=oc_greater90, aes(fill="A",colour="A"),size=0.3)+
geom_sf(data=oc_bottom5, aes(fill="B",colour="B"),size=0.3)+
scale_fill_manual("",values=c("#549b16","white"),labels=c("Always prioritized","Rarely selected"))+
scale_colour_manual("",values=c("#549b16","black"),labels=c("Always prioritized","Rarely selected"))+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = c(1.535,0.5),
legend.direction = "vertical",
plot.margin = margin(t=5.5,b=5.5,l=0,r=140,unit = "pt"),
legend.text = element_text(size=10),
legend.background = element_rect(fill="transparent"),
rect = element_rect(fill = "transparent",colour="transparent"),
panel.grid = element_line(colour="transparent"),
panel.background = element_rect("transparent"),
plot.background = element_rect("transparent"),
axis.ticks = element_line("transparent"),
axis.text.x = element_text(colour="transparent"),
axis.text.y = element_text(colour="transparent"))+
guides(fill = guide_legend(title.position = "right"))
ggsave("maps_for_pubs/categorical.png",categorical,bg="transparent",height=6,width=4,dpi=600)
categorical <- image_read("maps_for_pubs/categorical.png")
con <- image_read("maps_for_pubs/connect.png")
base <- image_read("maps_for_pubs/base.png")
composite <- image_composite(image_composite(base,con),categorical)
composite
image_write(composite,"maps_for_pubs/diff.png")
#### bioregions ####
bio <- ggplot()+
geom_sf(data=AUS,fill="grey",colour="transparent")+
geom_sf(data=bioregions,aes(fill="A",colour="A"))+
geom_sf(data=output_rotated,size=0.05,aes(fill="red",colour="red"))+
facet_wrap(~ALT_LAB,nrow=2)+
scale_fill_manual("",values=c("#1f78b4","transparent"),labels=c("Bioregion","Planning Units"))+
scale_colour_manual("",values=c("transparent","black"),labels=c("Bioregion","Planning Units"))+
coord_sf(xlim=st_bbox(output_rotated)[c(1,3)],
ylim=st_bbox(output_rotated)[c(2,4)],
crs=proj_rotated,
expand = FALSE)+
theme_dark()+
theme(legend.position = "bottom",
legend.direction = "horizontal")
ggsave("maps_for_pubs/bio_maps.png", bio,width=8.25,height=7.5,dpi=600)
|
7bf0ca90fb54c437312e74f803736c0a1990b909 | 3f69b19b3720a81fb37070fc31304de639785ed7 | /man/Traj3DResampleTime.Rd | 3d3ffaff42705479d9844d73f613515817443c4a | [] | no_license | JimMcL/trajr | 42b0150b26dc2d3d7d3992cff4b4aca51dbd7d25 | 6998b877f258030df345c7d114e07c41158f3d8e | refs/heads/master | 2023-07-21T16:17:33.511137 | 2023-07-10T00:53:39 | 2023-07-10T00:53:39 | 111,262,381 | 21 | 7 | null | null | null | null | UTF-8 | R | false | true | 1,059 | rd | Traj3DResampleTime.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/3D.R
\name{Traj3DResampleTime}
\alias{Traj3DResampleTime}
\title{Resample a 3D trajectory to a constant time interval}
\usage{
Traj3DResampleTime(trj3d, stepTime, newFps = NULL)
}
\arguments{
\item{trj3d}{The 3-dimensional trajectory to be resampled.}
\item{stepTime}{The resampled trajectory step time. Each step in the new
trajectory will have this duration.}
\item{newFps}{Value to be stored as the FPS value in the new trajectory (see
\code{\link{TrajGetFPS}}). It is not otherwise used by this function.}
}
\value{
A new 3-dimensional trajectory with a constant time interval for each
step. Points in the new trajectory are calculated by linearly interpolating
along \code{trj3d}.
}
\description{
Constructs a new 3-dimensional trajectory by resampling the input trajectory
to a fixed time interval. Points are linearly interpolated along the
trajectory. Spatial and time units are preserved.
}
\seealso{
\code{\link{Traj3DFromCoords}}, \code{\link{TrajResampleTime}}
}
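% Illustrative usage added below; the construction of `trj3d` is only sketched (see ?Traj3DFromCoords).
\examples{
\dontrun{
# Assuming trj3d was built from x/y/z/time coordinates with Traj3DFromCoords():
resampled <- Traj3DResampleTime(trj3d, stepTime = 0.04, newFps = 25)
}
}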
|
597771969653d62b0cfc64e57c8993d9d9b4c2ca | f5addbf749dd8bba26d15b891fc1e51ad95072f6 | /plot4.R | cda44e2a6f5d2156b7905aaf712ab2c27661af2b | [] | no_license | Layla-ElAsri/ExData_Plotting1 | 95aee14bb9fa083a9ecfe124d0712aba25c33f1a | 40d522819427b41a1e1e65d3cdb85ad56bf87057 | refs/heads/master | 2021-01-20T21:44:50.858410 | 2014-07-13T12:00:25 | 2014-07-13T12:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,166 | r | plot4.R | file = "household_power_consumption.txt"
data = subset(data.frame(read.table(file, header = TRUE, sep=";", na.strings="?")), Date=="1/2/2007" | Date=="2/2/2007")
dateTime = paste(data$Date, data$Time)
dateTime = strptime(dateTime, format='%d/%m/%Y %H:%M:%S')
png(file="plot4.png", width=480, height=480, bg = "transparent")
par(mfrow = c(2, 2))
with(data, plot(dateTime, Global_active_power, type="l", ylab="Global Active Power", xlab = ""))
with(data, plot(dateTime, Voltage, type="l", ylab="Voltage", xlab = "datetime"))
maxS = max(c(data$Sub_metering_1, data$Sub_metering_2, data$Sub_metering_3))
with(data, plot(dateTime, Sub_metering_1, type="l", ylab="Energy sub metering", xlab = "", ylim = c(0,maxS)))
par(new = T)
with(data, plot(dateTime, Sub_metering_2, col = "blue", type="l", ylab="", xlab = "", ylim = c(0,maxS)))
par(new = T)
with(data, plot(dateTime, Sub_metering_3, col = "red", type="l", ylab="", xlab = "", ylim = c(0,maxS)))
legend("topright", lty = 1, col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
with(data, plot(dateTime, Global_reactive_power, type="l", xlab = "datetime"))
dev.off()
|
e68d82d398dd20e1c6e1af981daff90bbab7afe1 | 04616643c5da76e475506c7fa96e40c25bd36555 | /R/init_queue.R | 49bae52239ed684a71121654ec754d510ba741eb | [] | no_license | edonnachie/queuer | 3139410cb8917922a75a42107b19481d47837cff | c255c35384f53e919db74d33b629ddac4f236b0e | refs/heads/master | 2021-01-10T12:30:13.101914 | 2017-08-18T15:47:07 | 2017-08-18T15:47:07 | 44,825,642 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 243 | r | init_queue.R | #' Initialise a queue directory
#'
#' @param queue Directory in which queue should be created
#' @return Nothing
init_queue <- function(queue){
if(!dir.exists(queue)){
dir.create(paste0(queue, "/archive"), recursive = TRUE)
}
} |
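# Example usage (a sketch): creates "jobs" with its "archive" subdirectory if it does not exist.
# init_queue("jobs")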
fca747d456d7f0ee5e62e110dd4ff56cf874e5bb | 489e56df1993b3d3cb56011ce79e1130f24c5dae | /subsetting_a_matrix.R | f46f1c08507f2f389718de1111ff727e170a08d5 | [] | no_license | adalee2future/learn-R | b50d84948861b95cf24919e3789773fcae94d36e | 8002acf15371271401428f7b3fcd8b37d96abfe7 | refs/heads/master | 2021-01-10T21:22:52.921444 | 2015-06-15T02:25:59 | 2015-06-15T02:25:59 | 23,581,876 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 96 | r | subsetting_a_matrix.R | x <- matrix(1: 6, 2, 3)
x[1, 2]
x[2, 1]
x[1, ]
x[, 2]
x[1, 2, drop = FALSE]
x[1, , drop = FALSE] |
f1509a19cadb4f7700f62b788a45c3893e0c26f7 | 76abe33b0dac505b1f7d771c799e18b57a8f4417 | /shiny/Change the appearance of the dashboard.R | 808ce8921b68a2a239af2530db7d576b48180519 | [] | no_license | jyeazell/DataCamp_practice | 4ddaf889b07a2ef3fcd0965bee7d71372e3eb2f3 | de4443e01d5414913aa555a5771d5eadc9f83700 | refs/heads/master | 2022-12-19T23:27:19.410533 | 2020-10-09T20:31:07 | 2020-10-09T20:31:07 | 183,300,581 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 444 | r | Change the appearance of the dashboard.R | ##'Change the appearance of the dashboard
##'
##'Walt Disney's favorite color is purple (mine too!). Using the application
##'you've just created, let's change the color by updating the skin parameter
##'to "purple". The body you just created is already loaded.
# Update the skin
ui <- dashboardPage(
skin = "purple",
header = dashboardHeader(),
sidebar = dashboardSidebar(),
body = body)
# Run the app
shinyApp(ui, server)
|
014c8003a3612ce7fb53507c273c02226044f08f | 3bf5ba75fd0e5044c0d1bc60f53bf6de2a439965 | /complete.R | 415d20847a71a7814debd76e97c654a7a301ff1e | [] | no_license | YChekalin/ds-repo | 51af396e0420909e267c7a0191a56690ec5008c7 | 690ede515f1eaeac5f175c445e53cee076a5fce6 | refs/heads/master | 2021-01-13T02:06:53.792602 | 2015-02-19T04:06:34 | 2015-02-19T04:06:34 | 30,549,922 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 457 | r | complete.R |
complete <- function(directory, id = 1:332) {
data <- data.frame()
nobs <- data.frame(id=id,"nobs"=0)
file_list <- list.files(directory, full.names=TRUE)
for(fname in file_list) {
tmp <- read.table(fname, header = TRUE, sep = ",")
tmp <- subset(tmp, ID %in% id)
data <- rbind(data,tmp)
}
for(id2 in id) {
tmp2 <- complete.cases(data[data$ID==id2,])
nobs[id==id2,2] <- length(tmp2[tmp2==TRUE])
}
return(nobs)
} |
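# Example usage (a sketch; assumes a "specdata" directory of per-monitor CSV files that each
# contain an ID column, as in the assignment this function was written for):
# complete("specdata", c(2, 4, 8, 10, 12))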
7a8245c3bf484b4b85cdb91eef227fe0f92dcc4c | 469614d19085ebac7158d06fb10078707a0e5061 | /OxBS_processing.R | b3587d854acd977d9fc249c0f95c1c518cfc96c6 | [
"MIT"
] | permissive | estoyanova/EStoyanova_eLife_2021 | 9781fa68abb8ea1997de97b94fceac839a586997 | 4c2fa6c102e4f868fc91e0dcb0b8c7155b8f8712 | refs/heads/main | 2023-07-02T14:50:58.195174 | 2021-10-27T01:57:19 | 2021-10-27T01:57:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 801 | r | OxBS_processing.R |
# QC and trimming of FastQ Files
trim_galore --stringency 3 --fastqc --paired --clip_R1 5 --clip_R2 $R1.fastq $R2.fastq
# Alignment with Bismark
bismark --bowtie2 -p 4 --multicore 4 bismark_genome_build/ -1 R1.fq -2 R2.fq
deduplicate_bismark -p --bam inputfile.bam
# Sequencing quality control with CEGX custom
docker run -v=`pwd`:/Data -it cegx_bsexpress_0.6 auto_bsExpress
# Downstream processing with Methpipe from Smith lab
to-mr -o bismark.deduplicated.bam.mr -m bismark deduplicated.bam
LC_ALL=C sort -k 1,1 -k 3,3n -k 2,2n -k 6,6 -o out.mr.sorted bismark.deduplicated.bam.mr
methcounts -c genome.fa -o output.allC.meth *.sorted.mr &
mlml -u *BS*.meth -m *OX*.meth -o *.mlml.txt
#Merging symmetric CpGs, necessary for MethylSeekR
symmetric-cpgs -o symmetric*.meth -v -m input.meth &
|
5f581845d623a9575b0da58f63e4892ec0286024 | f5722d02cdd053c95eb3e5feb64b4e1338564f5f | /rb.R | 827cfd56d433a49c1cd252b0d2d40973ced28f17 | [] | no_license | sethf26/NFLSuccessFromCollegeStats | 1976a4884f72d9235a2299fc280b26c5b1b7b4e7 | d913bd2842b3cf60d2c08420e5b7444a466ad32b | refs/heads/main | 2023-02-04T11:13:16.099004 | 2020-12-14T23:36:17 | 2020-12-14T23:36:17 | 304,743,450 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,964 | r | rb.R | tabPanel(
sidebarPanel(
    h3("Regression of Running Backs' Prior Stats on NFL Value and Statistics"),
selectInput(inputId = "selection2",
label = "Choose a position",
choices = c("NFLYPC" = "RB$NFLYPC ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Rushing Attempts" = "RB$NFL.rushattmpt.pg ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Rushing Yards" = "RB$NFL.rushyrds.pg ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Rushing TDs" = "RB$College.Rushing.TDs ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Value Per Year" = "RB$ValuePerYear ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Receptions" = "RB$college.receptions.pg ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Receiving Yards" = "RB$college.receivingyrds.pg ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Receiving TDs" = "RB$`Receiving TD` ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS",
"Yards From Scrimmage" = "RB$College.AVGYFS ~ RB$College.YPC + RB$college.rushattmpt.pg + RB$college.rushtd.pg + RB$College.Rushing.Yards + RB$Rnd + RB$College.Receptions + RB$College.Receiving.TDs + RB$College.Receiving.Yards + RB$College.AVGYFS")
))),
gt_output("model5")
output$model5 <- render_gt({
model_5 <- stan_glm(formula = input$selection2,
data = RB,
refresh = 0)
model_5 %>%
tidy %>%
gt() %>%
      tab_header(title = "Running Backs' NFL Stat Predictions from College Stats")
})
|
93610ecfb4399863d421d74fad1faecf9cb455d4 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/cairoScaledFontGlyphExtents.Rd | f50363fbb957a00edef1dc56a5c758565358941f | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 1,303 | rd | cairoScaledFontGlyphExtents.Rd | \alias{cairoScaledFontGlyphExtents}
\name{cairoScaledFontGlyphExtents}
\title{cairoScaledFontGlyphExtents}
\description{Gets the extents for a list of glyphs. The extents describe a
user-space rectangle that encloses the "inked" portion of the
glyphs, (as they would be drawn by \code{\link{cairoShowGlyphs}} if the cairo
graphics state were set to the same font_face, font_matrix, ctm,
and font_options as \code{scaled.font}). Additionally, the x_advance and
y_advance values indicate the amount by which the current point
would be advanced by \code{\link{cairoShowGlyphs}}.}
\usage{cairoScaledFontGlyphExtents(scaled.font, glyphs, num.glyphs)}
\arguments{
\item{\verb{scaled.font}}{[\code{\link{CairoScaledFont}}] a \code{\link{CairoScaledFont}}}
\item{\verb{glyphs}}{[\code{\link{CairoGlyph}}] a list of glyph IDs with X and Y offsets.}
\item{\verb{num.glyphs}}{[integer] the number of glyphs in the \code{glyphs} list}
}
\details{Note that whitespace glyphs do not contribute to the size of the
rectangle (extents.width and extents.height). }
\value{
A list containing the following elements:
\item{\verb{extents}}{[\code{\link{CairoTextExtents}}] a \code{\link{CairoTextExtents}} which to store the retrieved extents.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
3b7e8273e0abf7c33b18c6bb77142f4772a05a91 | 2b75a4b36bcdf7ca3bc1a80b8ee391e25318bfca | /man/read.moleculelist_thunderstorm.Rd | 6659368d7a00818fb59278408086639a313315c2 | [
"MIT"
] | permissive | keithschulze/supr | c67374d737ef8e77966ae057e8f6219490ce6a6a | 27902504757e33d1e4e3c7d3ba6baf0d7b808848 | refs/heads/master | 2022-12-03T07:02:25.029644 | 2022-11-23T07:21:50 | 2022-11-23T07:21:50 | 43,782,983 | 0 | 0 | null | 2016-05-12T03:34:19 | 2015-10-06T22:41:46 | R | UTF-8 | R | false | true | 634 | rd | read.moleculelist_thunderstorm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{read.moleculelist_thunderstorm}
\alias{read.moleculelist_thunderstorm}
\title{Thunderstorm molecule list reader}
\usage{
read.moleculelist_thunderstorm(filepath)
}
\arguments{
\item{filepath}{string denoting the path to the file to be read.}
}
\value{
\code{\link{ppp}} object containing the coordinates for
single molecule localisations and other parameters/columns in
original csv file attached as marks.
}
\description{
Reader function for comma separated molecule list output of the
Thunderstorm plugin for ImageJ/Fiji.
}
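% Illustrative usage added below; the file name is hypothetical.
\examples{
\dontrun{
mols <- read.moleculelist_thunderstorm("thunderstorm_results.csv")
plot(mols)
}
}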
|
a8e3f47da86550528b50c4d44eda7da64ff82cf4 | 04efe01489384e0babe71e1a0548e9d589c32166 | /Heping/S7MLR2.R | 1514794e6eccf67296550d60dd0f402432d896cb | [] | no_license | hpzheng/sys6021_codes | 5749645527373bd6c86b05f1bddfdcb791f04cc3 | 13f87e0bec8e5f198f8cdee157b65d6e61d2d753 | refs/heads/master | 2021-01-10T10:07:40.921116 | 2015-10-14T17:59:38 | 2015-10-14T17:59:38 | 44,264,256 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,257 | r | S7MLR2.R | # Session 7
#
# Multiple Linear Regression 2
#
#******************************************************
#load data
source("AccidentInput.R")
setwd(traindir)
my.path <- getwd()
setwd(my.path)
acts <- file.inputl(my.path)
sapply(acts, dim)
dim(acts[[12]])
setdiff(colnames(acts[[1]]), colnames(acts[[8]]))
comvar <- intersect(colnames(acts[[1]]),colnames(acts[[8]]))
totacts <- combine.data(acts, comvar)
##Build a data frame xdmg with only extreme accidents for ACCDMG
# Remove duplicates from xdmg and call new data frame xdmgnd
##*******************************************
## Build linear regression models in R: lm ##
##*******************************************
# Linear regression models with quantitative predictors
xdmgnd.lm1<-lm(ACCDMG~TEMP,data=xdmgnd)
xdmgnd.lm2<-lm(ACCDMG~TEMP+TRNSPD,data=xdmgnd)
xdmgnd.lm3<-lm(ACCDMG~TEMP+TRNSPD+TONS,data=xdmgnd)
# The next two lines of R code are equivalent
xdmgnd.lm4<-lm(ACCDMG~TEMP+TRNSPD+TONS+CARS,data=xdmgnd)
xdmgnd.lm4<-lm(ACCDMG~.,data=xdmgnd[,c('ACCDMG','TEMP','TRNSPD','TONS','CARS')])
# Display regression results:
summary(xdmgnd.lm1)
# You should be able to find: estimated coefficients, residuals, t-test results, F test results, R^2, adjusted R^2,
names(xdmgnd.lm1)
##What are the coefficients of each of the linear models?
coef(xdmgnd.lm1)
##what is the sum of the residuals squared?
sum(xdmgnd.lm1$res^2)
################################################ Metrics and Variable Selection ################################################
##*******************************
## Criterion based assessments ##
##*******************************
# Adjusted R^2:
summary(xdmgnd.lm1)$adj.r.squared
# AIC:
AIC(xdmgnd.lm1)
#BIC:
AIC(xdmgnd.lm1,k=log(nrow(xdmgnd))) # use the sample size of the data the model was fit to
# Assess the 4 models using criterion based assessment approaches. Which performs better based on each criterion?
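# One compact way to line the four models up on AIC (a sketch; BIC can be obtained the same
# way by supplying k = log(nrow(xdmgnd))):
sapply(list(lm1 = xdmgnd.lm1, lm2 = xdmgnd.lm2, lm3 = xdmgnd.lm3, lm4 = xdmgnd.lm4), AIC)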
##************************
## Stepwise Regression ##
##************************
xdmgnd.lm4.step<-step(xdmgnd.lm4)
# If you have many predictors, it will take some time to get results. To save time, you can set 'trace=F' to get results without showing each step:
xdmgnd.lm4.step<-step(xdmgnd.lm4, trace=F)
summary(xdmgnd.lm4.step)
# What predictors are left in your stepwise model?
##******************
## Partial F Test ##
##******************
# Recall that we can only compare two nested models by partial F test:
anova(xdmgnd.lm1,xdmgnd.lm2)
# Compare your stepwise model to model 4 using the partial F test.
##******************
## Test Sets ##
##******************
setwd(sourcedir)
source("TestSet.R")
#set test sets size:
test.size<-1/3
# generate training sets and test sets from original data:
xdmgnd.data<-test.set(xdmgnd,test.size)
# Check distribution of ACCDMG of test set, training set:
par(mfrow=c(2,2))
hist(xdmgnd.data$train$ACCDMG)
hist(xdmgnd.data$test$ACCDMG)
hist(xdmgnd$ACCDMG)
par(mfrow=c(1,1))
# Are the training and test sets representative of total data?
# Build model with train set:
xdmgnd.lm4.train<-lm(ACCDMG~TEMP+TRNSPD+TONS+CARS,data=xdmgnd.data$train)
# Recall that we need to measure predicted MSE.
# First, how to predict with lm models:
xdmgnd.lm4.pred<-predict(xdmgnd.lm4.train,newdata=xdmgnd.data$test)
# Next, compute PMSE:
pmse.xdmgnd.lm4<-mse(xdmgnd.lm4.pred,xdmgnd.data$test$ACCDMG)
pmse.xdmgnd.lm4
# Compare xdmgnd.lm4 and xdmgnd.lm3 based on PMSE. Which model performs better?
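# A possible sketch of that comparison (the lm3 objects below are new names mirroring the lm4 steps):
xdmgnd.lm3.train<-lm(ACCDMG~TEMP+TRNSPD+TONS,data=xdmgnd.data$train)
xdmgnd.lm3.pred<-predict(xdmgnd.lm3.train,newdata=xdmgnd.data$test)
pmse.xdmgnd.lm3<-mse(xdmgnd.lm3.pred,xdmgnd.data$test$ACCDMG)
pmse.xdmgnd.lm3
pmse.xdmgnd.lm4
# The model with the smaller PMSE predicts better on the held-out test set.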
##********************
## Cross-Validation ##
##********************
# Need the boot library
library(boot)
# You need to use glm (a function to estimate generalized linear models) instead of lm. Don't be confused by generalized linear models.
# Because lm is a special case of glm, glm function can be used to estimate lm models as long as you set parameters correctly.
xdmgnd.lm4.cv<-glm(ACCDMG~TEMP+TRNSPD+TONS+CARS,data=xdmgnd)
# Cross-validation:
xdmgnd.lm4.err<-cv.glm(xdmgnd,xdmgnd.lm4.cv,K=10)
xdmgnd.lm4.err$delta
#There are two components for estimated errors: the first is the raw cross-validation estimate of prediction error; the second is the adjusted cross-validation estimate.
# Compare xdmgnd.lm4 and xdmgnd.lm3 based on adjusted cross-validation estimate. Which model performs better? |
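# A matching sketch for the cross-validation comparison (new object names, same idea as above):
xdmgnd.lm3.cv<-glm(ACCDMG~TEMP+TRNSPD+TONS,data=xdmgnd)
xdmgnd.lm3.err<-cv.glm(xdmgnd,xdmgnd.lm3.cv,K=10)
xdmgnd.lm3.err$delta[2]
xdmgnd.lm4.err$delta[2]
# The model with the smaller adjusted CV estimate generalizes better.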
a253824479514c0f59a4d3bde88bc75c786bd4ab | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PLRModels/examples/plrm.ci.Rd.R | 0451962195e7cbc2181b9c1f9f4cfe80a8b84f56 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,000 | r | plrm.ci.Rd.R | library(PLRModels)
### Name: plrm.ci
### Title: Confidence intervals estimation in partial linear regression
### models
### Aliases: plrm.ci
### Keywords: Statistical Inference Regression Time Series
### ** Examples
# EXAMPLE 1: REAL DATA
data(barnacles1)
data <- as.matrix(barnacles1)
data <- diff(data, 12)
data <- cbind(data,1:nrow(data))
b.h <- plrm.gcv(data)$bh.opt
b1 <- b.h[1]
## Not run: plrm.ci(data, b1=b1, b2=b1, a=c(1,0), CI="all")
## Not run: plrm.ci(data, b1=b1, b2=b1, a=c(0,1), CI="all")
# EXAMPLE 2: SIMULATED DATA
## Example 2a: dependent data
set.seed(123)
# We generate the data
n <- 100
t <- ((1:n)-0.5)/n
m <- function(t) {t+0.5}
f <- m(t)
beta <- c(0.5, 2)
x <- matrix(rnorm(200,0,3), nrow=n)
sum <- x%*%beta
sum <- as.matrix(sum)
eps <- arima.sim(list(order = c(1,0,0), ar=0.7), sd = 0.1, n = n)
eps <- as.matrix(eps)
y <- sum + f + eps
data_plrmci <- cbind(y,x,t)
## Not run: plrm.ci(data, a=c(1,0), CI="all")
## Not run: plrm.ci(data, a=c(0,1), CI="all")
|
68b23b8bb3f951e5d3eb2afaa6a6a7b0b019891a | 96a66b3b1e65e1a25951349d03bf122c1879f08d | /man/analyze.Rd | 0d222c719c0a1120def33b897c0668cb55111134 | [] | no_license | cran/LN3GV | 66ed033943771074f9c1252d7ccdf4d4656d227c | 6945374aa31fe40d2c06fda296290f58b58c3151 | refs/heads/master | 2016-08-04T08:25:25.833761 | 2011-07-26T00:00:00 | 2011-07-26T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,943 | rd | analyze.Rd | % File src/library/LN3GV/man/analyze.Rd
\name{analyze}
\alias{analyze}
\title{Fit a hierarchical model to matrix of normalized microarray data}
\description{
Analyze microarray data using the methods detailed in Lund and Nettleton, 2011. This is the main function of the LN3GV package.}
\usage{
analyze(dat,pats,method,log.scale=TRUE,MaxIt=30,
parm.conv=.005,prob.conv=.0005,parm.init=NULL,prob.init=NULL)
}
\arguments{
\item{dat}{microarray data matrix of normalized intensities from microarray experiment. Each row contains observations from a single gene.
Each column contains observations from a single experimental unit.}
\item{pats}{matrix in which each row describes a unique expression pattern. Must have same number of columns as \code{dat}. For each pattern, experimental units sharing a common integer are assumed to be equivalently expressed.}
\item{method}{method used to analyze data. Must be one of "LNNMV*", "LNNGV", "LN3", "LN3MV*", or "LN3GV"}
\item{log.scale}{logical. If false, function will perform analysis using \code{log(dat)}}
\item{MaxIt}{maximum number of EM algorithm iterations used to estimate prior probabilities for expression patterns and model parameters.}
\item{parm.conv}{Until iteration \code{MaxIt}, EM algorithm will not stop while the difference between consecutive iterations in estimates for any parameter is greater than \code{parm.cov} times the updated parameter estimate. May be specified as single number or a vector with length equal to number of model parameters (see details).}
\item{prob.conv}{Until iteration \code{MaxIt}, EM algorithm will not stop while the difference between consecutive iterations in prior probability estimates for any pattern is greater than \code{prob.conv}. May be specified as single number or a vector with length equal to \code{nrow(pats)}.}
\item{parm.init}{Optional. Provides initial estimates for model parameters. See details. }
\item{prob.init}{Optional. Provides initial estimates of prior probabilities (or mixing proportions) for expression patterns.}
}
\details{
Order of model parameters optimized using EM algorithm for each method:
LNNMV* and LNNGV: Treatment variance (vt), mu. Error variance parameter values are not chosen using EM algorithm for these methods.
LN3: Gene variance (vg), Treatment variance (vt), Error variance (vr), mu
LN3MV* and LN3GV: Gene variance (vg), Treatment variance (vt), mu. Error variance parameter values are not chosen using EM algorithm for these methods.
When estimating error variances, experimental units for which the corresponding columns of \code{pats} are identical are assumed to be replicates.
}
\value{list containing:
\item{"Pattern Probs"}{matrix the describes posterior probabilities for expression patterns for each gene}
\item{"Parameter Estimates"}{vector providing estimated prior probabilities for expression patterns and model parameters.}
}
\author{Steve Lund \email{[email protected]}}
\examples{
### Create example data set from LN3MV model.
dat<-matrix(rnorm(500),50,10)*rgamma(50,.5,1)+rnorm(50,0,2);
### Make first 25 example genes differentially expressed.
### Suppose 2 conditions with 4 and 6 reps, respectively.
dat[1:25,1:4]<-dat[1:25,1:4]+rnorm(25)
dat[1:25,5:10]<-dat[1:25,5:10]+rnorm(25)
dat[26:50,]<-dat[26:50,]+rnorm(25)
### Create matrix defining possible expression patterns.
### First row is pattern for equivalent expression.
### Second row is pattern for differential expression across conditions.
pats<-rbind(rep(1,10),rep(1:2,c(4,6)))
### Analyze data using each method
analyze(dat,pats,method="LN3GV")
analyze(dat,pats,method="LN3MV*")
analyze(dat,pats,method="LN3")
analyze(dat,pats,method="LNNGV")
analyze(dat,pats,method="LNNMV*")
}
\keyword{microarray, differential expression}
|
75775ad2cbe47ecdad8e58cfd5aa768ee067a5ec | a8c5d8db03159c81e0b296f18179d3cd1c56ae00 | /man/package_glob.Rd | d683af383acdb9414404e00a8e816df9f5e98d39 | [] | no_license | cran/deepredeff | a42ccc7e75ae2d903496d31715a17bf01a94e3ea | 9b2888b98bf1c4143f499768355ab058d4703cde | refs/heads/master | 2023-06-18T04:24:21.440705 | 2021-07-16T08:30:02 | 2021-07-16T08:30:02 | 307,944,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 325 | rd | package_glob.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{package_glob}
\alias{package_glob}
\title{Wildcard Expansion on File Paths}
\usage{
package_glob(..., pattern)
}
\arguments{
\item{...}{Path}
\item{pattern}{Pattern}
}
\value{
Glob
}
\description{
Wildcard Expansion on File Paths
}
|
cfe87a13f160ea5a70754970eb8f1db04818e081 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/powdR/examples/fps.Rd.R | 918a76fcf46d313c215b297527222c84456b645c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 984 | r | fps.Rd.R | library(powdR)
### Name: fps
### Title: Full pattern summation
### Aliases: fps
### ** Examples
#Load the minerals library
data(minerals)
# Load the soils data
data(soils)
#Since the reference library is relatively small,
#the whole library can be used at once to get an
#estimate of the phases within each sample.
## Not run:
##D fps_sand <- fps(lib = minerals,
##D smpl = soils$sandstone,
##D refs = minerals$phases$phase_id,
##D std = "QUA.1",
##D align = 0.2)
##D
##D fps_lime <- fps(lib = minerals,
##D smpl = soils$limestone,
##D refs = minerals$phases$phase_id,
##D std = "QUA.1",
##D align = 0.2)
##D
##D fps_granite <- fps(lib = minerals,
##D smpl = soils$granite,
##D refs = minerals$phases$phase_id,
##D std = "QUA.1",
##D align = 0.2)
## End(Not run)
|
e73c1b2a31609b50c2de9cae5857dae002d4d3f0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nor1mix/examples/rnorMix.Rd.R | ce735873c1b589ed935e670ae87369f86ac4bea1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 344 | r | rnorMix.Rd.R | library(nor1mix)
### Name: rnorMix
### Title: Generate 'Normal Mixture' Distributed Random Numbers
### Aliases: rnorMix
### Keywords: distribution
### ** Examples
x <- rnorMix(5000, MW.nm10)
hist(x)# you don't see the claw
plot(density(x), ylim = c(0,0.6),
main = "Estim. and true 'MW.nm10' density")
lines(MW.nm10, col = "orange")
|
a1fa00b26aa825069bf7790adfc5f15aab3919bf | 6c1926b99503f6304d35ba383538c9c365242bb1 | /man/get.var.Rd | f636a01b8bdb8c2fed30ceb83d0fa239041bfe80 | [] | no_license | smorisseau/dhstools | 56e1451de1124ac0f7943c7710a03a13b5fcca22 | a8ba0addb7cae06cf085ebe08e9136bef04ed87f | refs/heads/master | 2021-01-17T15:33:10.641739 | 2014-03-25T15:37:50 | 2014-03-25T15:37:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,008 | rd | get.var.Rd | \name{get.var}
\alias{get.var}
\title{helper fns for dhstools}
\usage{
get.var(survey.data, var, default = NA)
}
\arguments{
\item{survey.data}{the survey dataset}
\item{var}{either NULL, a column name, or a vector of
values}
\item{default}{the default value to fill in if the
variable is not found}
}
\value{
a vector of values whose length is the same as the number
of rows in survey.data; if var is NULL, this has the
default values
}
\description{
(NB: get.var and get.weights are taken from the
networkreporting package)
}
\details{
Get a variable from a data frame or vector.
This function was written because a few of the estimator
functions need to use weights, and there are several
cases to handle: the user could pass in a column name, a
vector of weights, or nothing (in which case the weights
should default to 1 for each row in the dataset). For the
special case of getting weights, see the curried function
get.weights (defined right below).
}
\keyword{internal}
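\examples{
## Illustrative sketch only (get.var is internal); assumes a survey data.frame
## `svy` containing a numeric column named `wt`:
\dontrun{
get.var(svy, "wt")        # column name: returns svy$wt
get.var(svy, svy$wt * 2)  # vector of values: returned as-is
get.var(svy, NULL, 1)     # NULL: the default value, repeated once per row of svy
}
}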
|
197980e3a6c7f78789225305351fa0bbbc0e0cf7 | 8dc8fc6b022a02db14ed39d82cd222a4c17df3eb | /scripts/mir_gene_families.R | 33af2ced701ab2f1e4cff2d4740bb9638d316514 | [
"MIT"
] | permissive | BleekerLab/small-rna-seq-pipeline | 977c9da4bde224726beaae5599ec1331d2430e62 | bbd9ef8c6741dc59f3cdafc35da9cb6ea48b31f9 | refs/heads/master | 2022-09-10T04:12:24.630033 | 2022-08-01T18:01:12 | 2022-08-01T18:01:12 | 172,049,835 | 5 | 2 | MIT | 2020-11-17T13:26:02 | 2019-02-22T10:51:51 | Perl | UTF-8 | R | false | false | 1,001 | r | mir_gene_families.R | suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(svglite))
# capture the command-line arguments after --args (e.g. the shortstack results directory)
args <- commandArgs(trailingOnly = TRUE)
blast_file = args[1]
output_png = args[2]
output_svg = args[3]
# read the blast result file
df = read.delim(file = blast_file,header = T,stringsAsFactors = F)
# count occurrences of each MIR gene family
MIR.freqs = as.data.frame(table(df$subject_id))
colnames(MIR.freqs)=c("MIR","Counts")
# bar plot of counts per MIR gene family
g <- ggplot(data = MIR.freqs,aes(x=MIR,y=Counts,fill=MIR)) +
geom_bar(stat="identity",color="black") +
scale_fill_brewer(palette="Set3") +
labs(x = "MIR gene family",y="Counts")
# save the plots
ggsave(filename = output_png,plot = g,width = 7,height = 5,dpi = 400,device = "png")
ggsave(filename = output_svg,plot = g,width = 7,height = 5,device = "svg")
|
be45656e6a0bb4aea149b698e34513ef5aeb4bb3 | 7dd51c0c6137f8a32a6e2f265874acfcb0c0b5f8 | /old20200619/code/02_MDSC_MAC1.R | 49ccee081876c63cc0f7aa87c296ebc47ae9ef4f | [] | no_license | Winnie09/GBM_myeloid | 7d3c657f9ec431da43b026570e5684b552dedcee | a931f18556b509073e5592e13b93b8cf7e32636d | refs/heads/master | 2023-02-04T04:29:50.274329 | 2020-12-20T21:16:16 | 2020-12-20T21:16:16 | 268,260,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,556 | r | 02_MDSC_MAC1.R | library(Matrix)
source('/home-4/[email protected]/scratch/Wenpin/trajectory_variability/function/01_function.R')
# pseudotime <- readRDS('/home-4/[email protected]/scratch/Wenpin/GBM_myeloid/data/order/MDSC_MAC3_NEU1.rds')
pseudotime <- readRDS('/home-4/[email protected]/scratch/Wenpin/GBM_myeloid/data/order/MDSC_MAC1.rds')
rdir <- '/home-4/[email protected]/scratch/Wenpin/GBM_myeloid/result/M_MDSC/MDSC_MAC1/'
dir.create(rdir, showWarnings = F, recursive = T)
setwd(rdir)
cnt <- readRDS('/home-4/[email protected]/data2/whou10/GBM/singleObject/M/M.rds')
# # ### subset a small test set
# set.seed(12345)
# id1 = sample(rownames(cnt),3)
# cnt <- cnt[id1, ]
#####
meta <- readRDS('/home-4/[email protected]/data2/whou10/GBM/singleObject/M/meta.rds')
cnt <- cnt[, pseudotime]
cellanno <- data.frame(cell = colnames(cnt), sample = sapply(colnames(cnt), function(i) sub('_.*','',sub('.*-','',i))), stringsAsFactors = FALSE)
mdsc <- read.csv('/home-4/[email protected]/data2/whou10/GBM/meta/mdsc_proportions.csv', header = T)
design <- data.frame(MdscProp = mdsc[,8]) ## 3 is E-MDSC, 8 is M-MDSC
rownames(design) <- as.character(mdsc[,2])
cellanno <- cellanno[cellanno[,2] %in% rownames(design),]
cnt <- cnt[, cellanno[,1]]
pseudotime = pseudotime[pseudotime %in% cellanno[,1]]
cnt <- as.matrix(cnt)
cnt <- cnt[rowMeans(cnt>0.1)>0.01,] ## filter genes
### algo
psn <- seq(1, length(pseudotime))
names(psn) <- pseudotime
design = cbind(1, design)
res <- testpt(expr=cnt,cellanno=cellanno,pseudotime=psn,design=design,ncores=8, permuiter=100)
saveRDS(res, 'final.rds')
|
fac822ee49fa0b6f8d4a51339caf370af8ca41e3 | c28c69b1600f046e15824b810deedb5461182b3f | /inst/shiny/global.r | e509d1153c3b292c52b7115673d37708eba250f7 | [] | no_license | choi-phd/TestDesign | 14bd43139f54d377e96d5d0ee3d12427736cd8a6 | 58b5fb1c09b0dc8264173fe485e3de5f251f0715 | refs/heads/main | 2023-04-19T20:42:23.427342 | 2023-03-18T20:46:52 | 2023-03-18T20:46:52 | 174,591,899 | 4 | 6 | null | 2023-01-27T01:20:16 | 2019-03-08T18:52:45 | R | UTF-8 | R | false | false | 1,253 | r | global.r | library(shiny, quietly = TRUE)
library(shinythemes, quietly = TRUE)
library(shinyWidgets, quietly = TRUE)
suppressPackageStartupMessages(library(shinyjs, quietly = TRUE, warn.conflicts = FALSE))
library(DT, quietly = TRUE, warn.conflicts = FALSE)
library(TestDesign, quietly = TRUE)
solvers <- c("lpSolve", "Rsymphony", "gurobi", "Rglpk")
accepted_files <- c("text/csv", "text/comma-separated-values,text/plain", ".csv")
css_y <- "overflow-y:scroll; max-height: 65vh"
parseText <- function(arg_text) {
  # TRUE if the text contains only numbers, dots, commas, spaces, and minus signs
txt <- gsub("[^0-9\\., \\-]", "", arg_text)
return(txt == arg_text)
}
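# e.g. parseText("1, 2.5 -3") is TRUE; parseText("x = 1") is FALSE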
parseObject <- function(arg_object) {
if (is.null(arg_object)) {
return(NULL)
}
return(arg_object)
}
first_obj <- TRUE
assignObject <- function(obj, objname, desc) {
if (first_obj) {
first_obj <<- FALSE
message("\nRefresh the environment tab to see the objects in the list.")
}
assign(objname, obj, envir = .GlobalEnv)
tmp <- sprintf("%-48s assigned to : %s", desc, objname)
message(tmp)
}
updateLogs <- function(v, newlog) {
v$logs <- c(v$logs, newlog)
v$logs_text <- paste0(v$logs, collapse = "\n")
return(v)
}
getTempFilePath <- function(fname) {
return(file.path(tempdir(), fname))
}
|
78ca2563f01d63edb285bea73fffa27c942915b5 | 2cbc1106a7ed4b57df979a267bd8ee3cd7f6c2c7 | /man/wiki_graph.Rd | 43ca10d5105b3378c214a18bbd5ebaae810edf98 | [] | no_license | Dap246/Lab03 | 24b9f474f4d9728c55914de7d7112ee4476af426 | e659ea696fc5ed2a70707a4cc3dedb1e06c0e9c3 | refs/heads/master | 2023-08-26T03:49:58.811515 | 2021-09-14T20:08:05 | 2021-09-14T20:08:05 | 406,218,948 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 613 | rd | wiki_graph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{wiki_graph}
\alias{wiki_graph}
\title{Graph for Dijkstra's algorithm.}
\format{
A data frame for a graph of 6 nodes and the distance/weight between them:
\describe{
\item{v1}{vertices 1-6}
\item{v2}{vertices 1-6}
\item{w}{weight or distance of each edge}
}
}
\source{
https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
}
\usage{
data(wiki_graph)
}
\description{
A small example graph of 6 nodes with weighted edges, used for testing
implementations of Dijkstra's shortest-path algorithm.
}
\examples{
data(wiki_graph)
}
\keyword{datasets}
|
20b5966637365cb1e14fc4f21f9c10555fc0b3ab | d4747fe5f7b17988c292573faeecb13e3b4f235e | /R/multiple.R | cb0de01fe6adbf912b3e68f6b829ada25d234c0c | [] | no_license | ElieLP/MultipleR | e85865fb200288aed61cd00ef027f02cec937bb9 | 23ff9ec300b66555b767b19787bf3d9c3326875a | refs/heads/master | 2020-03-15T13:42:37.935214 | 2018-05-04T18:27:54 | 2018-05-04T18:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 519 | r | multiple.R | #' Double
#'
#' @param x Number
#'
#' @return The double of x
#' @export
#'
#' @examples
#' Double(2)
#' 4
Double <- function(x = 1) {
return(x*2)
}
#' Triple
#'
#' @param x Number
#'
#' @return The triple of x
#' @export
#'
#' @examples
#' Triple(3)
#' 9
Triple <- function(x = 1) {
return(x*3)
}
#' Multiple
#'
#' @param x First number being multiplied
#' @param n Second number being multiplied
#'
#' @return x times n
#' @export
#'
#' @examples Multiple(3,6)
#' 18
Multiple <- function(x,n) {
return(x*n)
}
|
23c536d375cd03ead23e6a8f102bb319df73904d | be1a186d90ed7435615b3b10b97a16168dee2498 | /R/generate_packets.R | 891c63f7f2f76226d4a5e6299bbfb6ef08882bf4 | [] | no_license | paulmeinz/lrdatapacket | 505a7999464ff01d343ce8b1226c88cb3dd0ae9f | 801d4f96b0c0b8936aacf154183d609bc686c0d4 | refs/heads/master | 2021-01-21T13:34:01.186764 | 2016-05-03T16:13:43 | 2016-05-03T16:13:43 | 47,354,736 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,480 | r | generate_packets.R |
generate_packets <- function(data,
path,
                             use_subject = TRUE,
program = '',
program_short = '') {
subjects <- unique(data$subject_long)
if (use_subject) {
for (i in subjects) {
plot_data <- data[data$subject_long == i,]
subject <- plot_data[1, 'subject']
# Headcount Plots (duplicated and unduplicated)
title <- paste(i,':','\n', 'Duplicated Headcount by Academic Term',
sep = '')
save <- paste(path, subject,'.jpg', sep = '')
plot_headcounts(plot_data, save, title, undup = FALSE)
      title <- paste(i,':','\n', 'Unduplicated Headcount by Academic Term',
sep = '')
save <- paste(path, subject, '1.jpg', sep = '')
plot_headcounts(plot_data, save, title)
# Disaggregated Headcount Plots....Age Range
title <- paste(i,':','\n', 'Headcount by Age Range', sep = '')
save <- paste(path, subject, '2.jpg', sep = '')
disag_hc_plot(plot_data, 'age', save, 'Age Range', title)
# Collapsed Age Range
title <- paste(i,':','\n', 'Headcount by Age Range (Collapsed)', sep = '')
save <- paste(path, subject, '3.jpg', sep = '')
disag_hc_plot(plot_data, 'agecol', save, 'Age Range', title)
# Gender
title <- paste(i,':','\n', 'Headcount by Gender', sep = '')
save <- paste(path, subject, '4.jpg', sep = '')
disag_hc_plot(plot_data, 'gender', save, 'Gender', title)
# Ethnicity
title <- paste(i,':','\n', 'Headcount by Ethnicity', sep = '')
save <- paste(path, subject, '5.jpg', sep = '')
disag_hc_plot(plot_data, 'ethnicity', save, 'Ethnicity', title)
# Educational Goal
title <- paste(i,':','\n', 'Headcount by Ed Goal', sep = '')
save <- paste(path, subject, '6.jpg', sep = '')
disag_hc_plot(plot_data, 'matr_goal', save, 'Educational Goal', title)
# Educational Level
title <- paste(i,':','\n', 'Headcount by Ed Level', sep = '')
save <- paste(path, subject, '7.jpg', sep = '')
disag_hc_plot(plot_data, 'matr_goal', save, 'Educational Level', title)
# Instructional Mode
title <- paste(i,':','\n', 'Headcount by Instructional Mode', sep = '')
save <- paste(path, subject, '8.jpg', sep = '')
disag_hc_plot(plot_data, 'inst_mode', save, 'Instructional Mode', title)
# Course Level
title <- paste(i,':','\n', 'Headcount by Course Level', sep = '')
save <- paste(path, subject, '9.jpg', sep = '')
disag_hc_plot(plot_data, 'course_number', save, 'Course Level', title)
# Freshman Status
title <- paste(i,':','\n', 'Headcount by Freshman Status', sep = '')
save <- paste(path, subject, '10.jpg', sep = '')
disag_hc_plot(plot_data, 'enroll_status', save, 'Freshman Status', title)
# Primary Language
title <- paste(i,':','\n', 'Headcount by Primary Language', sep = '')
save <- paste(path, subject, '11.jpg', sep = '')
disag_hc_plot(plot_data, 'language', save, 'Primary Language', title)
# Success Rate Plots...Age Range
title <- paste(i,':','\n', 'Success Rate by Age Range', sep = '')
save <- paste(path, subject, '12.jpg', sep = '')
disag_scs_plot(plot_data, 'age', save, 'Age Range', title)
# Age Range Collapsed
title <- paste(i,':','\n', 'Success Rate by Age Range (Collapsed)',
sep = '')
save <- paste(path, subject, '13.jpg', sep = '')
disag_scs_plot(plot_data, 'agecol', save, 'Age Range', title)
# Gender
title <- paste(i,':','\n', 'Success Rate by Gender', sep = '')
save <- paste(path, subject, '14.jpg', sep = '')
disag_scs_plot(plot_data, 'gender', save, 'Gender', title)
# Ethnicity
title <- paste(i,':','\n', 'Success Rate by Ethnicity', sep = '')
save <- paste(path, subject, '15.jpg', sep = '')
disag_scs_plot(plot_data, 'ethnicity', save, 'Ethnicity', title)
# Educational Goal
title <- paste(i,':','\n', 'Success Rate by Ed Goal', sep = '')
save <- paste(path, subject, '16.jpg', sep = '')
disag_scs_plot(plot_data, 'matr_goal', save, 'Educational Goal', title)
# Educational Level
title <- paste(i,':','\n', 'Success Rate by Ed Level', sep = '')
save <- paste(path, subject, '17.jpg', sep = '')
disag_scs_plot(plot_data, 'matr_goal', save, 'Educational Level', title)
# Instructional Mode
title <- paste(i,':','\n', 'Success Rate by Instructional Mode', sep = '')
save <- paste(path, subject, '18.jpg', sep = '')
disag_scs_plot(plot_data, 'inst_mode', save, 'Instructional Mode', title)
# Course Level
title <- paste(i,':','\n', 'Success Rate by Course Level', sep = '')
save <- paste(path, subject, '19.jpg', sep = '')
disag_scs_plot(plot_data, 'course_number', save, 'Course Level', title)
# Freshman Status
title <- paste(i,':','\n', 'Success Rate by Freshman Status', sep = '')
save <- paste(path, subject, '20.jpg', sep = '')
disag_scs_plot(plot_data, 'enroll_status', save, 'Freshman Status', title)
# Primary Language
title <- paste(i,':','\n', 'Success Rate by Primary Language', sep = '')
save <- paste(path, subject, '21.jpg', sep = '')
disag_scs_plot(plot_data, 'language', save, 'Primary Language', title)
}
}
if (use_subject) {
program <- 'Collegewide'
program_short <- 'CRCC'
}
# I don't like how this code repeats...
# Headcount Plots (duplicated and unduplicated)
title <- paste(program,':','\n', 'Duplicated Headcount by Academic Term',
sep = '')
save <- paste(path, program_short,'.jpg', sep = '')
plot_headcounts(data, save, title, undup = FALSE)
  title <- paste(program,':','\n', 'Unduplicated Headcount by Academic Term',
sep = '')
save <- paste(path, program_short, '1.jpg', sep = '')
plot_headcounts(data, save, title)
# Disaggregated Headcount Plots....Age Range
title <- paste(program,':','\n', 'Headcount by Age Range', sep = '')
save <- paste(path, program_short, '2.jpg', sep = '')
disag_hc_plot(data, 'age', save, 'Age Range', title)
# Collapsed Age Range
title <- paste(program,':','\n', 'Headcount by Age Range (Collapsed)', sep = '')
save <- paste(path, program_short, '3.jpg', sep = '')
disag_hc_plot(data, 'agecol', save, 'Age Range', title)
# Gender
title <- paste(program,':','\n', 'Headcount by Gender', sep = '')
save <- paste(path, program_short, '4.jpg', sep = '')
disag_hc_plot(data, 'gender', save, 'Gender', title)
# Ethnicity
title <- paste(program,':','\n', 'Headcount by Ethnicity', sep = '')
save <- paste(path, program_short, '5.jpg', sep = '')
disag_hc_plot(data, 'ethnicity', save, 'Ethnicity', title)
# Educational Goal
title <- paste(program,':','\n', 'Headcount by Ed Goal', sep = '')
save <- paste(path, program_short, '6.jpg', sep = '')
disag_hc_plot(data, 'matr_goal', save, 'Educational Goal', title)
# Educational Level
title <- paste(program,':','\n', 'Headcount by Ed Level', sep = '')
save <- paste(path, program_short, '7.jpg', sep = '')
disag_hc_plot(data, 'matr_goal', save, 'Educational Level', title)
# Instructional Mode
title <- paste(program,':','\n', 'Headcount by Instructional Mode', sep = '')
save <- paste(path, program_short, '8.jpg', sep = '')
disag_hc_plot(data, 'inst_mode', save, 'Instructional Mode', title)
# Course Level
title <- paste(program,':','\n', 'Headcount by Course Level', sep = '')
save <- paste(path, program_short, '9.jpg', sep = '')
disag_hc_plot(data, 'course_number', save, 'Course Level', title)
# Freshman Status
title <- paste(program,':','\n', 'Headcount by Freshman Status', sep = '')
save <- paste(path, program_short, '10.jpg', sep = '')
disag_hc_plot(data, 'enroll_status', save, 'Freshman Status', title)
# Primary Language
title <- paste(program,':','\n', 'Headcount by Primary Language', sep = '')
save <- paste(path, program_short, '11.jpg', sep = '')
disag_hc_plot(data, 'language', save, 'Primary Language', title)
# Success Rate Plots...Age Range
title <- paste(program,':','\n', 'Success Rate by Age Range', sep = '')
save <- paste(path, program_short, '12.jpg', sep = '')
disag_scs_plot(data, 'age', save, 'Age Range', title)
# Age Range Collapsed
title <- paste(program,':','\n', 'Success Rate by Age Range (Collapsed)',
sep = '')
save <- paste(path, program_short, '13.jpg', sep = '')
disag_scs_plot(data, 'agecol', save, 'Age Range', title)
# Gender
title <- paste(program,':','\n', 'Success Rate by Gender', sep = '')
save <- paste(path, program_short, '14.jpg', sep = '')
disag_scs_plot(data, 'gender', save, 'Gender', title)
# Ethnicity
title <- paste(program,':','\n', 'Success Rate by Ethnicity', sep = '')
save <- paste(path, program_short, '15.jpg', sep = '')
disag_scs_plot(data, 'ethnicity', save, 'Ethnicity', title)
# Educational Goal
title <- paste(program,':','\n', 'Success Rate by Ed Goal', sep = '')
save <- paste(path, program_short, '16.jpg', sep = '')
disag_scs_plot(data, 'matr_goal', save, 'Educational Goal', title)
# Educational Level
title <- paste(program,':','\n', 'Success Rate by Ed Level', sep = '')
save <- paste(path, program_short, '17.jpg', sep = '')
disag_scs_plot(data, 'matr_goal', save, 'Educational Level', title)
# Instructional Mode
title <- paste(program,':','\n', 'Success Rate by Instructional Mode',
sep = '')
save <- paste(path, program_short, '18.jpg', sep = '')
disag_scs_plot(data, 'inst_mode', save, 'Instructional Mode', title)
# Course Level
title <- paste(program,':','\n', 'Success Rate by Course Level', sep = '')
save <- paste(path, program_short, '19.jpg', sep = '')
disag_scs_plot(data, 'course_number', save, 'Course Level', title)
# Freshman Status
title <- paste(program,':','\n', 'Success Rate by Freshman Status', sep = '')
save <- paste(path, program_short, '20.jpg', sep = '')
disag_scs_plot(data, 'enroll_status', save, 'Freshman Status', title)
# Primary Language
title <- paste(program,':','\n', 'Success Rate by Primary Language', sep = '')
save <- paste(path, program_short, '21.jpg', sep = '')
disag_scs_plot(data, 'language', save, 'Primary Language', title)
}
|
93ccc292a4eb4a9b237d44671da6366db598cec7 | 75f8a0d750aa880f5eaf72aafe9acba8746d9656 | /lectures/12/scripts/beeline.R | 5eff024d7d574f737d96e8c772f666819b9d35a7 | [] | no_license | dkhramov/iad_2020 | abe75b34c5fb422b1eb7ad320827a7253a7fb03d | 701b9eb08f65c0262808717549c369b270883a14 | refs/heads/master | 2021-02-06T22:43:27.924767 | 2020-03-20T12:19:24 | 2020-03-20T12:19:24 | 243,954,164 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,435 | r | beeline.R | #### Предварительная обработка данных
#### Based on the article "How I won the Beeline BigData competition"
#### https://habrahabr.ru/post/270367/
train <- read.table(unzip("../data/train.zip"), header = T, sep=",")
str(train)
# x8
# x23-x61
## Step 1. A first look at the factors
# factors are variables on a nominal scale
class(train[,4])
# Look at the factor levels
levels(train[,4])
# recode to numeric
z.tmp <- as.numeric(train[,4])
# How many unique values are there in total?
length(unique(train[,4]))
# How many times does each level occur?
table(train[,4])
## Step 2. A first look at the variables
## measured on a quantitative scale
# How are the numeric data distributed?
hist(train[,25])
# Are there too few histogram bins?
hist(train[,25],breaks=100)
# Inspect for outliers
boxplot(train[,25])
# Apply a log transformation
# Keep the original data column unchanged
zzz <- train[,25]
# Shift away negative values and zero
zzz2 <- zzz - min(zzz, na.rm = T) + 0.01
# Look at the result of the log transform
hist(log(zzz2))
# Standardize (rescale) the data to [0;1]
# after normalization
# maxs <- apply(train[ , c(25)], 2, max)
# mins <- apply(train[ , c(25)], 2, min)
# train.sc <- scale(train[ , c(25)], center = mins, scale = maxs - mins)
# Work out which points are the outliers
# Are there any outliers?
boxplot(log(zzz2))
## Box-Cox transformation
library(forecast)
# Find the optimal lambda
lambda <- BoxCox.lambda(zzz)
# Transform the data
zzz3 <- BoxCox(zzz, lambda)
hist(zzz3)
# Try another column.
zzz <- train[,31]
hist(zzz)
# Find the optimal lambda
lambda <- BoxCox.lambda(zzz)
# Transform the data
zzz3 <- BoxCox(zzz, lambda)
hist(zzz3)
## Checking for negative values
# Needed so the data can be shifted into the positive range
# before log-transforming a lognormal distribution.
hist(train[,26])
boxplot(train[,26])
sum((train[!is.na(train[,26]),26]<0))
## Step 3. Identifying outliers
# The three-sigma rule
# Outlier indicator:
zzz4 <- as.numeric(abs(zzz2 - mean(zzz2, na.rm = T)) > 3*sd(zzz2, na.rm = T))
# Distribution of the outlier-indicator values
table(zzz4)
# Outlier positions are replaced with NA
zzz[zzz4==1] <- NA
# Add the outlier indicator zzz4 to the data as a new column
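# For example (illustrative column name):
# train$x25_outlier <- zzz4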
## Automating the transformation of the quantitative variables (steps 2-3)
## Not recommended: it relies on the Box-Cox transformation internally
length(c(9,24:62))
# Matrix of outlier indicators
extremes.ind <- matrix(rep(-99, 50000*40), nrow=50000, ncol=40)
j <- 1
for (i in c(9,24:62)){
  # Find the optimal lambda
lambda <- BoxCox.lambda( train[,i] )
  # Transform the data
zzz3 <- BoxCox(train[,i], lambda)
zzz4 <- as.numeric(abs(zzz3 - mean(zzz3, na.rm = T)) > 3*sd(zzz3, na.rm = T))
zzz4[is.na(zzz4)] <- 0
train[zzz4==1,i] <- NA
extremes.ind[ , j] <- zzz4
j <- j+1
}
summary(extremes.ind)
## Step 4. Transforming categorical variables: rare values.
# These values are hashes
x0 <- as.character(train$x0)
# There are 50000 in total, but far fewer unique values.
# table() gives us the category names
zzz <- table(x0)
# The unique hashes
names(zzz)
# How many are there?
length(zzz)
# Find hashes that occur in fewer than 0.5% of cases
zzz.1 <- names(zzz)[which( as.numeric(zzz)/nrow(train)*100 < 0.5 )]
# Mark them as rare
x0[x0 %in% zzz.1 ] <- "Rare"
# The same in one line
# x0[x0 %in% names(zzz)[as.numeric(zzz)/nrow(train)*100 < 0.5 ] ] <- "Rare"
# Aside: building up the "rarity" condition step by step
plot(as.numeric(zzz))
plot(as.numeric(zzz)/nrow(train)*100)
which( as.numeric(zzz)/nrow(train)*100 < 0.5 )
## Step 5. Transforming categorical variables: value indicators.
# The class.ind procedure from the nnet package
x0.class <- nnet::class.ind(factor(x0))
# The new columns need nicer names
x0.class <- as.data.frame(x0.class) # otherwise the names will not be assigned
names(x0.class)[1:9] <- paste("x0.0", 1:9, sep= "")
names(x0.class)[10:ncol(x0.class)] <- paste("x0.", 10:ncol(x0.class), sep= "")
## Step 6. Feature engineering
thr_top <- 0.9
thr_bottom <- 0.05
# names(train) %in% c("x55", "x56", "x57", "x58", "x59", "x60")
#
# for col in ["x55", "x56", "x57", "x58", "x59", "x60"]:
# data["mostly_"+col] = (data[col] >= thr_top)*1
# data["no_"+col] = (data[col] <= thr_bottom)*1
# paste(n[!n %in% "y1"], collapse = " + ")
# paste("y1 ~", paste(n[!n %in% "y1"], collapse = " + "))
# ------------------------------------------------
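# A direct R counterpart of the commented Python sketch above (illustrative only;
# uses the same thresholds and column names):
# for (col in c("x55","x56","x57","x58","x59","x60")) {
#   train[[paste0("mostly_", col)]] <- as.numeric(train[[col]] >= thr_top)
#   train[[paste0("no_", col)]]     <- as.numeric(train[[col]] <= thr_bottom)
# }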
one<-data.frame(train[,56:61])
sum(one[5,])
check<-data.frame(rep(0,50000),rep(0,50000))
a<-(one[,1])
table(as.numeric(a<0.05))
head(a)  # 'a1' was not defined here; inspect 'a' instead
for (i in 1:6) {
a<-one[,i]
a<-as.numeric(a<0.05)
check[,i]<-a
}
for (i in 1:6) {
a<-one[,i]
a<-as.numeric(a>0.90)
check[,i+6]<-a
}
# ------------------------------------------------
?match
|
8b6dec9831683b1b7c0d55a1e452759e302ab939 | 730ec6f7b8046c842ee4b7d35bdace9cfd75f202 | /man/dada.Rd | a71711ac33e9d90bde608486a77f98feb350d8f9 | [] | no_license | cmsmoo/dada2 | e0a4d8a4eef1727bc0bfaf155a57a2c30a812111 | 0c99d4e6cf6d71c8733cd46fa31ada5df683fa3d | refs/heads/master | 2021-01-22T16:13:42.488161 | 2016-02-23T01:42:05 | 2016-02-23T01:42:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,103 | rd | dada.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dada.R
\name{dada}
\alias{dada}
\title{High resolution sample inference from amplicon data.}
\usage{
dada(derep, err, errorEstimationFunction = loessErrfun, selfConsist = FALSE,
aggregate = FALSE, ...)
}
\arguments{
\item{derep}{(Required). A derep-class object, the output of \code{\link{derepFastq}}.
A list of derep objects can be provided, in which case each will be independently denoised with
a shared error model.}
\item{err}{(Required). 16xN numeric matrix. Each entry must be between 0 and 1.
The matrix of estimated rates for each possible nucleotide transition (from sample nucleotide to read nucleotide).
Rows correspond to the 16 possible transitions (t_ij) indexed as so...
1:A->A, 2:A->C, 3:A->G, 4:A->T, 5:C->A, 6:C->C, 7:C->G, 8:C->T,
9:G->A, 10:G->C, 11:G->G, 12:G->T, 13:T->A, 14:T->C, 15:T->G, 16:T->T
Columns correspond to consensus quality scores. Typically there are 41 columns for the quality scores 0-40.
However, if USE_QUALS=FALSE, the matrix must have only one column.
If selfConsist = TRUE, this argument can be passed in as NULL and an initial error matrix will be estimated from the data
by assuming that all reads are errors away from one true sequence.}
\item{errorEstimationFunction}{(Optional). Function. Default loessErrfun.
If USE_QUALS = TRUE, \code{errorEstimationFunction(dada()$trans_out)} is computed after sample inference finishes
and the return value is used as the new estimate of the err matrix.
If USE_QUALS = FALSE, this argument is ignored, and transition rates are estimated by maximum likelihood (t_ij = n_ij/n_i).}
\item{selfConsist}{(Optional). \code{logical(1)}. Default FALSE.
If selfConsist = TRUE, the algorithm will alternate between sample inference and error rate estimation until convergence.
Error rate estimation is performed by the errorEstimationFunction, which is required for selfConsist mode. If dada is
run in selfConsist mode without specifying this function, the default loessErrfun will be used.
If selfConsist=FALSE the algorithm performs one round of sample inference based on the provided err matrix.}
\item{aggregate}{(Optional). \code{logical(1)}. Default is FALSE.
If aggregate = TRUE, the algorithm will pool together all samples prior to sample inference.
If aggregate = FALSE (default), sample inference is performed on each sample individually.
aggregate has no effect if only 1 sample is provided, and aggregate does not affect error rate estimation in selfConsist
mode, which always pools the observed transitions across samples.}
\item{...}{(Optional). All dada_opts can be passed in as arguments to the dada() function.
See \code{\link{setDadaOpt}} for a discussion of the various dada options.}
}
\value{
A \code{\link{dada-class}} object or list of such objects of a list of derep objects was provided.
}
\description{
The dada function takes as input dereplicated amplicon sequencing reads and returns the inferred composition
of the sample (or samples). Put another way, dada removes all sequencing errors to reveal the members of the
sequenced community.
If dada is run in selfConsist=TRUE mode, the algorithm will infer both the sample composition and
the parameters of its error model from the data.
}
\details{
Briefly, DADA implements a statistical test for the notion that a specific sequence was seen too many times
to have been caused by amplicon errors from currently inferred sample sequences. Overly-abundant
sequences are used as the seeds of new clusters of sequencing reads, and the final set of clusters
is taken to represent the denoised composition of the sample. A more detailed explanation of the algorithm
is found in two publications:
\itemize{
\item{Callahan, B. J., McMurdie, P. J., Rosen, M. J., Han, A. W., Johnson, A. J., & Holmes, S. P. (2015). DADA2: High resolution sample inference from amplicon data. bioRxiv, 024034.}
\item{Rosen, M. J., Callahan, B. J., Fisher, D. S., & Holmes, S. P. (2012). Denoising PCR-amplified metagenome data. BMC bioinformatics, 13(1), 283.}
}
DADA depends on a parametric error model of substitutions. Thus the quality of its sample inference is affected
by the accuracy of the estimated error rates. DADA's selfConsist mode allows these error rates to be inferred
from the data.
All of DADA's comparisons between sequences depend on pairwise alignments. This step is the most computationally
intensive part of the algorithm, and two alignment heuristics have been implemented for speed: A kmer-distance
screen and a banded Needleman-Wunsch alignment. See \code{\link{setDadaOpt}}.
}
\examples{
derep1 = derepFastq(system.file("extdata", "sam1F.fastq.gz", package="dada2"))
derep2 = derepFastq(system.file("extdata", "sam2F.fastq.gz", package="dada2"))
dada(derep1, err=tperr1)
dada(list(sam1=derep1, sam2=derep2), err=tperr1, selfConsist=TRUE)
dada(derep1, err=inflateErr(tperr1,2), BAND_SIZE=32, OMEGA_A=1e-20)
}
\seealso{
\code{\link{derepFastq}}
\code{\link{setDadaOpt}}
}
|
6e68586f630ff2f5a4d3f2aca722049aa0ac0ee3 | 5dc8f146c468481e8fbda425d4a71a0fe0182f63 | /chap03/qplot.R | 0305843865a2889d725600e1ecc84fe04782e17d | [] | no_license | kyh8874/R | 12fe23f129c726dd3a360fd780f96416a0d70ab8 | b435c030691e013df5386d049dcc79b09ac6f17e | refs/heads/master | 2020-04-28T04:05:57.329401 | 2019-05-01T03:02:38 | 2019-05-01T03:02:38 | 174,964,608 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 515 | r | qplot.R | sample(seq(1,10, length.out=100), replace = T, size = 1000) -> data1
library(ggplot2)   # qplot() used below comes from ggplot2
length(data1)
str(data1)
table(data1)
qplot(data1)
sample(c('a','b','c','d'),100,replace=T, prob=c(0.2,0.5,0.9,0.3))->data2
table(data2)
qplot(data2)
library(sqldf)
c(80, 60, 70, 50, 90)-> exam
exam
sum(exam)/5 -> avg_exam
avg_exam
mean(exam)
seq(1,45,length.out=6)
seq()
sample(seq(1,45,length.out=100), replace=T,size=1000) -> loo
loo
sample(seq(1,45,length.out=45), replace=T,size=1000) -> lo
lo%/%1 ->to
table(to)
qplot(to)
|
6e316f16f5ca33c9306f0c5a4018e8ae012ff843 | 05d2e2061c8a8383b6ff43f646b635a5b34eb413 | /Splicing_Workflow/CPM_feature_table.R | 9898407f35d1244c968930a4fe9b441aca0056c5 | [] | no_license | cwarden45/RNAseq_templates | 6a0a971985bff86e405582a441313517f478e192 | 281ecf76ce852a3b9302ea89674f65fe7b68292f | refs/heads/master | 2022-06-18T05:15:17.374337 | 2022-06-09T21:19:45 | 2022-06-09T21:19:45 | 60,568,054 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,556 | r | CPM_feature_table.R | normalizeTotalExpression = function (geneExpr, totalReads) {
return(geneExpr / totalReads)
}#end def normalizeTotalExpression
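# e.g. a feature with 150 reads in a sample with 50 million aligned reads:
# normalizeTotalExpression(150, totalReads = 50) gives 3, i.e. 3 counts per million (CPM)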
param.table = read.table("parameters.txt", header=T, sep="\t")
sample.description.file = as.character(param.table$Value[param.table$Parameter == "sample_description_file"])
count.folder=as.character(param.table$Value[param.table$Parameter == "QoRTs_Merged_Folder"])
comp.name=as.character(param.table$Value[param.table$Parameter == "comp_name"])
user.folder = as.character(param.table$Value[param.table$Parameter == "Result_Folder"])
count.file = paste(user.folder,"/QoRTS_JunctionSeq_feature_counts.txt",sep="")
CPM.file = paste(user.folder,"/QoRTS_JunctionSeq_feature_CPM.txt",sep="")
stat.file = paste(user.folder,"/DSG/feature_stats_",comp.name,".txt",sep="")
stat.table = read.table(stat.file, head=T, sep="\t")
feature.info = stat.table[,1:7]
rm(stat.table)
sample.description.table = read.table(sample.description.file,head=T, sep="\t")
sampleIDs = as.character(sample.description.table$sample.ID)
sample.label = as.character(sample.description.table$userID)
total.million.aligned.reads = as.numeric(sample.description.table$aligned.reads) / 1000000
countFiles = paste(count.folder,"/",sampleIDs,"/QC.spliceJunctionAndExonCounts.withNovel.forJunctionSeq.txt",sep="")
for (i in 1:length(countFiles)){
if(!file.exists(countFiles[i])){
gzFile = paste(countFiles[i],".gz",sep="")
command = paste("gunzip -c ",gzFile," > ",countFiles[i],sep="")
system(command)
}#end if(!file.exists(countFiles[i]))
}#end for (i in 1:length(countFiles))
#some loss of overlapping features, but mostly covered
for (i in 1:length(countFiles)){
coverage.table = read.table(countFiles[i],head=F, sep="\t")
feature.name = coverage.table[,1]
feature.count = coverage.table[,2]
sample.counts = feature.count[match(feature.info[,1], feature.name)]
if(i == 1){
count.mat = data.frame(sampleID = sample.counts)
}else{
count.mat = data.frame(count.mat, sample.counts)
}
}#end for (i in 1:length(countFiles))
colnames(count.mat) = sample.label
annotated.count.table = data.frame(feature.info, count.mat)
write.table(annotated.count.table, count.file, quote=F, sep="\t", row.names=F)
CPM = round(t(apply(count.mat, 1, normalizeTotalExpression, totalReads = total.million.aligned.reads)), digits=1)
colnames(CPM) = sample.label
annotated.CPM.table = data.frame(feature.info, CPM)
write.table(annotated.CPM.table, CPM.file, quote=F, sep="\t", row.names=F) |
4ed8efc6fe88316fd40e8d5787adb53876bf2152 | 2b9b26a5aee3dd3bf9ec492ec0d53f7a9d76d7ec | /in-work/gadgets/gadget_clean_rows.R | 74d4bc911500ea70d26bec5dd41364b56354f6f0 | [] | no_license | whaleshark16/teachingApps | 808d7cb282aaedbcd7d3c389bd5f7f01ebbac7ff | 47f9f5275719087575565d5facfb6c94a5ff5d53 | refs/heads/master | 2021-09-07T17:06:21.479444 | 2018-02-26T15:22:00 | 2018-02-26T15:22:00 | 118,481,453 | 0 | 0 | null | 2018-02-22T18:27:25 | 2018-01-22T16:12:55 | HTML | UTF-8 | R | false | false | 3,088 | r | gadget_clean_rows.R | #' Subset data using column values
#'
#' @description Shiny gadget used to visually inspect column values in a data set
#' and subset rows by specifying column values
#'
#' @param data A data set
#' @param colorBy \code{character} Column by which the \code{parcoords} plot should be colored
#' @param theme \code{character} A bootswatch theme provided to \code{shinythemes::shinytheme}
#' @param width \code{character} Width of the gadget (in valid css units)
#' @param height \code{character} Height of the gadget (in valid css units)
#' @param css \code{character} Path to a custom css file
#'
#' @import crosstalk
#' @importFrom shinythemes shinytheme
#' @importFrom shiny runGadget browserViewer
#' @importFrom shiny fluidPage tags includeCSS sidebarLayout sidebarPanel
#' @importFrom shiny uiOutput selectizeInput actionButton reactive h4
#' @importFrom shiny stopApp observeEvent mainPanel
#' @importFrom data.table as.data.table
#' @importFrom DT renderDataTable dataTableOutput datatable
#'
#' @return A \code{list} of length 2
#' \item{data}{A \code{data.frame} containing the columns that were not removed}
#' \item{script}{A line of code that can be used to replicate cleaning performed in the gadget}
#'
#' @examples \dontrun{clean_rows(mtcars)}
#'
#' @family shinygadgets
#' @return A printed shiny app
#' @export
gadget_clean_rows <-
function(data,
theme = "flatly",
colorBy = NULL,
width = '100%',
height = '600px',
css = NULL) {
pacman::p_load_gh('timelyportfolio/parcoords')
ui = navbarPage(title = 'Data Cleaning App',
collapsible = T,
position = 'fixed-top',
theme = shinythemes::shinytheme(theme = theme),
header = if(is.null(css)) teachingApps::add_css(),
tabPanel(h4('Parcoords'),
fluidRow( parcoordsOutput('DiamondPlot'))),
tabPanel(h4('Selected Data'),
sidebarLayout(
sidebarPanel(width = 3,
actionButton('done',h4('Finish'), width = '100%')),
mainPanel(width = 9,
DT::dataTableOutput('SelectedData')))))
server = function(input, output) {
output$DiamondPlot <- renderParcoords({
parcoords(data,
rownames= T,
color = list(colorScale = htmlwidgets::JS('d3.scale.category10()'),
colorBy = colorBy),
autoresize = T,
reorderable = T,
width = NULL,
height = 800,
brushMode = "1D")
})
    ###Here we can access input$DiamondPlot_brushed_row_names to determine which rows are selected
###we display these results in a table
ids <- reactive({ rownames(data) %in% input$DiamondPlot_brushed_row_names })
output$SelectedData <- DT::renderDataTable({
DT::datatable(data[ids(),])
})
observeEvent(input$done, {
stopApp(list(data = as.data.frame( data[ids(),] )))
})
}
runGadget(app = ui,
server = server,
viewer = browserViewer(browser = getOption("browser")))
} |
10af5a3813dd1df1f1ffad30bfb5a6af77168255 | 57ed22671d2c348fe35c7832fd008c3a51de039c | /R/lea_prep.R | 4fce69aa14028eeaa6f03df737276a7c755fc19b | [
"MIT"
] | permissive | datalorax/leaidr | 694c4d1d6d7773454673876d3c982d0db49f80b7 | 26f4672c98cae96a6ecc96e9c705890ed7a8ecb7 | refs/heads/master | 2022-11-20T13:38:44.794448 | 2020-07-27T21:42:36 | 2020-07-27T21:42:36 | 281,791,912 | 1 | 0 | null | 2020-07-22T22:02:47 | 2020-07-22T22:02:46 | null | UTF-8 | R | false | false | 1,138 | r | lea_prep.R | #' Prep State- or Nation-Wide District Shapefile
#'
#' @name lea_prep
#' @aliases lea_prep
#' @export lea_prep
#'
#' @description
#' `lea_prep()` creates your desired shapefile.
#' @usage
#' lea_prep(path = NULL, fips = NULL)
#'
#' @param path A character vector specifying a file path, such as: path = "./test".
#' @param fips A character vector specifying a FIPS code for a state. A reference table is available [here](https://www.mcc.co.mercer.pa.us/dps/state_fips_code_listing.htm).
#'
#' @rdname lea_prep
#' @export lea_prep
#'
#' @return
#' A shapefile.
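#'
#' @examples
#' \dontrun{
#' ## Illustrative only: assumes the shapefiles from lea_get() were saved to "./test"
#' ca_districts <- lea_prep(path = "./test", fips = "06")   # California
#' us_districts <- lea_prep(path = "./test", fips = "All")  # entire U.S.
#' }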
lea_prep <- function(path = NULL, fips = NULL){
if(is.null(path)){
stop("Please designate where the shapefiles from `lea_get()` exist, like this: `path = './test'`.")
}
if(is.null(fips)){
stop("Please designate which fip(s) you would like. If you would like the whole U.S., please write `fips = 'All'`.")
}
dis_map <- readOGR(dsn = file.path(path, "schooldistrict_sy1819_tl19.shp"),
layer = "schooldistrict_sy1819_tl19")
if(fips == "All"){
dis_map
} else {
dis_map <-
dis_map[which(dis_map$STATEFP %in% fips),]
}
}
|
0c6cf68803dc8670d9eb13c15d1222b1489ef1ce | 1b3e04e8978911acdad27507efe9c7f7bc6d68d3 | /R_Differences_Met31Met32.R | 463826ff83d76fbb9168141cc2160172fd490fbc | [] | no_license | r-scott-m/TimeSeriesFitting-R | 9a2f8249afa4b0b2719b7835315389ea28cd978e | dbeec39f1024d6c6641cba73bebb0d24e48769f7 | refs/heads/master | 2020-04-15T04:29:35.612004 | 2019-01-07T06:04:37 | 2019-01-07T06:04:37 | 164,385,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,336 | r | R_Differences_Met31Met32.R | require("qvalue")
require("gplots")
#Allegra <- read.table("Allgera_M31M32Different.txt",header=FALSE)
#row.names(Allegra) = Allegra$V2
WD = "/Users/smcisaac/Downloads"
setwd(WD)
y <-read.table("DataMetLimitedMerged_2011_1122_pruned_knn_clustered_SVDsubtract_RECLUSTER.txt",header =TRUE, row.names = 1, sep ="\t")
#only get Met31 and Met32 data
y <- y[,9:24]
#remove genes that are 0 at each timepoint
Check <- apply(y,1,function(x){sum(x==0)==16}) #indices of zero rows
y <- y[!Check,]
Input <- y
#we want there to be variation between the initial time points (0-5 minutes) and the later timepoints when we expect real action from the TF. Require that there is at least a 1.5-fold change between timepoints at(15-90) and either 2.5 and 5.
Dim <- dim(y)[1]
Cut <- log(1.5,base=2)
y2 <-y
for(i in seq(Dim,1)){
vec1 = y[i,c(2,4,5,6,7,8)]
vec2 = y[i,3:8]
vec3 = y[i,c(10,12,13,14,15,16)]
vec4 = y[i,3:8]
vec1 <- vec1 - vec1[[1]]
vec2 <- vec2 - vec2[[1]]
vec3 <- vec3 - vec3[[1]]
vec4 <- vec4 - vec4[[1]]
val1 <- max(abs(vec1 - vec1[[1]]))
val2 <- max(abs(vec2 - vec2[[1]]))
val3 <- max(abs(vec3 - vec3[[1]]))
val4 <- max(abs(vec4 - vec4[[1]]))
if(val1 < Cut && val2 < Cut && val3 < Cut && val4 < Cut){
y <- y[-i,]
}
}
#do regression on just Met31 and Met32 to check for genes that have significant time dependence.
#Significance is determined by computing the p-value of the F-statistic. If the p-value is less than 0.05
#in at least 1 of the time courses keep it.
Met31 <- y[,1:8]
Met32 <- y[,9:16]
Dim = dim(Met31)[1]
t1 <- c(0,2.5,5,15,30,45,60,90)
model.Met31 <- apply(Met31,1,function(x){lm(x ~ -1 + t1 + I(t1^2))})
model.Met32 <- apply(Met32,1,function(x){lm(x ~ -1 + t1 + I(t1^2))})
fpval.Met31 <- rep(0,dim(Met31)[1]) #store pvalues of the f-statistic
fpval.Met32 <- rep(0,dim(Met31)[1]) #store pvalues of the f-statistic
fpval.keep <- rep(0,dim(Met31)[1])
for(i in seq(1,Dim)){
temp.Met31 <- summary(model.Met31[[i]])
temp.Met32 <- summary(model.Met32[[i]])
fpval.Met31[i] <- pf(temp.Met31$fstatistic[1],temp.Met31$fstatistic[2],temp.Met31$fstatistic[3],lower.tail=FALSE)
fpval.Met32[i] <- pf(temp.Met32$fstatistic[1],temp.Met32$fstatistic[2],temp.Met32$fstatistic[3],lower.tail=FALSE)
if(is.nan(fpval.Met31[i])){
fpval.Met31[i] = .1
}
if(is.nan(fpval.Met32[i])){
fpval.Met32[i] = .1
}
if(fpval.Met31[i]<0.05 | fpval.Met32[i]<0.05){
fpval.keep[i] = 1
}
}
y <- y[!!fpval.keep,]
Met31 <- Met31[!!fpval.keep,]
Met32 <- Met32[!!fpval.keep,]
#time points
t <- c(0,2.5,5,15,30,45,60,90,0,2.5,5,15,30,45,60,90)
#genes that are different between Met31 and Met32
Ds <- c(0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1)
#Model A
A <- apply(y,1,function(x){lm(x ~ -1 + t + I(t^2) + (Ds:t) + (Ds:I(t^2)))})
#Model B
B <- apply(y,1,function(x){lm(x ~ -1 + t + I(t^2))})
#Log-likelihoods
LA <- sapply(A,function(x){logLik(x)})
LB <- sapply(B,function(x){logLik(x)})
#log-likelihood test statistic
D <- -2*LB + 2*LA
Pval <- pchisq(D,df = 2, lower.tail = F) #is it 3 or 2?
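# Note: Model A adds two terms beyond Model B (Ds:t and Ds:I(t^2)), so df = 2 looks right here.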
Q <- qvalue(Pval,lambda = 0)
#get the qvalues for each row
Q2 <- Q$qvalues
DistStore <- rep(0,dim(Met31)[1])
#find maximum distance between genes by timepoint
for(i in seq(1,dim(Met31)[1])){
temp <- dist(rbind(as.vector(data.matrix(Met31[i,])),as.vector(data.matrix(Met32[i,]))),method="maximum")[1]
DistStore[i] = temp
}
names(DistStore) = row.names(y)
#Store Q-values + distances, and find if Q < 0.05 + dist >0.585 (1.5-fold)
QD <- t(rbind(DistStore,Q2))
QD2 <- cbind(QD,rep(0,dim(Met31)[1]))
for(i in seq(1,dim(Met31)[1])){
if(QD2[i,1]>log(1.5,base=2) && QD2[i,2]<0.05){
QD2[i,3] = 1
}
}
Output <- y[!!as.vector(QD2[,3]),]
#compare to genes that are different from Allegra's experiments.
Row <- row.names(Output)
Z <- sapply(Row,function(x){strsplit(x," ")})
Z2 <- as.vector(sapply(Z,function(x){x[[3]]}))
intersect(Z2,row.names(Allegra))
notchosen <- Input[!(row.names(Input) %in% row.names(Output)),]
View(notchosen)
pairs.breaks <- c(seq(-2, 0, length.out=50),seq(0, 2, length.out=50))
pairs.breaks[51] = 0.01
mycol <- colorpanel(n=99,low="green",mid="black",high="red")
ClusterInfo <- heatmap.2(data.matrix(Output),breaks=pairs.breaks,Colv = FALSE,trace="none",scale="none",col=mycol,distfun=function(x) as.dist((1-cor(t(x)))/2),dendrogram = "none",density.info = "none",labCol="",cexRow=0.15)
ClusterInfo <- heatmap.2(data.matrix(notchosen),breaks=pairs.breaks,Colv = FALSE,trace="none",scale="none",col=mycol,distfun=function(x) as.dist((1-cor(t(x)))/2),dendrogram = "none",density.info = "none",labCol="",cexRow=0.15)
write.table(Output, file=paste(WD,'LinearModeling_Met31Met32_Big.txt', sep='/'), sep='\t', col.names=NA, row.names = TRUE, quote=FALSE)
########### two-sided t-test to compare Met31 and Met32
Dim = dim(Met31)[1]
Pval.ttest = rep(0,Dim)
for(i in seq(1,Dim)){
temp <- t.test(as.vector(data.matrix(Met31[i,])),as.vector(data.matrix(Met32[i,])),alternative="two.sided",paired = TRUE, conf.level = 0.95)
Pval.ttest[i] <- temp$p.value
}
Q.ttest <- qvalue(Pval.ttest)
Q2.ttest <- Q.ttest$qvalues
####what if we just took the means?
Met31 = Input[1:8,]
Met32 = Input[9:16,]
A <- as.numeric(apply(Input,1,function(x) {t.test(x[1:8],x[9:16])$p.value}))
mychoose <- A < 0.01
Input_thresholded = Input[mychoose,]
ClusterInfo <- heatmap.2(data.matrix(Input_thresholded),breaks=pairs.breaks,Colv = FALSE,trace="none",scale="none",col=mycol,distfun=function(x) as.dist((1-cor(t(x)))/2),dendrogram = "none",density.info = "none",labCol="",cexRow=0.15)
###do regression in tidy format!
library(dplyr); library(tibble); library(broom)  # needed below for tibble(), group_by()/do(), tidy(), glance()
g <- as.numeric(as.vector(Output[20,]))
df <- tibble(gene = rep(strsplit(row.names(Output[20,])," ")[[1]][3],16),
g = g,
t=t,
TF = c(rep("MET31",8),rep("MET32",8)),
Ds = Ds)
all_gene_fits = df %>%
group_by(gene) %>%
do(fit = lm(g ~ -1 + t + I(t^2) + (Ds:t) + (Ds:I(t^2)), data = .)) %>%
tidy(fit)
df %>%
group_by(gene) %>%
do(fit = lm(g ~ -1 + t + I(t^2) + (Ds:t) + (Ds:I(t^2)), data = .)) %>%
tidy(fit)
df %>%
group_by(gene) %>%
do(fit = lm(g ~ -1 + t + I(t^2) + (Ds:t) + (Ds:I(t^2)), data = .)) %>%
glance(fit)
#return f-statistic?
# can we do nested model?
plot(lm(g ~ -1 + t + I(t^2) + (Ds:t) + (Ds:I(t^2))))
|
3e2804f9bfd4d85da99d8c1da9b9c5e756ab75a7 | 00372f4ec66183c56629cf49be73faa016cc651b | /man/svyglmParallel.rd | 0b8612174182e616da944e5aca38181588928197 | [] | no_license | kevinblighe/RegParallel | 9427d66904a3965a3e82197cd0569ab6c83afeff | 818b7b4c60d3e8a49549cbd02fb8a3009e6fe6f9 | refs/heads/master | 2021-11-06T15:16:32.386110 | 2021-10-01T23:16:39 | 2021-10-01T23:16:39 | 103,679,101 | 36 | 8 | null | null | null | null | UTF-8 | R | false | false | 2,474 | rd | svyglmParallel.rd | \name{svyglmParallel}
\alias{svyglmParallel}
\title{Standard regression functions in R enabled for parallel processing over large data-frames - generalised linear model, with survey weights}
\description{This is a non-user function that is managed by RegParallel, the primary function.}
\usage{
svyglmParallel(
data,
design,
formula.list,
FUN,
variables,
terms,
startIndex,
blocksize,
blocks,
APPLYFUN,
conflevel,
excludeTerms,
excludeIntercept)
}
\arguments{
\item{data}{A data-frame that contains all model terms to be tested.
Variables that have all zeros will, automatically, be removed. REQUIRED.}
\item{design}{A survey design, created by survey::svydesign. REQUIRED.}
\item{formula.list}{A list containing formulae that can be coerced to
formula class via as.formula(). REQUIRED.}
\item{FUN}{Regression function. Must be of form, for example:
function(formula, data) glm(formula = formula, family = binomial, data = data).
REQUIRED.}
\item{variables}{Vector of variable names in data to be tested
independently. Each variable will have its own formula in formula.list.
REQUIRED.}
\item{terms}{Vector of terms used in the formulae in formula.list, excluding
the primary variable of interest. REQUIRED.}
\item{startIndex}{Starting column index in data object from which
processing can commence. REQUIRED.}
\item{blocksize}{Number of variables to test in each foreach loop.
REQUIRED.}
\item{blocks}{Total number of blocks required to complete analysis.
REQUIRED.}
\item{APPLYFUN}{The apply function to be used within each block during
processing. Will be one of: 'mclapply(...)', system=linux/mac and
nestedParallel=TRUE; 'parLapply(cl, ...)', system=windows and
nestedParallel=TRUE; 'lapply(...)', nestedParallel=FALSE. REQUIRED.}
\item{conflevel}{Confidence level for calculating odds or hazard ratios.
REQUIRED.}
\item{excludeTerms}{Remove these terms from the final output. These will
simply be grepped out. REQUIRED.}
\item{excludeIntercept}{Remove intercept terms from the final output.
REQUIRED.}
}
\details{
This is a non-user function that is managed by RegParallel, the
primary function.
}
\value{
A \code{\link{data.table}} object.
}
\author{
Kevin Blighe <[email protected]>
}
\examples{
require(survey)
data(nhanes)
design <- svydesign(id = ~ SDMVPSU,
strata = ~ SDMVSTRA,
weights = ~ WTMEC2YR,
nest = TRUE,
data = nhanes)
}
|
784e2ba856ea64a9b9c2a09ff00cbbfa5a592adf | 17e9b666d8447caa58381f2502980b8f3ec7f466 | /R/RInAction/4-1.r | eece0cba9f85a0b8789ccb088f3ece7fc6922cbb | [] | no_license | hangyan/Code | 079ac796a309abc0a1d8b8a61baeac645c5b791e | 2a43a676a00f41afc05d7a5c8aa52b714195e8c9 | refs/heads/master | 2021-08-01T04:15:52.828481 | 2021-07-26T05:32:09 | 2021-07-26T05:32:09 | 25,571,986 | 6 | 16 | null | null | null | null | UTF-8 | R | false | false | 1,765 | r | 4-1.r | manager <- c(1, 2, 3, 4, 5)
date <- c("10/24/08", "10/28/08", "10/1/08", "10/12/08", "5/1/09")
country <- c("US", "US", "UK", "UK", "UK")
gender <- c("M", "F", "F", "M", "F")
age <- c(32, 45, 25, 39, 99)
q1 <- c(5, 3, 3, 3, 2)
q2 <- c(4, 5, 5, 3, 2)
q3 <- c(5, 2, 5, 4, 1)
q4 <- c(5, 5, 5, NA, 2)
q5 <- c(5, 5, 2, NA, 1)
leadership <- data.frame(manager, date, country, gender, age,
q1, q2, q3, q4, q5, stringsAsFactors = FALSE)
mydata <- data.frame(x1 = c(2, 2, 6, 4), x2 = c(3, 4, 2, 8))
mydata$sumx <- mydata$x1 + mydata$x2
mydata$meanx <- (mydata$x1 + mydata$x2) / 2
mydata <- transform(mydata,
sumx = x1 + x2,
meanx = (x1 + x2) / 2)
leadership$age[leadership$age == 99] <- NA
leadership$agecat[leadership$age > 75] <- "Elder"
leadership$agecat[leadership$age >= 55 & leadership$age <= 75] <- "Middle Aged"
leadership$agecat[leadership$age < 55] <- "Young"
leadership <- within(leadership, {
agecat <- NA
agecat[age > 75] <- "Elder"
agecat[age >= 55 & age <= 75] <- "Middle Aged"
agecat[age < 55] <- "Young"
})
newdata <- na.omit(leadership)
mydates <- as.Date(c("2007-06-22", "2004-02-13"))
myformat <- "%m/%d/%y"
leadership$date <- as.Date(leadership$date, myformat)
newdata <- leadership[order(leadership$age),]
leadership$date <- as.Date(leadership$date, "%m/%d/%y")
startdate <- as.Date("2009-01-01")
enddate <- as.Date("2009-10-31")
newdata <- leadership[which(leadership$date >= startdate & leadership$date <= enddate),]
newdata
newdata <- subset(leadership, age >= 35 | age < 24, select = c(q1,q2,q3,q4))
newdata
newdata <- subset(leadership, gender == "M" & age > 25, select = gender:q4)
mysample <- leadership[sample(1:nrow(leadership),3,replace = FALSE),]
|
232bdeb4a2f64827dcc13569e82a044b76de2fa6 | 0f96b45966da3fd162b7d1810f413d23da242139 | /PackageCH/R/ComputeGManually.R | 6b4f5aa89095f6b81afce293153532894fb07c8e | [] | no_license | duvaneljulien/PackageCH | 0e281ba39edb0fd87678e96341f391661a7b57f8 | 9c127f8c5a0a33918cee76e3413a6b9fdade4328 | refs/heads/master | 2020-05-07T22:14:09.540087 | 2015-01-15T03:30:26 | 2015-01-15T03:30:26 | 29,278,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,334 | r | ComputeGManually.R | library(doMC)
library(foreach)
build_matrix_G_manually <- function(path.snps = "data/SNPs_14102014_145630.raw") {
N <- 20
# Get results (stored into SNPS_datetime.stamp.raw)
Phenotypes <- read.table(path.snps, header = TRUE)
# Remove rows with NA's
Phenotypes <- Phenotypes[complete.cases(Phenotypes), ]
# We must have "famid" and "id" as the two first columns
colnames(Phenotypes) <- c("famid", "id")
# Value of SNPs are stored in the last N columns
snps <- Phenotypes[, (ncol(Phenotypes)-N+1):ncol(Phenotypes)]
# Way (!) faster than doing two for loop
G <- dist(snps)
}
HeritabilityEstimation <- function(P, G) {
P <- as.matrix(P)
### We keep one phenotype and normalize its
V <- matrix(P,
nrow = nrow(P),
ncol = ncol(P))
V <- t(t(V) - colMeans(V))
V.sd <- colSds(V)
# Return
V <- t(t(V) * (1/V.sd))
# Get data for the specific phenotype
Y.distance.carre <- matrix(rep(V^2,nrow(V)),
ncol = nrow(V)) +
t(matrix(rep(V^2,nrow(V)),
ncol = nrow(V))) -
2 * V %*%t(V)
Y.distance <- sqrt(Y.distance.carre)
dcov(Y.distance, G) / (sqrt(dcov(G, G)) * sqrt(dcov(Y.distance, Y.distance)))
}
|
573d0c2db25a90a8f80f7712e7b59cb6703c4623 | 7d4841b039093f3009eb72a65ec0d506a1244082 | /figures/Figure 4/plot_UE3_heatmap.R | 530e9d6bb2cafed101a163003450cf8ee4779c81 | [] | no_license | sturkarslan/evolution-of-syntrophy | dfd18b1ec0181c5b64ef54f358aa246ca945475b | 5867dc719bdbdc4e4c98a12b8131e397b671b29f | refs/heads/master | 2021-08-08T22:03:47.013067 | 2020-06-23T05:31:37 | 2020-06-23T05:31:37 | 194,342,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,395 | r | plot_UE3_heatmap.R | ############# Serdar Turkarslan / Institute for Systems Biology ###############
# Last update: 06/05/2020
###############################################################################
# Plot heatmap for comparing mutations for UE3 line of Dv and Mm
#
###############################################################################
library('ggplot2'); library('reshape2');library(gridExtra);library(gplots);library('pheatmap')
source("~/Documents/Github/evolution-of-syntrophy/scripts/extractLegend.R")
# load early generation mutations data
mut.data.dv <- read.delim("~/Documents/GitHub/evolution-of-syntrophy/data/dvh_mutations_allsamples_attributes_3112020.txt", header=T, sep="\t", stringsAsFactors=FALSE)
mut.data.mm <- read.delim("~/Documents/GitHub/evolution-of-syntrophy/data/mmp_mutations_allsamples_attributes_3112020.txt", header=T, sep="\t", stringsAsFactors=FALSE)
ci.names <- read.delim("~/Documents/GitHub/evolution-of-syntrophy/data/clonal_isolate_pairs_IDs.txt", header=T, sep="\t")
# get DV Clones
ci.names.dv <- ci.names[grep("D", ci.names$isolate),]
mut.data.dv$sample.name <- mut.data.dv$sample
mut.data.dv <- mut.data.dv[grep("CI_00", mut.data.dv$sample, invert = T),]
# get Mm Clones
ci.names.mm <- ci.names[grep("M", ci.names$isolate),]
mut.data.mmsample.name <- mut.data.mm$sample
mut.data.mm <- mut.data.mm[grep("(CI_00|CI_36|CI_37)", mut.data.mm$sample, invert = T),]
mut.data.mm <- mut.data.mm[grep("WT", mut.data.mm$sample, invert = T),]
### replace geneid for intergenic mutations with IG for DV and translate clonal isolate names
for(row in 1:length(mut.data.dv$variant_id)){
variant <- mut.data.dv[row,"variant_id"]
locus_name <- strsplit(as.character(variant), split = "-", fixed=T)[[1]][2]
position <- strsplit(as.character(variant), split = "-", fixed=T)[[1]][3]
#copy sample names into a new column
sample <- mut.data.dv[row,"sample"]
sample.type <- strsplit(as.character(sample), split = "_", fixed=T)[[1]][1]
sample.no <- strsplit(as.character(sample), split = "_", fixed=T)[[1]][2]
if(sample.type == "CI"){
sample.name <- paste(as.character(ci.names.dv[which(ci.names.dv$pair == sample.no),"isolate"]), sep = "")
} else {
sample.name <- sample
}
if(locus_name == "IG"){
locus_name <- paste("IG", as.character(position), sep="_")
}
mut.data.dv[row,"locus_name"] <- locus_name
mut.data.dv[row,"sample.name"] <- sample.name
}
### replace geneid for intergenic mutations with IG for Mm and translate clonal isolate names
for(row in 1:length(mut.data.mm$variant_id)){
variant <- mut.data.mm[row,"variant_id"]
locus_name <- strsplit(as.character(variant), split = "-", fixed=T)[[1]][2]
position <- strsplit(as.character(variant), split = "-", fixed=T)[[1]][3]
#copy sample names into a new column
sample <- mut.data.mm[row,"sample"]
sample.type <- strsplit(as.character(sample), split = "_", fixed=T)[[1]][1]
sample.no <- strsplit(as.character(sample), split = "_", fixed=T)[[1]][2]
if(sample.type == "CI" & !(sample.no %in% c("36","37"))){
sample.name <- paste(as.character(ci.names.mm[which(ci.names.mm$pair == sample.no),"isolate"]), sep = "")
} else {
sample.name <- sample
}
if(locus_name == "IG"){
locus_name <- paste("IG", as.character(position), sep="_")
}
mut.data.mm[row,"locus_name"] <- locus_name
mut.data.mm[row,"sample.name"] <- sample.name
}
###### format early generations mutations data for dvh
mut.data.eg.dv <- mut.data.dv[grep(c("(AN_Dv-Ancestor-1|_UE3|CI(_2_|_6_|_8_|_20_|_21_|_1(1_|3_|4_)|_37_))"), mut.data.dv$sample),]
mut.data.eg.dv$transfer <- sapply(mut.data.eg.dv$sample, function(i) strsplit(as.character(i), split = "-")[[1]][2])
mut.data.eg.dv$line <- sapply(mut.data.eg.dv$sample, function(i) strsplit(as.character(i), split = "-")[[1]][1])
mut.data.eg.dv$freq2 <- as.numeric(as.character(sub("%", "",mut.data.eg.dv$freq)))
mut.data.eg.dv[is.na(mut.data.eg.dv$transfer),"transfer"] <- 152
## select UE3 line (change the grep pattern if you want to filter for a specific line)
mut.data.ua3.dv <- mut.data.eg.dv[grep("", mut.data.eg.dv$sample),]
# order based on transfer
mut.data.ua3.dv <- mut.data.ua3.dv[order(as.numeric(as.character(mut.data.ua3.dv$transfer))),]
# add line info
mut.data.ua3.dv$eline <- sapply(mut.data.ua3.dv$line, function(i) strsplit(as.character(i), split = "_", fixed = T)[[1]][2])
# get ancestral mutations
ancestor.mutations <- mut.data.dv[which(mut.data.dv$sample == "AN_Dv-Ancestor-1"),"variant_id"]
for(variant in unique(mut.data.ua3.dv$variant_id)){
if(variant %in% ancestor.mutations){
mut.data.ua3.dv[which(mut.data.ua3.dv$variant_id == variant),"ancestor"] <- "ancestral"
} else {
mut.data.ua3.dv[which(mut.data.ua3.dv$variant_id == variant),"ancestor"] <- "non-ancestral"
}
}
###### format early generations mutations data for mmp
mut.data.eg.mm <- mut.data.mm[grep(c("(AN_Coculture-Ancestor|_UE3|CI(_3_|_7_|_20_|_27_|_29_|_3(1_|2_|4_)|_10_))"), mut.data.mm$sample),]
mut.data.eg.mm$transfer <- sapply(mut.data.eg.mm$sample, function(i) strsplit(as.character(i), split = "-")[[1]][2])
mut.data.eg.mm$line <- sapply(mut.data.eg.mm$sample, function(i) strsplit(as.character(i), split = "-")[[1]][1])
mut.data.eg.mm$freq2 <- as.numeric(as.character(sub("%", "",mut.data.eg.mm$freq)))
mut.data.eg.mm[is.na(mut.data.eg.mm$transfer),"transfer"] <- 152
## select UE3 line (change the grep pattern if you want to filter for a specific line)
mut.data.ua3.mm <- mut.data.eg.mm[grep("", mut.data.eg.mm$sample),]
# order based on transfer
mut.data.ua3.mm <- mut.data.ua3.mm[order(as.numeric(as.character(mut.data.ua3.mm$transfer))),]
# add line info
mut.data.ua3.mm$eline <- sapply(mut.data.ua3.mm$line, function(i) strsplit(as.character(i), split = "_", fixed = T)[[1]][2])
# get ancestral mutations
ancestor.mutations.mm <- mut.data.mm[which(mut.data.mm$sample == "AN_Coculture-Ancestor"),"variant_id"]
for(variant in unique(mut.data.ua3.mm$variant_id)){
if(variant %in% ancestor.mutations.mm){
mut.data.ua3.mm[which(mut.data.ua3.mm$variant_id == variant),"ancestor"] <- "ancestral"
} else {
mut.data.ua3.mm[which(mut.data.ua3.mm$variant_id == variant),"ancestor"] <- "non-ancestral"
}
}
#select non-ancestral mutations only for dvh
#mutations.1000.dv <- mut.data.ua3.dv[which(mut.data.ua3.dv$ancestor != "ancestral"),]
mutations.1000.dv <- mut.data.ua3.dv
#select non-ancestral mutations only for Mm
#mutations.1000.mm <- mut.data.ua3.mm[which(mut.data.ua3.mm$ancestor != "ancestral"),]
## if you keep ancestral mutations for plotting
mutations.1000.mm <- mut.data.ua3.mm
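# plot.line.pheatmap: for the chosen organism ("dvh" or "mmp"), builds a
# variant-by-sample mutation-frequency matrix over the UE3 sample order defined
# below and draws it with pheatmap (columns kept in fixed order, rows clustered,
# columns annotated by experiment type)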
plot.line.pheatmap <-function(org=c("dvh","mmp")){
library(RColorBrewer)
library(viridis)
dv.order <- c("TG_UE3",
"EP_UE3_03","UA3.152.03.D01","UA3.152.03.D02","UA3.152.03.D03",
"EP_UE3_10","UA3.152.10.D02","UA3.152.10.D03",#"UA3.152.10.D01", removed clone 1
"EP_UE3_09","UA3.152.09.D01","UA3.152.09.D02","UA3.152.09.D03"
)
#dv.order <- sub("UA3", "UE3", dv.order)
mm.order <- c("TG_UE3",
"EP_UE3_03","UA3.152.03.M01","UA3.152.03.M02","UA3.152.03.M03",
"EP_UE3_10","UA3.152.10.M02","UA3.152.10.M03",#"UA3.152.10.M01", removed clone 1
"EP_UE3_09","UA3.152.09.M01","UA3.152.09.M02","UA3.152.09.M03"
)
#mm.order <- sub("UA3", "UE3", mm.order)
## Select samples
if(org == "dvh"){
sample.order <- dv.order
mutations.file <- mutations.1000.dv
mutations.file$variant_id <- sub("Chr-", "", mutations.file$variant_id)
mutations.file$variant_id <- sub("pDV-", "", mutations.file$variant_id)
}
if(org == "mmp"){
sample.order <- mm.order
mutations.file <- mutations.1000.mm
mutations.file$variant_id <- sub("BX950229-", "", mutations.file$variant_id)
}
## select samples for freq heatmap
my.samples.freq <- subset(mutations.file, sample.name %in% sample.order)
## create matrix for freq
my.matrix.freq <- matrix(
nrow = length(unique(my.samples.freq$variant_id)),
ncol = length(unique(my.samples.freq$sample.name)),
dimnames = list(unique(my.samples.freq$variant_id),
unique(my.samples.freq$sample.name))
)
## fill the matrix
for(variant in unique(my.samples.freq$variant_id)){
for(sample in my.samples.freq$sample.name){
my.data <- my.samples.freq[which(my.samples.freq$variant_id == variant & my.samples.freq$sample.name == sample),"freq2"]
if(length(my.data) != 0){
my.matrix.freq[variant,sample] <- my.data
}else{
my.matrix.freq[variant,sample] <- 0
}
}
}
## custom sort matrix
my.matrix.freq <- my.matrix.freq[,order(factor(colnames(my.matrix.freq), levels=sample.order))]
colnames(my.matrix.freq) <- sub("UA3","UE3",colnames(my.matrix.freq))
## annotations for the sample type
#annotations <- data.frame(Type = c("Early-Gen","Early-Gen","Early-Gen","Early-Gen","1K","EPD","Clonal","Clonal","Clonal","EPD","Clonal","Clonal","Clonal","EPD","Clonal","Clonal","Clonal"))
col.annot <- unique(data.frame(my.samples.freq[,c("sample.name","experiment")]))
row.names(col.annot) <- sub("UA3","UE3", col.annot$sample.name)
col.annot <- col.annot[-1]
## plot heatmap
pheatmap(my.matrix.freq,cluster_cols = F, annotation_legend = F, treeheight_row = 0, scale = "none", color = viridis(30),border_color = "#333333", annotation_col = col.annot,drop_levels = F)
}
## multiple plotting
mm <- list(plot.line.pheatmap(org = "dvh")[[4]])
mm[[2]] <- plot.line.pheatmap(org = "mmp")[[4]]
z <- do.call(grid.arrange,mm)
plot(z)
#### dv matrix for SiFit
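# sifit_files_dv: builds a binary presence/absence matrix (variant x sample) for
# the Dv UE3 samples and writes the mutation-name, sample-name and mutation-matrix
# text files used as SiFit input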
sifit_files_dv <- function() {
my.samples <-
subset(
mutations.1000.dv,
sample.name %in% c(
"TG_UE3",
"EP_UE3_03",
"UA3.152.03.D01",
"UA3.152.03.D02",
"UA3.152.03.D03",
"EP_UE3_10",
# "UA3.152.10.D01", # removed clone 1 due to coverage
"UA3.152.10.D02",
"UA3.152.10.D03",
"EP_UE3_09",
"UA3.152.09.D01",
"UA3.152.09.D02",
"UA3.152.09.D03"
)
)
# create matrix
my.matrix <- matrix(
nrow = length(unique(my.samples$variant_id)),
ncol = length(unique(my.samples$sample.name)),
dimnames = list(
unique(my.samples$variant_id),
unique(my.samples$sample.name)
)
)
## Fill the matrix
for (variant in unique(my.samples$variant_id)) {
for (sample in my.samples$sample.name) {
my.data <-
my.samples[which(my.samples$variant_id == variant &
my.samples$sample.name == sample), "freq2"]
if (length(my.data) != 0) {
my.matrix[variant, sample] <- 1
} else{
my.matrix[variant, sample] <- 0
}
}
}
## Write matrix files for SiFit analysis
# mutation names
write.table(
row.names(my.matrix),
file = "~/Documents/GitHub/evolution-of-syntrophy/Clonal-Isolates/siFit/ue3_dv_mutation_names.txt",
sep = " ",
row.names = F,
col.names = F,
quote = F
)
# sample names
writeLines(colnames(my.matrix), con = "~/Documents/GitHub/evolution-of-syntrophy/Clonal-Isolates/siFit/ue3_dv_sample_names.txt", sep =
" ")
# mutation matrix
my.matrix <- cbind(id = seq(1, dim(my.matrix)[1]), my.matrix)
write.table(
my.matrix,
file = "~/Documents/GitHub/evolution-of-syntrophy/Clonal-Isolates/siFit/ue3_dv_mutation_matrix.txt",
sep = " ",
row.names = F,
col.names = F,
quote = F
)
}
#### mm matrix for SiFit
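# sifit_files_mm: same as sifit_files_dv, but for the Mm UE3 samples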
sifit_files_mm <- function() {
my.samples.mm <-
subset(
mutations.1000.mm,
sample.name %in% c(
"TG_UE3",
"EP_UE3_03",
"UA3.152.03.M01",
"UA3.152.03.M02",
"UA3.152.03.M03",
"EP_UE3_10",
"UA3.152.10.M01",
"UA3.152.10.M02",
"UA3.152.10.M03",
"EP_UE3_09",
"UA3.152.09.M01",
"UA3.152.09.M02",
"UA3.152.09.M03"
)
)
## create matrix
my.matrix.mm <- matrix(
nrow = length(unique(my.samples.mm$variant_id)),
ncol = length(unique(my.samples.mm$sample.name)),
dimnames = list(
unique(my.samples.mm$variant_id),
unique(my.samples.mm$sample.name)
)
)
## Fill the matrix
for (variant in unique(my.samples.mm$variant_id)) {
for (sample in my.samples.mm$sample.name) {
my.data <-
my.samples.mm[which(my.samples.mm$variant_id == variant &
my.samples.mm$sample.name == sample), "freq2"]
if (length(my.data) != 0) {
my.matrix.mm[variant, sample] <- 1
} else{
my.matrix.mm[variant, sample] <- 0
}
}
}
## Write matrix files for SiFit analysis
# mutation names
write.table(
row.names(my.matrix.mm),
file = "~/Documents/GitHub/evolution-of-syntrophy/Clonal-Isolates/siFit/ue3_mm_mutation_names.txt",
sep = " ",
row.names = F,
col.names = F,
quote = F
)
# sample names
writeLines(colnames(my.matrix.mm), con = "~/Documents/GitHub/evolution-of-syntrophy/Clonal-Isolates/siFit/ue3_mm_sample_names.txt", sep =
" ")
# matrix
my.matrix.mm <-
cbind(id = seq(1, dim(my.matrix.mm)[1]), my.matrix.mm)
write.table(
my.matrix.mm,
file = "~/Documents/GitHub/evolution-of-syntrophy/Clonal-Isolates/siFit/ue3_mm_mutation_matrix.txt",
sep = " ",
row.names = F,
col.names = F,
quote = F
)
}
|
7606a692e6ddce794bd065dc4d98bfa9acf64056 | c85471f60e9d5c462de6c60c880d05898ec81411 | /cache/gdatascience|tidytuesday|tv_ratings.R | 1c23a74ccdd0e528b8b7c04fc92abf67a3e9922a | [
"CC-BY-4.0",
"MIT"
] | permissive | a-rosenberg/github-content-scraper | 2416d644ea58403beacba33349ee127e4eb42afe | ed3340610a20bb3bd569f5e19db56008365e7ffa | refs/heads/master | 2020-09-06T08:34:58.186945 | 2019-11-15T05:14:37 | 2019-11-15T05:14:37 | 220,376,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,957 | r | gdatascience|tidytuesday|tv_ratings.R | ## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ------------------------------------------------------------------------
library(tidyverse)
theme_set(theme_light())
tv_ratings <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-01-08/IMDb_Economist_tv_ratings.csv")
summary(tv_ratings)
## ------------------------------------------------------------------------
# unique titleId
summary(unique(tv_ratings$titleId))
# unique title
summary(unique(tv_ratings$title))
## ------------------------------------------------------------------------
same_title <- tv_ratings %>%
group_by(title, titleId) %>%
summarise(avg_share = mean(share)) %>%
group_by(title) %>%
summarise(n = n()) %>%
filter(n > 1) %>%
pull(title)
tv_ratings %>%
filter(title %in% same_title) %>%
group_by(titleId, title) %>%
summarise(start_date = min(date)) %>%
arrange(title, start_date) %>%
ungroup() %>%
select(-titleId)
## ------------------------------------------------------------------------
tv_ratings_tidy <- tv_ratings %>%
mutate(genre = strsplit(genres, ",")) %>%
unnest(genre)
tv_ratings_tidy %>%
group_by(genre) %>%
summarise(avg_rating = mean(av_rating)) %>%
ungroup() %>%
mutate(genre = fct_reorder(genre, avg_rating)) %>%
ggplot(aes(genre, avg_rating, fill = genre)) +
geom_col(show.legend = FALSE) +
coord_flip() +
labs(x = "",
y = "Average rating",
title = "What genres have the best ratings?",
subtitle = "Reality-TV gets the worst ratings by far",
caption = "Designer: Tony Galvan @gdatascience1 | Source: IMDb")
## ------------------------------------------------------------------------
tv_ratings_tidy %>%
group_by(genre) %>%
summarise(avg_share = mean(share)) %>%
ungroup() %>%
mutate(genre = fct_reorder(genre, avg_share)) %>%
ggplot(aes(genre, avg_share, fill = genre)) +
geom_col(show.legend = FALSE) +
coord_flip() +
labs(x = "",
y = "Average share",
title = "What genres have the biggest share",
subtitle = "Adventure and Sci-Fi shows get a 3 share on average",
caption = "Designer: Tony Galvan @gdatascience1 | Source: IMDb")
## ------------------------------------------------------------------------
tv_shows <- tv_ratings %>%
group_by(titleId, title) %>%
summarise(tot_seasons = n(),
max_season = max(seasonNumber),
min_date = min(date),
max_date = max(date),
avg_rating = mean(av_rating),
max_rating = max(av_rating),
min_rating = min(av_rating),
tot_share = sum(share),
avg_share = mean(share),
max_share = max(share),
min_share = min(share))
## ------------------------------------------------------------------------
tv_ratings %>%
arrange(desc(av_rating)) %>%
head(5)
|
bbf09f92f36f03e66da756313aa1ab916ce604db | e6c710bca8f7ff0addbbb1e6b59d4c374e778a47 | /tests/testthat/test-genbody.R | 95f5b9713bf8fb9653b0768ffc2cd23678e5bb3e | [] | no_license | liubianshi/tabreg | 480017c09221339ff8928121672613b14d62532d | 364ad6c5bbf4ae3c1c786295049dfa18194e2ba2 | refs/heads/master | 2023-02-16T19:53:17.074809 | 2021-01-05T03:57:04 | 2021-01-05T03:57:04 | 223,850,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,775 | r | test-genbody.R | context("generate body data.table from estimate result")
l.reg <- local({
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
clotting <- data.frame(
u = c(5,10,15,20,30,40,60,80,100),
lot1 = c(118,58,42,35,27,25,21,19,18),
lot2 = c(69,35,26,21,18,16,13,12,12)
)
glm.1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma)
glm.2 <- glm(lot2 ~ log(u), data = clotting, family = Gamma)
l <- list(lm.D9, lm.D90, glm.1, glm.2)
names(l) <- paste0("R", seq_along(l))
l
})
test_that("adjust variable list", {
expect_identical(adjvari(NULL, NULL)$name, character(0))
expect_identical(adjvari(NULL, NULL)$label, character(0))
result <- adjvari(NULL, l.reg)
expect_length(result, 2L)
expect_identical(result$name, result$label)
vari = list(name = c("groupTrt", "log(u)"),
label = list("log(u)" = "u_ln"))
result2 <- adjvari(vari, l.reg)
expect_equal(result2$label, c("groupTrt", "u_ln"))
expect_error(adjvari(list(label = "N"), l.reg))
})
test_that("generate body table", {
esti <- list(estimate = 3L, std.error = "(3)", singlerow = T)
star = adjstar(list(0.01, "*"))
vari <- list(c("groupTrt", "log(u)"), list("log(u)" = "log_u"))
result <- genbody(esti, l.reg, vari, star, "text")
expect_equal(result$term, c("groupTrt", "log_u"))
expect_equal(result[1, R1], "-0.371 (0.311)")
expect_equal(result[2, R3], "0.015* (0.000)")
esti$singlerow <- FALSE
result2 <- genbody(esti, l.reg, vari, star, "text")
expect_equal(result2$term, c("groupTrt", "", "log_u", ""))
expect_equal(result2[1:2, R1], c("-0.371", "(0.311)"))
})
test_that("test custom function", {
.genesti <- function(reg, m = 1L) {
reg_coef <- summary(reg)$coefficients
coef_df <- as.data.frame(reg_coef, row.names = FALSE)
coefnames <- c("estimate", "std.error", "statistic", "p.value")
names(coef_df) <- coefnames
coef_df$estimate <- coef_df$estimate * m
coef_df$std.error <- coef_df$std.error * m
coef_df$term <- row.names(reg_coef)
coef_df[c("term", coefnames)]
}
esti <- list(estimate = 3L, std.error = "(3)", singlerow = T,
fun = .genesti, fun.args = list(m = 10L))
star = adjstar(list(0.01, "*"))
vari <- list(c("groupTrt", "log(u)"), list("log(u)" = "log_u"))
result <- genbody(esti, l.reg, vari, star, "text")
expect_equal(result[1, R1], "-3.710 (3.114)")
expect_equal(result[2, R3], "0.153* (0.004)")
})
|
24d5b505c0269ee1e8cad294858365bf14eef92c | 288cd32888512c1f309bf32410ab27eebfd19745 | /Code/Other_plots/Experimental_data.R | f900c2d92f432fc16e9b393b809e98a22d187211 | [] | no_license | sebapersson/SUC2_paper | 297e8ea8357bf08208071bae374ec5cc083f195d | 3bc8bfe91d39139c2e1e9c4eb1da818bd574da54 | refs/heads/master | 2023-01-01T07:39:53.611278 | 2020-10-15T12:56:35 | 2020-10-15T12:56:35 | 255,879,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,261 | r | Experimental_data.R | library(tidyverse)
library(ggthemes)
library(stringr)
# This file produces the plots for the invertase experimental data. To keep a consistent
# graphical profile throughout the paper, all results (including those not produced by the
# model) are plotted in R.
# Outputs:
# A bar-chart showing invertase activity in high and low glucose
# General plotting parameters
my_theme <- theme_tufte(base_size = 16) + theme(plot.title = element_text(hjust = 0.5, size = 14, face="bold"),
plot.subtitle = element_text(hjust = 0.5)) +
theme(axis.title=element_text(size=18))
my_colors <- c("#1f78b4", "#33a02c", "#08519c", "#006d2c", "#ff7f00", "#a6cee3")
BASE_HEIGHT <- 5
BASE_WIDTH <- 7.0
# Function that processes the invertase data and outputs the result in a bar-chart
# Args:
# void
# Returns:
# void
process_invertase_data <- function()
{
data_values_low <- tibble(value = c(714.0953, 752.4738, 18.52177),
sd = c(24.76474, 94.90088, 4.324993),
type = as.factor(c("wt", "dreg1", "dreg1dsnf1")),
glc = as.factor(c("low", "low", "low"))) %>%
mutate(frame = c(0, 0, 850))
data_high <- tibble(value = c(158.1396, 714.0953, 33.55298, 31.47122),
sd = c(24.76474, 8.004206629, 19.09195, 8.117907953),
type = as.factor(c("wt", "wt", "dsnf1", "dsnf1")),
glc = as.factor(c("high", "low", "high", "low"))) %>%
mutate(frame = c(0, 0, 850, 0))
p1 <- ggplot(data_values_low, aes(type, value)) +
geom_bar(stat='identity', position='dodge', color ="black", fill = my_colors[6]) +
geom_errorbar(aes(ymin = value-sd, ymax = value+sd), width=0.2, position=position_dodge(.9)) +
scale_x_discrete(limit = c("wt", "dreg1", "dreg1dsnf1"),
labels = c("wt", "reg1", "reg1_snf1")) +
geom_rangeframe(aes(type, frame), sides = "l") +
labs(y = "Invertase activity [mU/mg]", x = "") +
ylim(0, 850) +
my_theme + theme(legend.position = "none",
axis.title.x = element_blank(),
axis.text.x = element_blank())
p2 <- ggplot(data_high, aes(type, value, fill = glc)) +
geom_bar(stat='identity', position='dodge', color ="black") +
geom_errorbar(aes(ymin = value-sd, ymax = value+sd), width=0.2, position=position_dodge(.9)) +
scale_x_discrete(limit = c("wt", "dsnf1"),
labels = c("wt", "snf1")) +
geom_rangeframe(aes(type, frame), sides = "l") +
labs(y = "Invertase activity [mU/mg]", x = "") +
scale_fill_manual(values = my_colors[c(1, 6)]) +
ylim(0, 850) +
my_theme + theme(legend.position = "none",
axis.title.x = element_blank(),
axis.text.x = element_blank())
dir_save <- "../../Result/Experimental_data/"
if(!dir.exists(dir_save)) dir.create(dir_save)
path_save1 <- str_c(dir_save, "Invertase_activity_low.pdf")
path_save2 <- str_c(dir_save, "Invertase_activity_high.pdf")
ggsave(path_save1, plot = p1, width = BASE_WIDTH, height = BASE_HEIGHT)
ggsave(path_save2, plot = p2, width = BASE_WIDTH, height = BASE_HEIGHT)
return(0)
}
process_invertase_data()
|
40ca5e3e704da4bd645c67188a5460cfcf85e601 | f3f5b8a3ee512ac1d3e540eb9e293642a2373ce7 | /creditmodel_필요한함수선정.R | 33c2cbd66505a64bfe467bd1843cf665ff4f39a2 | [] | no_license | Yang-Munil/diabetes_related_occupation_in_Korean_population | 184abc2440f2c9a7dee28f2524692524de19d218 | 36ce066fb6dc794068d56bcccc4091f5a13e2fa4 | refs/heads/main | 2023-07-08T14:55:10.525942 | 2021-08-12T10:42:56 | 2021-08-12T10:42:56 | 346,424,723 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,145 | r | creditmodel_필요한함수선정.R | library(creditmodel)
library(ggplot2)
# equal sample size breaks
equ_breaks = cut_equal(dat = UCICreditCard[, "PAY_AMT2"], g = 10)
# select best bins
bins_control = list(bins_num = 10, bins_pct = 0.02, b_chi = 0.02,
b_odds = 0.1, b_psi = 0.05, b_or = 0.15, mono = 0.3, odds_psi = 0.1, kc = 1)
select_best_breaks(dat = UCICreditCard, x = "PAY_AMT2", breaks = equ_breaks,
target = "default.payment.next.month", occur_time = "apply_date",
sp_values = NULL, bins_control = bins_control)
# fast_high_cor_filter In a highly correlated variable group, select the variable with the highest IV.
# high_cor_filter In a highly correlated variable group, select the variable with the highest IV.
# calculate iv for each variable.
iv_list = feature_selector(dat_train = UCICreditCard[1:1000,], dat_test = NULL,
target = "default.payment.next.month",
occur_time = "apply_date",
filter = c("IV"), cv_folds = 1, iv_cp = 0.01,
ex_cols = "ID$|date$|default.payment.next.month$",
save_data = FALSE, vars_name = FALSE)
high_cor_list = fast_high_cor_filter(dat = UCICreditCard[1:1000,],
com_list = iv_list, save_data = FALSE,
ex_cols = "ID$|date$|default.payment.next.month$",
p = 0.9, cor_class = FALSE ,var_name = FALSE)
# get_breaks is for generating optimal binning for numerical and nominal variables. The get_breaks_all is a simpler wrapper for get_breaks.
# controls
## tree_control: the list of tree parameters.
#   - p: the minimum percent of observations in any terminal <leaf> node.
#     0 < p < 1; 0.01 to 0.1 usually work.
#   - cp: complexity parameter. The larger, the more conservative the algorithm
#     will be. 0 < cp < 1; 0.0001 to 0.0000001 usually work.
#   - xval: number of cross-validations. Default: 5
#   - max_depth: maximum depth of a tree. Default: 10
## bins_control: the list of binning parameters.
#   - bins_num: the maximum number of bins. 5 to 10 usually work. Default: 10
#   - bins_pct: the minimum percent of observations in any bins. 0 < bins_pct < 1;
#     0.01 to 0.1 usually work. Default: 0.02
#   - b_chi: the minimum threshold of chi-square merge. 0 < b_chi < 1;
#     0.01 to 0.1 usually work. Default: 0.02
#   - b_odds: the minimum threshold of odds merge. 0 < b_odds < 1;
#     0.05 to 0.2 usually work. Default: 0.1
#   - b_psi: the maximum threshold of PSI in any bins. 0 < b_psi < 1;
#     0 to 0.1 usually work. Default: 0.05
#   - b_or: the maximum threshold of G/B index in any bins. 0 < b_or < 1;
#     0.05 to 0.3 usually work. Default: 0.15
tree_control = list(p = 0.02, cp = 0.000001, xval = 5, maxdepth = 10)
bins_control = list(bins_num = 10, bins_pct = 0.02, b_chi = 0.02, b_odds = 0.1,
b_psi = 0.05, b_or = 15, mono = 0.2, odds_psi = 0.1, kc = 5)
# get categrory variable breaks
b = get_breaks(dat = UCICreditCard[1:1000,], x = "MARRIAGE",
target = "default.payment.next.month",
occur_time = "apply_date",
sp_values = list(-1, "missing"),
tree_control = tree_control, bins_control = bins_control)
# get numeric variable breaks
b2 = get_breaks(dat = UCICreditCard[1:1000,], x = "PAY_2",
target = "default.payment.next.month",
occur_time = "apply_date",
sp_values = list(-1, "missing"),
tree_control = tree_control, bins_control = bins_control)
# get breaks of all predictive variables
b3 = get_breaks_all(dat = UCICreditCard[1:10000,], target = "default.payment.next.month",
x_list = c("MARRIAGE","PAY_2"),
occur_time = "apply_date", ex_cols = "ID",
sp_values = list(-1, "missing"),
tree_control = tree_control, bins_control = bins_control,
save_data = FALSE)
b3
# get_bins_table is used to generate summary information of variables. get_bins_table_all can generate bins tables for all specified independent variables. (important)
breaks_list = get_breaks_all(dat = UCICreditCard, x_list = names(UCICreditCard)[3:4],
target = "default.payment.next.month", equal_bins =TRUE,best = FALSE, g=5, ex_cols = "ID|apply_date", save_data = FALSE)
get_bins_table <- get_bins_table_all(dat = UCICreditCard, breaks_list = breaks_list, target = "default.payment.next.month")
head(get_bins_table)
get_bins_table <- as.data.frame(get_bins_table)
################# Scorecard construction ################
#woe transforming
train_woe = woe_trans_all(dat = dat_train,
target = "target",
breaks_list = breaks_list,
woe_name = FALSE)
test_woe = woe_trans_all(dat = dat_test,
target = "target",
breaks_list = breaks_list,
note = FALSE)
Formula = as.formula(paste("target", paste(x_list, collapse = ' + '), sep = ' ~ '))
set.seed(46)
lr_model = glm(Formula, data = train_woe[, c("target", x_list)], family = binomial(logit))
#get LR coefficient
dt_imp_LR = get_logistic_coef(lg_model = lr_model, save_data = FALSE)
bins_table = get_bins_table_all(dat = dat_train, target = "target",
x_list = x_list,dat_test = dat_test,
breaks_list = breaks_list, note = FALSE)
#score card
LR_score_card = get_score_card(lg_model = lr_model, bins_table, target = "target")
#scoring
train_pred = dat_train[, c("ID", "apply_date", "target")]
test_pred = dat_test[, c("ID", "apply_date", "target")]
train_pred$pred_LR = score_transfer(model = lr_model,
tbl_woe = train_woe,
save_data = TRUE)[, "score"]
test_pred$pred_LR = score_transfer(model = lr_model,
tbl_woe = test_woe, save_data = FALSE)[, "score"]
train_pred$target <- as.factor(train_pred$target)
ggplot(train_pred, aes(x=pred_LR, fill = target, color = target)) +
geom_histogram(aes(y=..density..), alpha=0.5, position = "dodge", binwidth = 20) +
theme(legend.position = "top") +
theme_minimal() +
theme(legend.position = "top") +
  ggtitle("Score distribution") +
theme(plot.title = element_text(family = "serif", face = "bold", hjust = 0.5, size = 20, color = "black")) +
  labs(x="Score", y="Count") +
theme(axis.title = element_text(family = "serif", face = "bold", hjust = 0.5, size = 20, color = "black"))
test_pred$target <- as.factor(test_pred$target)
ggplot(test_pred, aes(x=pred_LR, fill = target, color = target)) +
geom_histogram(aes(y=..density..), alpha=0.5, position = "dodge", binwidth = 20) +
theme(legend.position = "top") +
theme_minimal() +
theme(legend.position = "top") +
  ggtitle("Score distribution") +
theme(plot.title = element_text(family = "serif", face = "bold", hjust = 0.5, size = 20, color = "black")) +
  labs(x="Score", y="Count") +
theme(axis.title = element_text(family = "serif", face = "bold", hjust = 0.5, size = 20, color = "black"))
############## Package functions to be used for monthly model evaluation ###############
# model result plots model_result_plot is a wrapper of following:
# 1. perf_table is for generating a model performance table.
# 2. ks_plot is for K-S.
# 3. roc_plot is for ROC.
# 4. lift_plot is for Lift Chart.
# 5. score_distribution_plot is for ploting the score distribution.
sub = cv_split(UCICreditCard, k = 30)[[1]]
dat = UCICreditCard[sub,]
dat = re_name(dat, "default.payment.next.month", "target")
x_list = c("PAY_0", "LIMIT_BAL", "PAY_AMT5", "PAY_3", "PAY_2")
dat = data_cleansing(dat, target = "target", obs_id = "ID",x_list = x_list,
occur_time = "apply_date", miss_values = list("", -1))
dat = process_nas(dat,default_miss = TRUE)
train_test = train_test_split(dat, split_type = "OOT", prop = 0.7,
occur_time = "apply_date")
dat_train = train_test$train
dat_test = train_test$test
Formula = as.formula(paste("target", paste(x_list, collapse = ' + '), sep = ' ~ '))
set.seed(46)
lr_model = glm(Formula, data = dat_train[, c("target", x_list)], family = binomial(logit))
dat_train$pred_LR = round(predict(lr_model, dat_train[, x_list], type = "response"), 5)
dat_test$pred_LR = round(predict(lr_model, dat_test[, x_list], type = "response"), 5)
dat_train$Score <- p_to_score(p = dat_train$pred_LR, PDO = 20, base = 1000, ratio = 1)
dat_test$Score <- p_to_score(p = dat_test$pred_LR, PDO = 20, base = 1000, ratio = 1)
# model evaluation
perf_table <- perf_table(train_pred = dat_train, test_pred = dat_test, target = "target", score = "pred_LR")
head(perf_table)
ks_plot(train_pred = dat_train, test_pred = dat_test, target = "target", score = "Score")
roc_plot(train_pred = dat_train, test_pred = dat_test, target = "target", score = "Score")
lift_plot(train_pred = dat_train, test_pred = dat_test, target = "target", score = "Score")
score_distribution_plot(train_pred = dat_train, test_pred = dat_test,
target = "target", score = "Score")
model_result_plot(train_pred = dat_train, test_pred = dat_test,
target = "target", score = "Score")
library(InformationValue)
data('ActualsAndScores')
optimalCutoff(actuals=ActualsAndScores$Actuals,
predictedScores=ActualsAndScores$PredictedScores, optimiseFor="Both", returnDiagnostics=TRUE)
data('ActualsAndScores')
plotROC(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
somersD(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
|
ea9ba0543ff85a236dc896205289b8a773c60c36 | 35f2d27328ea1ce21fe6b88e4581784195d5b6d9 | /Shiny/Plots/test.r | 106a2a66ea565754598b0c2df81705eada0a606d | [] | no_license | alfcrisci/devium | a5430872af7b60d4c68b858f84506f7627cd6190 | 2d0f342fa50d9d9b45affea04743f21844134ff9 | refs/heads/master | 2021-01-18T19:14:59.333668 | 2013-09-25T22:52:54 | 2013-09-25T22:52:54 | 13,155,442 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 136 | r | test.r |
install.packages("shiny")
library(shiny)
runGitHub("Devium", username = "dgrapov",ref = "master", subdir = "Shiny/Plots", port = 8100)
|
db213aa6a8e16dfc64474e086bec87d687b86598 | 04c2710db9de87bff22def23f926c3ef7d804614 | /r_plotting.R | aeb0ea50867f1297f2dcd0812711d4424c0ee412 | [] | no_license | jhess90/classification_scripts | 840b76372954f876903af1ffc46467c0d698ca8b | ab3501b21b3f40f9d138493c6beda9fbf2b4a3ee | refs/heads/master | 2021-01-21T14:24:47.783155 | 2019-04-02T16:43:18 | 2019-04-02T16:43:18 | 59,316,441 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,217 | r | r_plotting.R | library(openxlsx)
library(ggplot2)
library(reshape2)
#source("~/dropbox/mult_rp_files/r_test/multiplot.R")
source("~/Dropbox/mult_rp_files/r_test/multiplot.R")
library(zoo)
library(gplots)
library(RColorBrewer)
library(abind)
library(gridGraphics)
library(grid)
library(gridExtra)
saveAsPng <- T
file_list <- c('nl_avg_data_M1_dict_total.xlsx','nl_avg_data_S1_dict_total.xlsx','nl_avg_data_PmD_dict_total.xlsx')
catch_file_list <- c('catch_nl_avg_data_M1_dict_total.xlsx','catch_nl_avg_data_S1_dict_total.xlsx','catch_nl_avg_data_PmD_dict_total.xlsx')
region_list <- c('M1','S1','PmD')
time <- seq(from=-0.35,to=1.0,by=0.05)
gf_total <- read.xlsx('gf_avg_data_M1_dict_total.xlsx',sheet=1,colNames=T)
gf_time <- seq(-0.35,1.0,by=0.005)
#########
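# plot_gf: smooths a grip-force average and its standard deviation with a 3-point
# rolling mean, trims both to align with the -0.35 to 1.0 s time axis, and saves a
# mean +/- SD ribbon plot to "<key>_gf.png".
#   gf_value  - vector of average grip force over time
#   std_value - vector of grip-force standard deviations (same length as gf_value)
#   key       - label used in the plot title and the output file name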
plot_gf <- function(gf_value,std_value,key){
png(paste(key,"_gf.png",sep=""),width=6,height=4,units="in",res=500)
#cat(key,'\n')
gf_avg <- rollmeanr(gf_value,3)
gf_std <- rollmeanr(std_value,3)
#align with rollmeanr binned data
gf_avg <- gf_avg[28:298]
gf_std <- gf_std[28:298]
#gf_avg <- gf_value[10:50]
#gf_std <- std_value[10:50]
#gf_time <- seq(-0.45,0-.25,by=0.005)
upper <- gf_avg + gf_std
lower <- gf_avg - gf_std
gf_df <- data.frame(gf_time,gf_avg,upper,lower)
  p <- ggplot(data=gf_df,aes(x=gf_time,y=gf_avg)) + geom_line() + theme(plot.margin=unit(c(0.5,1.5,0.5,3.0),"cm")) #margin order: top,right,btm,left
  p <- p + geom_ribbon(aes(ymin=lower,ymax=upper),alpha=0.15,show.legend = F) + geom_vline(xintercept=0)
p <- p + labs(title=paste("gripforce",key),y="unit", x="time(s)")
plot(p)
dev.off()
}
for(region_index in 1:length(file_list)){
cat("\nplotting region:",region_list[region_index])
filename = file_list[region_index]
wb <- loadWorkbook(filename)
num_sheets <- length(sheets(wb))
total_array_name <- paste(region_list[region_index],"_unit_info",sep="")
total_array <- array(NA,dim=c(28,32,num_sheets))
r0_succ_cue_name <- paste(region_list[region_index],"_r0_succ_cue_all",sep="")
r1_succ_cue_name <- paste(region_list[region_index],"_r1_succ_cue_all",sep="")
r2_succ_cue_name <- paste(region_list[region_index],"_r2_succ_cue_all",sep="")
r3_succ_cue_name <- paste(region_list[region_index],"_r3_succ_cue_all",sep="")
r0_succ_result_name <- paste(region_list[region_index],"_r0_succ_result_all",sep="")
r1_succ_result_name <- paste(region_list[region_index],"_r1_succ_result_all",sep="")
r2_succ_result_name <- paste(region_list[region_index],"_r2_succ_result_all",sep="")
r3_succ_result_name <- paste(region_list[region_index],"_r3_succ_result_all",sep="")
r0_fail_cue_name <- paste(region_list[region_index],"_r0_fail_cue_all",sep="")
r1_fail_cue_name <- paste(region_list[region_index],"_r1_fail_cue_all",sep="")
r2_fail_cue_name <- paste(region_list[region_index],"_r2_fail_cue_all",sep="")
r3_fail_cue_name <- paste(region_list[region_index],"_r3_fail_cue_all",sep="")
r0_fail_result_name <- paste(region_list[region_index],"_r0_fail_result_all",sep="")
r1_fail_result_name <- paste(region_list[region_index],"_r1_fail_result_all",sep="")
r2_fail_result_name <- paste(region_list[region_index],"_r2_fail_result_all",sep="")
r3_fail_result_name <- paste(region_list[region_index],"_r3_fail_result_all",sep="")
p0_succ_cue_name <- paste(region_list[region_index],"_p0_succ_cue_all",sep="")
p1_succ_cue_name <- paste(region_list[region_index],"_p1_succ_cue_all",sep="")
p2_succ_cue_name <- paste(region_list[region_index],"_p2_succ_cue_all",sep="")
p3_succ_cue_name <- paste(region_list[region_index],"_p3_succ_cue_all",sep="")
p0_succ_result_name <- paste(region_list[region_index],"_p0_succ_result_all",sep="")
p1_succ_result_name <- paste(region_list[region_index],"_p1_succ_result_all",sep="")
p2_succ_result_name <- paste(region_list[region_index],"_p2_succ_result_all",sep="")
p3_succ_result_name <- paste(region_list[region_index],"_p3_succ_result_all",sep="")
p0_fail_cue_name <- paste(region_list[region_index],"_p0_fail_cue_all",sep="")
p1_fail_cue_name <- paste(region_list[region_index],"_p1_fail_cue_all",sep="")
p2_fail_cue_name <- paste(region_list[region_index],"_p2_fail_cue_all",sep="")
p3_fail_cue_name <- paste(region_list[region_index],"_p3_fail_cue_all",sep="")
p0_fail_result_name <- paste(region_list[region_index],"_p0_fail_result_all",sep="")
p1_fail_result_name <- paste(region_list[region_index],"_p1_fail_result_all",sep="")
p2_fail_result_name <- paste(region_list[region_index],"_p2_fail_result_all",sep="")
p3_fail_result_name <- paste(region_list[region_index],"_p3_fail_result_all",sep="")
all_r_succ_cue_name <- paste(region_list[region_index],"_all_r_succ_cue",sep="")
all_r_fail_cue_name <- paste(region_list[region_index],"_all_r_fail_cue",sep="")
all_r_succ_result_name <- paste(region_list[region_index],"_all_r_succ_result",sep="")
all_r_fail_result_name <- paste(region_list[region_index],"_all_r_fail_result",sep="")
all_p_succ_cue_name <- paste(region_list[region_index],"_all_p_succ_cue",sep="")
all_p_fail_cue_name <- paste(region_list[region_index],"_all_p_fail_cue",sep="")
all_p_succ_result_name <- paste(region_list[region_index],"_all_p_succ_result",sep="")
all_p_fail_result_name <- paste(region_list[region_index],"_all_p_fail_result",sep="")
no_r_succ_cue_name <- paste(region_list[region_index],"_no_r_succ_cue",sep="")
no_r_fail_cue_name <- paste(region_list[region_index],"_no_r_fail_cue",sep="")
no_r_succ_result_name <- paste(region_list[region_index],"_no_r_succ_result",sep="")
no_r_fail_result_name <- paste(region_list[region_index],"_no_r_fail_result",sep="")
no_p_succ_cue_name <- paste(region_list[region_index],"_no_p_succ_cue",sep="")
no_p_fail_cue_name <- paste(region_list[region_index],"_no_p_fail_cue",sep="")
no_p_succ_result_name <- paste(region_list[region_index],"_no_p_succ_result",sep="")
no_p_fail_result_name <- paste(region_list[region_index],"_no_p_fail_result",sep="")
r0_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r1_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r2_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r3_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r0_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r1_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r2_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r3_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r0_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r1_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r2_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r3_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r0_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r1_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r2_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r3_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p0_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p1_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p2_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p3_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p0_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p1_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p2_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p3_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p0_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p1_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p2_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p3_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p0_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p1_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p2_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p3_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_r_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_r_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_r_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_r_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_p_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_p_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_p_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
all_p_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_r_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_r_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_r_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_r_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_p_succ_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_p_fail_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_p_succ_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
no_p_fail_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
for (j in 1:num_sheets){
#cat('plotting unit', j,"\n")
tmp <- read.xlsx(filename,sheet = j, colNames=T)
png(paste(region_list[region_index],"unit_",j,".png",sep=""),width=8,height=6,units="in",res=500)
tmp2 <- tmp[1:28,1:32]
tmp2$p3_fail_cue <- rollmeanr(tmp$p3_fail_cue,3)
tmp2$p2_fail_cue <- rollmeanr(tmp$p2_fail_cue,3)
tmp2$p1_fail_cue <- rollmeanr(tmp$p1_fail_cue,3)
tmp2$p0_fail_cue <- rollmeanr(tmp$p0_fail_cue,3)
tmp2$r0_succ_cue <- rollmeanr(tmp$r0_succ_cue,3)
tmp2$r1_succ_cue <- rollmeanr(tmp$r1_succ_cue,3)
tmp2$r2_succ_cue <- rollmeanr(tmp$r2_succ_cue,3)
tmp2$r3_succ_cue <- rollmeanr(tmp$r3_succ_cue,3)
tmp2$p3_fail_result <- rollmeanr(tmp$p3_fail_result,3)
tmp2$p2_fail_result <- rollmeanr(tmp$p2_fail_result,3)
tmp2$p1_fail_result <- rollmeanr(tmp$p1_fail_result,3)
tmp2$p0_fail_result <- rollmeanr(tmp$p0_fail_result,3)
tmp2$r0_succ_result <- rollmeanr(tmp$r0_succ_result,3)
tmp2$r1_succ_result <- rollmeanr(tmp$r1_succ_result,3)
tmp2$r2_succ_result <- rollmeanr(tmp$r2_succ_result,3)
tmp2$r3_succ_result <- rollmeanr(tmp$r3_succ_result,3)
tmp2$p3_succ_cue <- rollmeanr(tmp$p3_succ_cue,3)
tmp2$p2_succ_cue <- rollmeanr(tmp$p2_succ_cue,3)
tmp2$p1_succ_cue <- rollmeanr(tmp$p1_succ_cue,3)
tmp2$p0_succ_cue <- rollmeanr(tmp$p0_succ_cue,3)
tmp2$r0_fail_cue <- rollmeanr(tmp$r0_fail_cue,3)
tmp2$r1_fail_cue <- rollmeanr(tmp$r1_fail_cue,3)
tmp2$r2_fail_cue <- rollmeanr(tmp$r2_fail_cue,3)
tmp2$r3_fail_cue <- rollmeanr(tmp$r3_fail_cue,3)
tmp2$p3_succ_result <- rollmeanr(tmp$p3_succ_result,3)
tmp2$p2_succ_result <- rollmeanr(tmp$p2_succ_result,3)
tmp2$p1_succ_result <- rollmeanr(tmp$p1_succ_result,3)
tmp2$p0_succ_result <- rollmeanr(tmp$p0_succ_result,3)
tmp2$r0_fail_result <- rollmeanr(tmp$r0_fail_result,3)
tmp2$r1_fail_result <- rollmeanr(tmp$r1_fail_result,3)
tmp2$r2_fail_result <- rollmeanr(tmp$r2_fail_result,3)
tmp2$r3_fail_result <- rollmeanr(tmp$r3_fail_result,3)
total_array[1:28,1:32,j] = data.matrix(tmp2)
#making array to compare between units
r0_succ_cue_matrix[j,] <- tmp2$r0_succ_cue
r1_succ_cue_matrix[j,] <- tmp2$r1_succ_cue
r2_succ_cue_matrix[j,] <- tmp2$r2_succ_cue
r3_succ_cue_matrix[j,] <- tmp2$r3_succ_cue
r0_succ_result_matrix[j,] <- tmp2$r0_succ_result
r1_succ_result_matrix[j,] <- tmp2$r1_succ_result
r2_succ_result_matrix[j,] <- tmp2$r2_succ_result
r3_succ_result_matrix[j,] <- tmp2$r3_succ_result
r0_fail_cue_matrix[j,] <- tmp2$r0_fail_cue
r1_fail_cue_matrix[j,] <- tmp2$r1_fail_cue
r2_fail_cue_matrix[j,] <- tmp2$r2_fail_cue
r3_fail_cue_matrix[j,] <- tmp2$r3_fail_cue
r0_fail_result_matrix[j,] <- tmp2$r0_fail_result
r1_fail_result_matrix[j,] <- tmp2$r1_fail_result
r2_fail_result_matrix[j,] <- tmp2$r2_fail_result
r3_fail_result_matrix[j,] <- tmp2$r3_fail_result
p0_succ_cue_matrix[j,] <- tmp2$p0_succ_cue
p1_succ_cue_matrix[j,] <- tmp2$p1_succ_cue
p2_succ_cue_matrix[j,] <- tmp2$p2_succ_cue
p3_succ_cue_matrix[j,] <- tmp2$p3_succ_cue
p0_succ_result_matrix[j,] <- tmp2$p0_succ_result
p1_succ_result_matrix[j,] <- tmp2$p1_succ_result
p2_succ_result_matrix[j,] <- tmp2$p2_succ_result
p3_succ_result_matrix[j,] <- tmp2$p3_succ_result
p0_fail_cue_matrix[j,] <- tmp2$p0_fail_cue
p1_fail_cue_matrix[j,] <- tmp2$p1_fail_cue
p2_fail_cue_matrix[j,] <- tmp2$p2_fail_cue
p3_fail_cue_matrix[j,] <- tmp2$p3_fail_cue
p0_fail_result_matrix[j,] <- tmp2$p0_fail_result
p1_fail_result_matrix[j,] <- tmp2$p1_fail_result
p2_fail_result_matrix[j,] <- tmp2$p2_fail_result
p3_fail_result_matrix[j,] <- tmp2$p3_fail_result
all_r_succ_cue_matrix[j,] = (tmp2$r1_succ_cue + tmp2$r2_succ_cue + tmp2$r3_succ_cue)/3
all_r_fail_cue_matrix[j,] = (tmp2$r1_fail_cue + tmp2$r2_fail_cue + tmp2$r3_fail_cue)/3
all_r_succ_result_matrix[j,] = (tmp2$r1_succ_result + tmp2$r2_succ_result + tmp2$r3_succ_result)/3
all_r_fail_result_matrix[j,] = (tmp2$r1_fail_result + tmp2$r2_fail_result + tmp2$r3_fail_result)/3
all_p_succ_cue_matrix[j,] = (tmp2$p1_succ_cue + tmp2$p2_succ_cue + tmp2$p3_succ_cue)/3
all_p_fail_cue_matrix[j,] = (tmp2$p1_fail_cue + tmp2$p2_fail_cue + tmp2$p3_fail_cue)/3
all_p_succ_result_matrix[j,] = (tmp2$p1_succ_result + tmp2$p2_succ_result + tmp2$p3_succ_result)/3
all_p_fail_result_matrix[j,] = (tmp2$p1_fail_result + tmp2$p2_fail_result + tmp2$p3_fail_result)/3
no_r_succ_cue_matrix[j,] <- tmp2$r0_succ_cue
no_r_fail_cue_matrix[j,] <- tmp2$r0_fail_cue
no_r_succ_result_matrix[j,] <- tmp2$r0_succ_result
no_r_fail_result_matrix[j,] <- tmp2$r0_fail_result
no_p_succ_cue_matrix[j,] <- tmp2$p0_succ_cue
no_p_fail_cue_matrix[j,] <- tmp2$p0_fail_cue
no_p_succ_result_matrix[j,] <- tmp2$p0_succ_result
no_p_fail_result_matrix[j,] <- tmp2$p0_fail_result
#plotting individual unit
df_cue <- data.frame(time,p3_fail=tmp2$p3_fail_cue,p2_fail=tmp2$p2_fail_cue,p1_fail=tmp2$p1_fail_cue,p0_fail=tmp2$p0_fail_cue,r0_succ=tmp2$r0_succ_cue,r1_succ=tmp2$r1_succ_cue,r2_succ=tmp2$r2_succ_cue,r3_succ=tmp2$r3_succ_cue)
    df_cue <- melt(df_cue, id.vars="time", variable.name="level")
    plt_cue <- ggplot(df_cue, aes(time,value)) + geom_line(aes(colour=level))
plt_cue <- plt_cue + scale_color_brewer(palette="RdYlGn") +labs(title=paste(region_list[region_index],"unit",j,"cue"),y="normalized firing rate", x="time(s)") + geom_vline(xintercept=0)
df_result <- data.frame(time,p3_fail=tmp2$p3_fail_result,p2_fail=tmp2$p2_fail_result,p1_fail=tmp2$p1_fail_result,p0_fail=tmp2$p0_fail_result,r0_succ=tmp2$r0_succ_result,r1_succ=tmp2$r1_succ_result,r2_succ=tmp2$r2_succ_result,r3_succ=tmp2$r3_succ_result)
    df_result <- melt(df_result, id.vars="time", variable.name="level")
    plt_result <- ggplot(df_result, aes(time,value)) + geom_line(aes(colour=level))
plt_result <- plt_result + scale_color_brewer(palette="RdYlGn") +labs(title='result',y="normalized firing rate", x="time(s)") + geom_vline(xintercept=0)
multiplot(plt_cue,plt_result,cols=1)
dev.off()
rm(tmp)
}
assign(r0_succ_cue_name,r0_succ_cue_matrix)
assign(r1_succ_cue_name,r1_succ_cue_matrix)
assign(r2_succ_cue_name,r2_succ_cue_matrix)
assign(r3_succ_cue_name,r3_succ_cue_matrix)
assign(r0_succ_result_name,r0_succ_result_matrix)
assign(r1_succ_result_name,r1_succ_result_matrix)
assign(r2_succ_result_name,r2_succ_result_matrix)
assign(r3_succ_result_name,r3_succ_result_matrix)
assign(r0_fail_cue_name,r0_fail_cue_matrix)
assign(r1_fail_cue_name,r1_fail_cue_matrix)
assign(r2_fail_cue_name,r2_fail_cue_matrix)
assign(r3_fail_cue_name,r3_fail_cue_matrix)
assign(r0_fail_result_name,r0_fail_result_matrix)
assign(r1_fail_result_name,r1_fail_result_matrix)
assign(r2_fail_result_name,r2_fail_result_matrix)
assign(r3_fail_result_name,r3_fail_result_matrix)
assign(p0_succ_cue_name,p0_succ_cue_matrix)
assign(p1_succ_cue_name,p1_succ_cue_matrix)
assign(p2_succ_cue_name,p2_succ_cue_matrix)
assign(p3_succ_cue_name,p3_succ_cue_matrix)
assign(p0_succ_result_name,p0_succ_result_matrix)
assign(p1_succ_result_name,p1_succ_result_matrix)
assign(p2_succ_result_name,p2_succ_result_matrix)
assign(p3_succ_result_name,p3_succ_result_matrix)
assign(p0_fail_cue_name,p0_fail_cue_matrix)
assign(p1_fail_cue_name,p1_fail_cue_matrix)
assign(p2_fail_cue_name,p2_fail_cue_matrix)
assign(p3_fail_cue_name,p3_fail_cue_matrix)
assign(p0_fail_result_name,p0_fail_result_matrix)
assign(p1_fail_result_name,p1_fail_result_matrix)
assign(p2_fail_result_name,p2_fail_result_matrix)
assign(p3_fail_result_name,p3_fail_result_matrix)
assign(all_r_succ_cue_name,all_r_succ_cue_matrix)
assign(all_r_fail_cue_name,all_r_fail_cue_matrix)
assign(all_r_succ_result_name,all_r_succ_result_matrix)
assign(all_r_fail_result_name,all_r_fail_result_matrix)
assign(all_p_succ_cue_name,all_p_succ_cue_matrix)
assign(all_p_fail_cue_name,all_p_fail_cue_matrix)
assign(all_p_succ_result_name,all_p_succ_result_matrix)
assign(all_p_fail_result_name,all_p_fail_result_matrix)
assign(no_r_succ_cue_name,no_r_succ_cue_matrix)
assign(no_r_fail_cue_name,no_r_fail_cue_matrix)
assign(no_r_succ_result_name,no_r_succ_result_matrix)
assign(no_r_fail_result_name,no_r_fail_result_matrix)
assign(no_p_succ_cue_name,no_p_succ_cue_matrix)
assign(no_p_fail_cue_name,no_p_fail_cue_matrix)
assign(no_p_succ_result_name,no_p_succ_result_matrix)
assign(no_p_fail_result_name,no_p_fail_result_matrix)
assign(total_array_name,total_array)
}
for(region_index in 1:length(catch_file_list)){
cat("\nplotting region (catch):",region_list[region_index])
filename = catch_file_list[region_index]
wb <- loadWorkbook(filename)
num_sheets <- length(sheets(wb))
total_array_name <- paste(region_list[region_index],"_catch_unit_info",sep="")
total_array <- array(NA,dim=c(28,20,num_sheets))
r_all_catch_cue_name <- paste(region_list[region_index],"_r_all_catch_cue",sep="")
r_all_catch_result_name <- paste(region_list[region_index],"_r_all_catch_result",sep="")
p_all_catch_cue_name <- paste(region_list[region_index],"_p_all_catch_cue",sep="")
p_all_catch_result_name <- paste(region_list[region_index],"_p_all_catch_result",sep="")
r_all_catch_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
r_all_catch_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p_all_catch_cue_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
p_all_catch_result_matrix <- matrix(NA,nrow=num_sheets,ncol=28,dimnames=list(1:num_sheets,time))
for (j in 1:num_sheets){
#cat('plotting unit', j,"catch trials\n")
tmp <- read.xlsx(filename,sheet = j, colNames=T)
tmp2 <- tmp[1:28,1:20]
tmp2$p3_catch_cue <- rollmean(tmp$p3_catch_cue,3)
tmp2$p2_catch_cue <- rollmean(tmp$p2_catch_cue,3)
tmp2$p1_catch_cue <- rollmean(tmp$p1_catch_cue,3)
tmp2$p0_catch_cue <- rollmean(tmp$p0_catch_cue,3)
tmp2$r0_catch_cue <- rollmean(tmp$r0_catch_cue,3)
tmp2$r1_catch_cue <- rollmean(tmp$r1_catch_cue,3)
tmp2$r2_catch_cue <- rollmean(tmp$r2_catch_cue,3)
tmp2$r3_catch_cue <- rollmean(tmp$r3_catch_cue,3)
tmp2$r_all_catch_cue <- rollmean(tmp$r_all_catch_cue,3)
tmp2$p_all_catch_cue <- rollmean(tmp$p_all_catch_cue,3)
tmp2$p3_catch_result <- rollmean(tmp$p3_catch_result,3)
tmp2$p2_catch_result <- rollmean(tmp$p2_catch_result,3)
tmp2$p1_catch_result <- rollmean(tmp$p1_catch_result,3)
tmp2$p0_catch_result <- rollmean(tmp$p0_catch_result,3)
tmp2$r0_catch_result <- rollmean(tmp$r0_catch_result,3)
tmp2$r1_catch_result <- rollmean(tmp$r1_catch_result,3)
tmp2$r2_catch_result <- rollmean(tmp$r2_catch_result,3)
tmp2$r3_catch_result <- rollmean(tmp$r3_catch_result,3)
tmp2$r_all_catch_result <- rollmean(tmp$r_all_catch_result,3)
tmp2$p_all_catch_result <- rollmean(tmp$p_all_catch_result,3)
total_array[1:28,1:20,j] <- data.matrix(tmp2)
r_all_catch_cue_matrix[j,] <- tmp2$r_all_catch_cue
r_all_catch_result_matrix[j,] <- tmp2$r_all_catch_result
p_all_catch_cue_matrix[j,] <- tmp2$p_all_catch_cue
p_all_catch_result_matrix[j,] <- tmp2$p_all_catch_result
df_cue <- data.frame(time,p3_catch=tmp2$p3_catch_cue,p2_catch=tmp2$p2_catch_cue,p1_catch=tmp2$p1_catch_cue,p0_catch=tmp2$p0_catch_cue,r0_catch=tmp2$r0_catch_cue,r1_catch=tmp2$r1_catch_cue,r2_catch=tmp2$r2_catch_cue,r3_catch=tmp2$r3_catch_cue)
    df_cue <- melt(df_cue, id.vars="time", variable.name="level")
    plt_cue <- ggplot(df_cue, aes(time,value)) + geom_line(aes(colour=level))
plt_cue <- plt_cue + scale_color_brewer(palette="RdYlGn") +labs(title=paste(region_list[region_index],"unit",j,"cue"),y="normalized firing rate", x="time(s)") + geom_vline(xintercept=0)
df_result <- data.frame(time,p3_catch=tmp2$p3_catch_result,p2_catch=tmp2$p2_catch_result,p1_catch=tmp2$p1_catch_result,p0_catch=tmp2$p0_catch_result,r0_catch=tmp2$r0_catch_result,r1_catch=tmp2$r1_catch_result,r2_catch=tmp2$r2_catch_result,r3_catch=tmp2$r3_catch_result)
    df_result <- melt(df_result, id.vars="time", variable.name="level")
    plt_result <- ggplot(df_result, aes(time,value)) + geom_line(aes(colour=level))
plt_result <- plt_result + scale_color_brewer(palette="RdYlGn") +labs(title='result',y="normalized firing rate", x="time(s)") + geom_vline(xintercept=0)
df_catch_all_cue <- data.frame(time,r_all_catch=tmp2$r_all_catch_cue,p_all_catch=tmp2$p_all_catch_cue)
    df_catch_all_cue <- melt(df_catch_all_cue,id.vars="time",variable.name="level")
    plt_catch_cue <- ggplot(df_catch_all_cue,aes(time,value)) + geom_line(aes(colour=level))
plt_catch_cue <- plt_catch_cue + labs(title=paste(region_list[region_index],'unit',j,'cue catch all'),y='normalized firing rate',x="time(s)") + geom_vline(xintercept=0)
df_catch_all_result <- data.frame(time,r_all_catch=tmp2$r_all_catch_result,p_all_catch=tmp2$p_all_catch_result)
    df_catch_all_result <- melt(df_catch_all_result,id.vars="time",variable.name="level")
    plt_catch_result <- ggplot(df_catch_all_result,aes(time,value)) + geom_line(aes(colour=level))
plt_catch_result <- plt_catch_result + labs(title='result catch all',y='normalized firing rate',x="time(s)") + geom_vline(xintercept=0)
png(paste(region_list[region_index],"_catch_multilevels_unit_",j,".png",sep=""),width=8,height=6,units="in",res=500)
multiplot(plt_cue,plt_result,cols=1)
dev.off()
png(paste(region_list[region_index],"_catch_all_unit_",j,".png",sep=""),width=8,height=6,units="in",res=500)
multiplot(plt_catch_cue,plt_catch_result,cols=1)
dev.off()
rm(tmp)
}
assign(r_all_catch_cue_name,r_all_catch_cue_matrix)
assign(r_all_catch_result_name,r_all_catch_result_matrix)
assign(p_all_catch_cue_name,p_all_catch_cue_matrix)
assign(p_all_catch_result_name,p_all_catch_result_matrix)
assign(total_array_name,total_array)
}
M1_matrices <- abind(M1_r3_succ_cue_all,M1_r3_succ_result_all,M1_p3_fail_cue_all,M1_p3_fail_result_all,M1_all_r_succ_cue,M1_all_r_fail_cue,M1_all_r_succ_result,M1_all_r_fail_result,M1_all_p_succ_cue,M1_all_p_fail_cue,M1_all_p_succ_result,M1_all_p_fail_result,M1_no_r_succ_cue,M1_no_r_fail_cue,M1_no_r_succ_result,M1_no_r_fail_result,M1_no_p_succ_cue,M1_no_p_fail_cue,M1_no_p_succ_result,M1_no_p_fail_result,M1_r_all_catch_cue,M1_r_all_catch_result,M1_p_all_catch_cue,M1_p_all_catch_result,along=3)
M1_matrix_keys <- c('M1_r3_succ_cue','M1_r3_succ_result','M1_p3_fail_cue','M1_p3_fail_result','M1_all_r_succ_cue','M1_all_r_fail_cue','M1_all_r_succ_result','M1_all_r_fail_result','M1_all_p_succ_cue','M1_all_p_fail_cue','M1_all_p_succ_result','M1_all_p_fail_result','M1_no_r_succ_cue','M1_no_r_fail_cue','M1_no_r_succ_result','M1_no_r_fail_result','M1_no_p_succ_cue','M1_no_p_fail_cue','M1_no_p_succ_result','M1_no_p_fail_result','M1_r_all_catch_cue','M1_r_all_catch_result','M1_p_all_catch_cue','M1_p_all_catch_result')
S1_matrices <- abind(S1_r3_succ_cue_all,S1_r3_succ_result_all,S1_p3_fail_cue_all,S1_p3_fail_result_all,S1_all_r_succ_cue,S1_all_r_fail_cue,S1_all_r_succ_result,S1_all_r_fail_result,S1_all_p_succ_cue,S1_all_p_fail_cue,S1_all_p_succ_result,S1_all_p_fail_result,S1_no_r_succ_cue,S1_no_r_fail_cue,S1_no_r_succ_result,S1_no_r_fail_result,S1_no_p_succ_cue,S1_no_p_fail_cue,S1_no_p_succ_result,S1_no_p_fail_result,S1_r_all_catch_cue,S1_r_all_catch_result,S1_p_all_catch_cue,S1_p_all_catch_result,along=3)
S1_matrix_keys <- c('S1_r3_succ_cue','S1_r3_succ_result','S1_p3_fail_cue','S1_p3_fail_result','S1_all_r_succ_cue','S1_all_r_fail_cue','S1_all_r_succ_result','S1_all_r_fail_result','S1_all_p_succ_cue','S1_all_p_fail_cue','S1_all_p_succ_result','S1_all_p_fail_result','S1_no_r_succ_cue','S1_no_r_fail_cue','S1_no_r_succ_result','S1_no_r_fail_result','S1_no_p_succ_cue','S1_no_p_fail_cue','S1_no_p_succ_result','S1_no_p_fail_result','S1_r_all_catch_cue','S1_r_all_catch_result','S1_p_all_catch_cue','S1_p_all_catch_result')
PmD_matrices <- abind(PmD_r3_succ_cue_all,PmD_r3_succ_result_all,PmD_p3_fail_cue_all,PmD_p3_fail_result_all,PmD_all_r_succ_cue,PmD_all_r_fail_cue,PmD_all_r_succ_result,PmD_all_r_fail_result,PmD_all_p_succ_cue,PmD_all_p_fail_cue,PmD_all_p_succ_result,PmD_all_p_fail_result,PmD_no_r_succ_cue,PmD_no_r_fail_cue,PmD_no_r_succ_result,PmD_no_r_fail_result,PmD_no_p_succ_cue,PmD_no_p_fail_cue,PmD_no_p_succ_result,PmD_no_p_fail_result,PmD_r_all_catch_cue,PmD_r_all_catch_result,PmD_p_all_catch_cue,PmD_p_all_catch_result,along=3)
PmD_matrix_keys <- c('PmD_r3_succ_cue','PmD_r3_succ_result','PmD_p3_fail_cue','PmD_p3_fail_result','PmD_all_r_succ_cue','PmD_all_r_fail_cue','PmD_all_r_succ_result','PmD_all_r_fail_result','PmD_all_p_succ_cue','PmD_all_p_fail_cue','PmD_all_p_succ_result','PmD_all_p_fail_result','PmD_no_r_succ_cue','PmD_no_r_fail_cue','PmD_no_r_succ_result','PmD_no_r_fail_result','PmD_no_p_succ_cue','PmD_no_p_fail_cue','PmD_no_p_succ_result','PmD_no_p_fail_result','PmD_r_all_catch_cue','PmD_r_all_catch_result','PmD_p_all_catch_cue','PmD_p_all_catch_result')
cat("\nM1 heatmaps")
for (i in 1:length(M1_matrix_keys)){
if (any(is.na(M1_matrices[,,i]))){
M1_matrices[,,i][is.na(M1_matrices[,,i])] = 0
cat("\nna in",M1_matrix_keys[i])
}
png(paste(M1_matrix_keys[i],".png",sep=""),width=8,height=6,units="in",res=500)
heatmap.2(M1_matrices[,,i],Colv=F,dendrogram="row",scale="row",col=rev(brewer.pal(11,"RdBu")),main=M1_matrix_keys[i],trace="none",cexRow=0.5,ylab="unit",xlab="time (s)",colsep=9)
dev.off()
}
cat("\nS1 heatmaps")
for (i in 1:length(S1_matrix_keys)){
if (any(is.na(S1_matrices[,,i]))){
S1_matrices[,,i][is.na(S1_matrices[,,i])] = 0
cat("\nna in",S1_matrix_keys[i])
}
png(paste(S1_matrix_keys[i],".png",sep=""),width=8,height=6,units="in",res=500)
heatmap.2(S1_matrices[,,i],Colv=F,dendrogram="row",scale="row",col=rev(brewer.pal(11,"RdBu")),main=S1_matrix_keys[i],trace="none",cexRow=0.5,ylab="unit",xlab="time (s)",colsep=9)
dev.off()
}
cat("\nPmD heatmaps")
for (i in 1:length(PmD_matrix_keys)){
if (any(is.na(PmD_matrices[,,i]))){
PmD_matrices[,,i][is.na(PmD_matrices[,,i])] = 0
cat("\nna in",PmD_matrix_keys[i])
}
png(paste(PmD_matrix_keys[i],".png",sep=""),width=8,height=6,units="in",res=500)
heatmap.2(PmD_matrices[,,i],Colv=F,dendrogram="row",scale="row",col=rev(brewer.pal(11,"RdBu")),main=PmD_matrix_keys[i],trace="none",cexRow=0.5,ylab="unit",xlab="time (s)",colsep=9)
dev.off()
}
cat("\ngripforce plots")
gf_matrix_keys <- c('r3_succ_cue','r3_succ_result','p3_fail_cue','p3_fail_result','ra_succ_cue','ra_fail_cue','ra_succ_result','ra_fail_result','pa_succ_cue','pa_fail_cue','pa_succ_result','pa_fail_result','r0_succ_cue','r0_fail_cue','r0_succ_result','r0_fail_result','p0_succ_cue','p0_fail_cue','p0_succ_result','p0_fail_result','r_all_catch_cue','r_all_catch_result','p_all_catch_cue','p_all_catch_result')
for (i in 1:length(gf_matrix_keys)){
avg_key <- paste(gf_matrix_keys[i],'_avg',sep="")
std_key <- paste(gf_matrix_keys[i],'_std',sep="")
#cat("\n",avg_key)
#cat("\n",std_key)
gf <- gf_total[[avg_key]]
std <- gf_total[[std_key]]
plot_gf(gf,std,gf_matrix_keys[i])
}
cat("\nsaving")
# save.image(file="rearranged_data.RData")
rm(list=ls())
|
af1be1e7a89cdc544ab33e52dd3059c3b7ca95a0 | 7218dd41cbe126a617485cd463f9d6a93dfc1eb9 | /man/sgp_small_multiples.Rd | a074743be33f8667a3042ade1a197be54af57262 | [] | no_license | almartin82/MAP-visuals | 0c171b37978cefed94d46336457e0ef8a672c669 | 28102126dc4b40c6566a6f20248d4f871613253a | refs/heads/master | 2021-01-10T19:22:04.324129 | 2015-06-11T18:38:42 | 2015-06-11T18:38:42 | 10,979,358 | 1 | 1 | null | 2015-02-05T19:41:56 | 2013-06-26T21:16:19 | R | UTF-8 | R | false | false | 514 | rd | sgp_small_multiples.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/sgp_small_multiples.R
\name{sgp_small_multiples}
\alias{sgp_small_multiples}
\title{SGP small multiples}
\usage{
sgp_small_multiples(df, stu_per_row = 12)
}
\arguments{
\item{df}{long data frame in TEAM canonical style}
\item{stu_per_row}{how many kids wide?}
}
\value{
returns a ggplot object
}
\description{
\code{sgp_small_multiples} returns a ggplot facet_grid with a box for every kid, showing
their SGP and RIT change
}
|
15a521bf5de7ec92a793bba50d34e4fd662ff45e | 82b4ac6a93625c4b73a3792fd338c1f6744213d6 | /figures/figures.r | fadaeba4be14d58eeca2f9dccbe6554ebbe305cc | [] | no_license | kroppheather/larix_density_ecohydro | 0f2c55dc6390947e1d143b7397b3931a0af4b9c7 | 58c2b1312e43daffe69631ad9bce588608fbf01b | refs/heads/master | 2020-04-10T20:44:51.926019 | 2019-02-01T15:06:14 | 2019-02-01T15:06:14 | 124,288,800 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,669 | r | figures.r | ###########################################################################
###########################################################################
############## Created by Heather Kropp in October 2017 ##############
############## This script creates figures for all time ##############
############## series data. ##############
###########################################################################
###########################################################################
############## Input files: ##############
############## from sapflux calc: ##############
############## Transpiration: El.L,El.L17,El.H,El.H17 ##############
############## stomatal conductance:gc.L, gc.L17, gc.H, gc.H17#############
############## tree info: datTreeL, datTreeL17, datTreeH, #############
############## datTreeH17 #############
############## from thaw depth: TDall #############
###########################################################################
#################################################################
####read in sapflow data #######
#################################################################
source("c:\\Users\\hkropp\\Documents\\GitHub\\larix_density_ecohydro\\sapflux_process.r")
#libraries loaded from source
#plyr, lubridate,caTools
#set the plotting directory
plotDI <- "c:\\Users\\hkropp\\Google Drive\\Viper_Ecohydro\\time_plot"
#################################################################
####read in thaw depth data #######
#################################################################
source("c:\\Users\\hkropp\\Documents\\GitHub\\larix_density_ecohydro\\thaw_depth_process.r")
#################################################################
####read in datafiles #######
#################################################################
#read in precip data
datAirP <- read.csv("c:\\Users\\hkropp\\Google Drive\\viperSensor\\airport\\airport.csv")
#read in continuous soil data
datSW <- read.csv("c:\\Users\\hkropp\\Google Drive\\viperSensor\\soil\\vwc.GS3.csv")
#canopy rh and temperature
datRH <- read.csv("c:\\Users\\hkropp\\Google Drive\\viperSensor\\met\\RH.VP4.csv")
datTC <- read.csv("c:\\Users\\hkropp\\Google Drive\\viperSensor\\met\\TempC.VP4.csv")
#PAR
datPAR <- read.csv("c:\\Users\\hkropp\\Google Drive\\viperSensor\\met\\PAR.QSOS PAR.csv")
#read in leaf and sapwood area
datLSA <- read.csv("c:\\Users\\hkropp\\Google Drive\\Viper_Ecohydro\\sapflux_diag\\tables\\treeSummary.csv")
#################################################################
####calculate daily transpiration #######
#################################################################
#El is in g m-2 s-1
#convert to g m-2 half hour-1
#and reorganize
E.temp <- list(El.L,El.L17,El.H,El.H17)
E.dim <- numeric(0)
E.temp2 <- list()
E.temp3 <- list()
E.tempwork <- list()
for(i in 1:4){
E.dim[i] <- dim(E.temp[[i]])[2]
E.temp2[[i]] <- data.frame(E.temp[[i]][,1:3], E.temp[[i]][,4:E.dim[i]]*60*30)
E.temp3[[i]] <- data.frame(doy=rep(E.temp2[[i]]$doy,times=E.dim[i]-3),
year=rep(E.temp2[[i]]$year,times=E.dim[i]-3),
hour=rep(E.temp2[[i]]$hour,times=E.dim[i]-3),
E.hh = as.vector(data.matrix(E.temp2[[i]][,4:E.dim[i]])),
tree = rep(seq(1,E.dim[i]-3), each=dim(E.temp2[[i]])[1]))
E.temp3[[i]] <- na.omit(E.temp3[[i]])
E.tempwork[[i]] <- E.temp3[[i]]
E.tempwork[[i]]$E.ss <- E.temp3[[i]]$E.hh/(30*60)
E.tempwork[[i]]$dataset <- rep(i,dim(E.tempwork[[i]])[1])
}
Esshh <- ldply(E.tempwork,data.frame)
#convert to mols
Esshh$E.mmols <- (Esshh$E.ss/18)*1000
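#(divide g m-2 s-1 by the molar mass of water, ~18 g mol-1, then multiply by 1000 to get mmol m-2 s-1)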
#now aggregate to see how many observations in a day
#and pull out data on days that have at least 3 trees
#and those trees have all 48 measurements in a day
ELength <- list()
EdayL <- list()
E.temp4 <- list()
for(i in 1:4){
ELength[[i]] <- aggregate(E.temp3[[i]]$E.hh, by=list(E.temp3[[i]]$doy,E.temp3[[i]]$year,E.temp3[[i]]$tree),
FUN="length")
ELength[[i]] <- ELength[[i]][ELength[[i]]$x==48,]
colnames(ELength[[i]]) <- c("doy","year","tree","count")
#find out how many tree observations on each day
EdayL[[i]] <- aggregate(ELength[[i]]$tree, by=list(ELength[[i]]$doy,ELength[[i]]$year), FUN="length")
colnames(EdayL[[i]])<- c("doy","year", "ntree")
#subset to only use days with at least 3 trees
EdayL[[i]] <- EdayL[[i]][EdayL[[i]]$ntree>=3,]
#join to only include days with enough sensors
ELength[[i]] <- join(ELength[[i]],EdayL[[i]], by=c("doy","year"), type="inner")
#create a tree, day id
ELength[[i]]$treeDay <- seq(1, dim(ELength[[i]])[1])
ELength[[i]]$dataset <- rep(i, dim(ELength[[i]])[1])
#ELength now has the list of each sensor and day that should be included
#subset the data to only do the calculations on the trees that meet the minimum criteria
E.temp4[[i]] <- join(E.temp3[[i]],ELength[[i]], by=c("doy", "year", "tree"), type="inner")
}
#turn back into a dataframe
EtempALL <- ldply(E.temp4,data.frame)
EInfo <- ldply(ELength,data.frame)
#get the daily integration of the transpiration
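#(trapezoidal integration with trapz() over each tree-day's 48 half-hourly values)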
EdayT <- numeric(0)
EdayTemp <- list()
for(i in 1:dim(EInfo)[1]){
EdayTemp[[i]] <- data.frame(x=EtempALL$hour[EtempALL$treeDay==EInfo$treeDay[i]&EtempALL$dataset==EInfo$dataset[i]],
y=EtempALL$E.hh[EtempALL$treeDay==EInfo$treeDay[i]&EtempALL$dataset==EInfo$dataset[i]])
EdayT[i] <- trapz(EdayTemp[[i]]$x,EdayTemp[[i]]$y)
}
#add daily value into Einfo
EInfo$T.day <- EdayT
#in g per day now
EInfo$T.Lday <- EdayT/1000
#add stand labels to the datasets
EInfo$stand <- ifelse(EInfo$dataset==1|EInfo$dataset==2,"ld","hd")
#get the stand averages of daily transpiration across day
EdayLm <- aggregate(EInfo$T.Lday, by=list(EInfo$doy,EInfo$year,EInfo$stand), FUN="mean")
EdayLsd <- aggregate(EInfo$T.Lday, by=list(EInfo$doy,EInfo$year,EInfo$stand), FUN="sd")
EdayLl <- aggregate(EInfo$T.Lday, by=list(EInfo$doy,EInfo$year,EInfo$stand), FUN="length")
Eday <- EdayLm
colnames(Eday) <- c("doy","year","site","T.L.day")
Eday$T.sd <- EdayLsd$x
Eday$T.n <- EdayLl$x
Eday$T.se <- Eday$T.sd/sqrt(Eday$T.n)
#aggrate to half hourly
Esshh$site <-ifelse(Esshh$dataset==1|Esshh$dataset==2, "ld", "hd")
#filter unrealistic values
Esshh <- Esshh[Esshh$E.mmols<quantile(Esshh$E.mmols,.975),]
EHHave <- aggregate(Esshh$E.mmols, by=list(Esshh$hour,Esshh$doy,Esshh$year,Esshh$site), FUN="mean" )
colnames(EHHave) <- c("hour","doy", "year","site", "E.hh")
EHHsd <- aggregate(Esshh$E.mmols, by=list(Esshh$hour,Esshh$doy,Esshh$year,Esshh$site), FUN="sd" )
EHHn <- aggregate(Esshh$E.mmols, by=list(Esshh$hour,Esshh$doy,Esshh$year,Esshh$site), FUN="length" )
EHHave$E.sd <- EHHsd$x
EHHave$E.n <- EHHn$x
EHHave$E.se <- EHHave$E.sd /sqrt(EHHave$E.n)
#################################################################
####calculate gc daily values across all tree #######
#################################################################
#reoranize into data frames
gctemp <- list(gc.L, gc.L17, gc.H, gc.H17)
gcdim <- numeric(0)
gctemp2 <- list()
for(i in 1:4){
gcdim[i] <- dim(gctemp[[i]])[2]
gctemp2[[i]] <- data.frame(doy=rep(gctemp[[i]]$doy,times=gcdim[i]-3),
year=rep(gctemp[[i]]$year,times=gcdim[i]-3),
hour=rep(gctemp[[i]]$hour,times=gcdim[i]-3),
gc.h = as.vector(data.matrix(gctemp[[i]][,4:gcdim[i]])),
tree = rep(seq(1,gcdim[i]-3), each=dim(gctemp[[i]])[1]),
datset=rep(i, each=dim(gctemp[[i]])[1]) )
gctemp2[[i]] <- na.omit(gctemp2[[i]])
}
gctemp3 <- ldply(gctemp2, data.frame)
#check that there aren't days with too few observations
gclength <- list()
gcLday1 <- list()
gcLday2 <- list()
for(i in 1:4){
#how many observations in days and trees
gclength[[i]] <- aggregate(gctemp2[[i]]$gc.h, by=list(gctemp2[[i]]$doy,gctemp2[[i]]$year,gctemp2[[i]]$tree), FUN="length")
#subset to exclude trees that only have one obs in a day
gclength[[i]] <- gclength[[i]][gclength[[i]]$x>3,]
#how many trees in days
gcLday1[[i]] <- aggregate(gclength[[i]]$Group.3, by=list(gclength[[i]]$Group.1,gclength[[i]]$Group.2),FUN="length")
}
# alot of observations so no need to subset more
# half hourly average across all trees
gcHHave <-aggregate(gctemp3$gc.h, by=list(gctemp3$hour,gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="mean")
colnames(gcHHave) <- c("hour","doy", "year", "dataset","gc.mmol.s")
gcHHsd <-aggregate(gctemp3$gc.h, by=list(gctemp3$hour,gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="sd")
gcHHn <-aggregate(gctemp3$gc.h, by=list(gctemp3$hour,gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="length")
gcHHave$gc.sd <- gcHHsd$x
gcHHave$gc.n <- gcHHn$x
gcHHave$gc.se <- gcHHave$gc.sd/sqrt(gcHHave$gc.n)
gcHHave$site <- ifelse(gcHHave$dataset==1|gcHHave$dataset==2,"ld","hd")
#get the average daily gc across all trees
gsDave <- aggregate(gctemp3$gc.h, by=list(gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="mean")
gsDsd <- aggregate(gctemp3$gc.h, by=list(gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="sd")
gsDn <- aggregate(gctemp3$gc.h, by=list(gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="length")
colnames(gsDave) <- c("doy", "year", "dataset","gc.mmol.s")
gsDave$gc.sd <- gsDsd$x
gsDave$gc.n <- gsDn$x
gsDave$gc.se <- gsDave$gc.sd/sqrt(gsDave$gc.n)
gsHHave <- aggregate(gctemp3$gc.h, by=list(gctemp3$hour,gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="mean")
gsHHsd <- aggregate(gctemp3$gc.h, by=list(gctemp3$hour,gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="sd")
gsHHn <- aggregate(gctemp3$gc.h, by=list(gctemp3$hour,gctemp3$doy,gctemp3$year,gctemp3$datset), FUN="length")
colnames(gsHHave) <- c("hour","doy", "year", "dataset","gc.mmol.s")
gsHHave$gc.sd <- gsHHsd$x
gsHHave$gc.n <- gsHHn$x
gsHHave$gc.se <- gsHHave$gc.sd/sqrt(gsHHave$gc.n)
#label the site
gsDave$site <- ifelse(gsDave$dataset==1|gsDave$dataset==2,"ld","hd")
gsHHave$site <- ifelse(gsHHave$dataset==1|gsHHave$dataset==2,"ld","hd")
#################################################################
####aggregate and organize met #######
#################################################################
#subset and match
datLRHmet <- data.frame(datRH[datRH$site=="ld",1:3], RH=datRH$RH.VP4[datRH$site=="ld"])
datLTCmet <- data.frame(datTC[datTC$site=="ld",1:3], Temp=datTC$TempC.VP4[datTC$site=="ld"])
datHRHmet <- data.frame(datRH[datRH$site=="hd",1:3], RH=datRH$RH.VP4[datRH$site=="hd"])
datHTCmet <- data.frame(datTC[datTC$site=="hd",1:3], Temp=datTC$TempC.VP4[datTC$site=="hd"])
#join temp and RH
datLmet <- join(datLRHmet, datLTCmet, by=c("doy","year","hour"),type="inner")
datHmet <- join(datHRHmet, datHTCmet, by=c("doy","year","hour"),type="inner")
#calculate VPD
datLe.sat<-0.611*exp((17.502*datLmet$Temp)/(datLmet$Temp+240.97))
datHe.sat<-0.611*exp((17.502*datHmet$Temp)/(datHmet$Temp+240.97))
datLRHfix<-ifelse(datLmet$RH>=1,.999,datLmet$RH)
datHRHfix<-ifelse(datHmet$RH>=1,.999,datHmet$RH)
datLmet$D<-(datLe.sat-(datLRHfix*datLe.sat))
datHmet$D<-(datHe.sat-(datHRHfix*datHe.sat))
#join PAR to the dataframes
datPARL <- data.frame(doy=datPAR$doy[datPAR$site=="ld"], year=datPAR$year[datPAR$site=="ld"],
hour=datPAR$hour[datPAR$site=="ld"], PAR=datPAR$PAR.QSOS.Par[datPAR$site=="ld"])
datPARH <- data.frame(doy=datPAR$doy[datPAR$site=="hd"], year=datPAR$year[datPAR$site=="hd"],
hour=datPAR$hour[datPAR$site=="hd"], PAR=datPAR$PAR.QSOS.Par[datPAR$site=="hd"])
#join into met
datLmet <- join(datLmet, datPARL, by=c("doy","year","hour"), type="left")
datHmet <- join(datHmet, datPARH, by=c("doy","year","hour"), type="left")
#pull out daily means
dayLD <- aggregate(datLmet$D, by=list(datLmet$doy,datLmet$year), FUN="mean")
colnames(dayLD) <- c("doy","year","D")
dayLT <- aggregate(datLmet$Temp, by=list(datLmet$doy,datLmet$year), FUN="mean")
colnames(dayLT) <- c("doy","year","T")
dayL <- join(dayLT,dayLD, by=c("doy","year"),type="full")
dayHD <- aggregate(datHmet$D, by=list(datHmet$doy,datHmet$year), FUN="mean")
colnames(dayHD) <- c("doy","year","D")
dayHT <- aggregate(datHmet$Temp, by=list(datHmet$doy,datHmet$year), FUN="mean")
colnames(dayHT) <- c("doy","year","T")
dayH <- join(dayHT,dayHD, by=c("doy","year"),type="full")
dayLP <-aggregate(datLmet$PAR, by=list(datLmet$doy,datLmet$year), FUN="max")
dayHP <-aggregate(datHmet$PAR, by=list(datHmet$doy,datHmet$year), FUN="max")
colnames(dayLP)<- c("doy","year","PARmax")
colnames(dayHP)<- c("doy","year","PARmax")
dayL <- join(dayL,dayLP, by=c("doy","year"),type="full")
dayH <- join(dayH,dayHP, by=c("doy","year"),type="full")
#################################################################
####filter gc to exclude measurements when D is too low #######
#################################################################
#filter gc when D is less than 0.6 kPa or PAR is less than 5
#first combine met and include a site id
datLmet1 <- datLmet
datHmet1 <- datHmet
datLmet1$site <- rep("ld", dim(datLmet)[1])
datHmet1$site <- rep("hd", dim(datHmet)[1])
datAj <- rbind(datLmet1, datHmet1)
gcHHave <- join(gcHHave, datAj, by=c("doy","year","hour","site"), type="left")
gcHHave$gc.mmol.sf <- ifelse(gcHHave$D<.6|gcHHave$PAR<5,NA,gcHHave$gc.mmol.s)
#################################################################
####make a panel of daily met and T and gc calc #######
#################################################################
#filter out point that seems to have an erroneous meas
Eday <- Eday[Eday$T.L.day<.4,]
#day range for x axis
xl2016 <- 1
xh2016 <- 245
xl2017 <- 155
xh2017 <- 230
#subset precip
prec2016 <- datAirP[datAirP$doy<=xh2016&datAirP$doy>=xl2016&datAirP$year==2016,]
prec2017 <- datAirP[datAirP$doy<=xh2017&datAirP$doy>=xl2017&datAirP$year==2017,]
#set up plot widths
wd <- 35
hd <-17
colL <- "royalblue"
colH <- "tomato3"
colHt <- rgb(205/255,79/255,57/255, .5)
colLt <- rgb(65/255,105/255,225/255,.5)
ylT <- 0
yhT <- .3
ylG <- 0
yhG <- 300
ylA <- 0
yhA <- 25
ylD <- 0
yhD <- 1.6
axisC <- 5
TDmax <- 75
TDscale <- yhA/TDmax
Prmax <- 40
Prscale <- yhD/Prmax
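#TDscale and Prscale rescale thaw depth (cm) and precipitation (mm) onto the air temperature
#and VPD panels; the right-hand axis labels below apply the inverse scaling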
jpeg(paste0(plotDI , "\\daily_summary.jpg"), width=2600, height=2200, units="px")
ab <- layout(matrix(seq(1,8), ncol=2, byrow=TRUE), width=rep(lcm(wd),8), height=rep(lcm(hd),8))
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016,xh2016), ylim=c(ylT,yhT),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(Eday$doy[Eday$site=="ld"&Eday$year==2016],
Eday$T.L.day[Eday$site=="ld"&Eday$year==2016],pch=19,
col=colL,cex=5 )
points(Eday$doy[Eday$site=="hd"&Eday$year==2016],
Eday$T.L.day[Eday$site=="hd"&Eday$year==2016],pch=19,
col=colH,cex=5 )
arrows(Eday$doy[Eday$year==2016],
Eday$T.L.day[Eday$year==2016]-
Eday$T.se[Eday$year==2016],
Eday$doy[Eday$year==2016],
Eday$T.L.day[Eday$year==2016]+
Eday$T.se[Eday$year==2016],lwd=3, code=0)
axis(2, seq(ylT,yhT, by=.1 ), las=2, cex.axis=axisC, lwd.ticks=3)
legend(220,yhT,c("low density", "high density"), col=c(colL,colH), pch=19, cex=4, bty="n")
mtext("Daily transpiraiton", side=2, line=18, cex=4)
mtext(expression(paste("(L m"^"-2","day"^"-1",")")), side=2, line=10, cex=4)
mtext("2016", side=3, line=5, cex=4)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017,xh2017), ylim=c(ylT,yhT),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(Eday$doy[Eday$site=="ld"&Eday$year==2017],
Eday$T.L.day[Eday$site=="ld"&Eday$year==2017],pch=19,
col=colL,cex=5 )
points(Eday$doy[Eday$site=="hd"&Eday$year==2017],
Eday$T.L.day[Eday$site=="hd"&Eday$year==2017],pch=19,
col=colH,cex=5 )
arrows(Eday$doy[Eday$year==2017],
Eday$T.L.day[Eday$year==2017]-
Eday$T.se[Eday$year==2017],
Eday$doy[Eday$year==2017],
Eday$T.L.day[Eday$year==2017]+
Eday$T.se[Eday$year==2017],lwd=3, code=0)
axis(4, seq(ylT,yhT, by=.1 ), las=2, cex.axis=axisC, lwd.ticks=3)
mtext("2017", side=3, line=5, cex=4)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016,xh2016), ylim=c(ylG,yhG),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(gsDave$doy[gsDave$site=="ld"&gsDave$year==2016],
gsDave$gc.mmol.s[gsDave$site=="ld"&gsDave$year==2016],pch=19,
col=colL,cex=5, type="b", lwd=3 )
points(gsDave$doy[gsDave$site=="hd"&gsDave$year==2016],
gsDave$gc.mmol.s[gsDave$site=="hd"&gsDave$year==2016],pch=19,
col=colH,cex=5, type="b", lwd=3 )
arrows(gsDave$doy[gsDave$year==2016],
gsDave$gc.mmol.s[gsDave$year==2016]-
gsDave$gc.se[gsDave$year==2016],
gsDave$doy[gsDave$year==2016],
gsDave$gc.mmol.s[gsDave$year==2016]+
gsDave$gc.se[gsDave$year==2016],lwd=3, code=0)
mtext("Daily average gc", side=2, line=18, cex=4)
mtext(expression(paste("(mmol m"^"-2","s"^"-1",")")), side=2, line=10, cex=4)
axis(2, seq(0, yhG-50, by=50), las=2, cex.axis=axisC, lwd.ticks=3)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017,xh2017), ylim=c(ylG,yhG),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(gsDave$doy[gsDave$site=="ld"&gsDave$year==2017],
gsDave$gc.mmol.s[gsDave$site=="ld"&gsDave$year==2017],pch=19,
col=colL,cex=5, type="b", lwd=3 )
points(gsDave$doy[gsDave$site=="hd"&gsDave$year==2017],
gsDave$gc.mmol.s[gsDave$site=="hd"&gsDave$year==2017],pch=19,
col=colH,cex=5, type="b", lwd=3 )
arrows(gsDave$doy[gsDave$year==2017],
gsDave$gc.mmol.s[gsDave$year==2017]-
gsDave$gc.se[gsDave$year==2017],
gsDave$doy[gsDave$year==2017],
gsDave$gc.mmol.s[gsDave$year==2017]+
gsDave$gc.se[gsDave$year==2017],lwd=3, code=0)
axis(4, seq(0, yhG-50, by=50), las=2, cex.axis=axisC, lwd.ticks=3)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016,xh2016), ylim=c(ylA,yhA),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(dayH$doy[dayH$doy<=xh2016&dayH$doy>=xl2016&dayH$year==2016],
dayH$T[dayH$doy<=xh2016&dayH$doy>=xl2016&dayH$year==2016], type="l",
lwd=6, col=colH)
points(dayL$doy[dayL$doy<=xh2016&dayL$doy>=xl2016&dayL$year==2016],
dayL$T[dayL$doy<=xh2016&dayL$doy>=xl2016&dayL$year==2016], type="l",
lwd=6, col=colLt)
axis(2, seq(0,20, by=5), las=2, cex.axis=axisC, lwd.ticks=3)
points(TDall$doy[TDall$year==2016&TDall$site=="ld"],TDall$TDday[TDall$year==2016&TDall$site=="ld"]*TDscale,
type="l", col=colL, lty=4, lwd=6)
points(TDall$doy[TDall$year==2016&TDall$site=="hd"],TDall$TDday[TDall$year==2016&TDall$site=="hd"]*TDscale,
type="l", col=colHt, lty=4, lwd=6)
mtext("Daily average air temp", side=2, line=18, cex=4)
mtext("(C)", side=2, line=10, cex=4)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017,xh2017), ylim=c(ylA,yhA),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(dayH$doy[dayH$doy<=xh2017&dayH$doy>=xl2017&dayH$year==2017],
dayH$T[dayH$doy<=xh2017&dayH$doy>=xl2017&dayH$year==2017], type="l",
lwd=6, col=colH)
points(dayL$doy[dayL$doy<=xh2017&dayL$doy>=xl2017&dayL$year==2017],
dayL$T[dayL$doy<=xh2017&dayL$doy>=xl2017&dayL$year==2017], type="l",
lwd=6, col=colLt)
points(TDall$doy[TDall$year==2017&TDall$site=="ld"],TDall$TDday[TDall$year==2017&TDall$site=="ld"]*TDscale,
type="l", col=colL, lty=4, lwd=6)
points(TDall$doy[TDall$year==2017&TDall$site=="hd"],TDall$TDday[TDall$year==2017&TDall$site=="hd"]*TDscale,
type="l", col=colHt, lty=4, lwd=6)
axis(4, seq(0,20,by=5),seq(0,20,by=5)*3, las=2, cex.axis=axisC, lwd.ticks=3)
legend(165,26, c("low density TD", "high density TD", "low density Ta", "high density Ta"),
col=c(colL,colHt,colLt,colH), lwd=6, lty=c(4,4,1,1), bty="n", cex=4)
mtext("Thaw depth", side=4, line=10, cex=4)
mtext("(cm)", side=4, line=18, cex=4)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016,xh2016), ylim=c(ylD,yhD),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
for(i in 1:dim(prec2016)[1]){
polygon(c(prec2016$doy[i]-.5,prec2016$doy[i]-.5,prec2016$doy[i]+.5,prec2016$doy[i]+.5),
c(0,prec2016$Pr.mm[i]*Prscale,prec2016$Pr.mm[i]*Prscale,0), col="grey60", border=FALSE)
}
points(dayH$doy[dayH$doy<=xh2016&dayH$doy>=xl2016&dayH$year==2016],
dayH$D[dayH$doy<=xh2016&dayH$doy>=xl2016&dayH$year==2016], type="l",
lwd=6, col=colH)
points(dayL$doy[dayL$doy<=xh2016&dayL$doy>=xl2016&dayL$year==2016],
dayL$D[dayL$doy<=xh2016&dayL$doy>=xl2016&dayL$year==2016], type="l",
lwd=6, col=colLt)
axis(2, seq(0,1.2, by=.4), las=2, cex.axis=axisC, lwd.ticks=3)
mtext(seq(xl2016,xh2016, by=10),at=seq(xl2016,xh2016, by=10), line=4, side=1, cex=3)
axis(1, seq(xl2016,xh2016, by=10), rep(" ", length(seq(xl2016,xh2016, by=10))) ,cex.axis=axisC, lwd.ticks=3)
mtext("Daily average VPD", side=2, line=18, cex=4)
mtext("(kPa)", side=2, line=10, cex=4)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017,xh2017), ylim=c(ylD,yhD),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
for(i in 1:dim(prec2017)[1]){
polygon(c(prec2017$doy[i]-.5,prec2017$doy[i]-.5,prec2017$doy[i]+.5,prec2017$doy[i]+.5),
c(0,prec2017$Pr.mm[i]*Prscale,prec2017$Pr.mm[i]*Prscale,0), col="grey60", border=FALSE)
}
points(dayH$doy[dayH$doy<=xh2017&dayH$doy>=xl2017&dayH$year==2017],
dayH$D[dayH$doy<=xh2017&dayH$doy>=xl2017&dayH$year==2017], type="l",
lwd=6, col=colH)
points(dayL$doy[dayL$doy<=xh2017&dayL$doy>=xl2017&dayL$year==2017],
dayL$D[dayL$doy<=xh2017&dayL$doy>=xl2017&dayL$year==2017], type="l",
lwd=6, col=colLt)
legend(165,1.6, c("low density VPD", "high density VPD", "Precipitaiton"),
col=c(colLt, colH, "grey60"), lty=c(1,1,NA), pch=c(NA,NA,15),
bty="n", lwd=c(6,6,NA), cex=4)
axis(4,seq(0,1.2, by=.4),seq(0,1.2, by=.4)*25, las=2, cex.axis=axisC, lwd.ticks=3)
mtext(seq(xl2017,xh2017, by=10),at=seq(xl2017,xh2017, by=10), line=4, side=1, cex=3)
axis(1, seq(xl2017,xh2017, by=10), rep(" ", length(seq(xl2017,xh2017, by=10))) ,cex.axis=axisC, lwd.ticks=3)
mtext("Precipitation", side=4, line=10, cex=4)
mtext("(mm)", side=4, line=18, cex=4)
mtext("Day of year", side=1, outer=TRUE, line=-3, cex=4)
box(which="plot")
dev.off()
#################################################################
####make a panel of subset of half hourly #######
####met and T and gc calc #######
#################################################################
#datHmet and datLmet
#EHHave and gcHHave
#set up plot widths
gcHHave$site <-ifelse(gcHHave$dataset==1|gcHHave$dataset==2,"ld","hd")
wd <- 50
hd <-20
colL <- "royalblue"
colH <- "tomato3"
colHt <- rgb(205/255,79/255,57/255, .5)
colLt <- rgb(65/255,105/255,225/255,.5)
#specify year to plot
yrS <- 2017
xS <- 160
xE <- 168
ylG <- 0
yhG <- 100
ylT <- 0
yhT <- 0.75
ylD <- 0
yhD <- 3
ylP <- 0
yhP <- 1500
jpeg(paste0(plotDI , "\\hh__summary6.jpg"), width=2800, height=2600, units="px")
ab <- layout(matrix(seq(1,4), ncol=1, byrow=TRUE), width=rep(lcm(wd),8), height=rep(lcm(hd),8))
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xS,xE), ylim=c(ylG,yhG),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(gcHHave$doy[gcHHave$doy>=xS&gcHHave$doy<=xE&gcHHave$year==yrS&gcHHave$site=="hd"]+
(gcHHave$hour[gcHHave$doy>=xS&gcHHave$doy<=xE&gcHHave$year==yrS&gcHHave$site=="hd"]/24),
gcHHave$gc.mmol.sf[gcHHave$doy>=xS&gcHHave$doy<=xE&gcHHave$year==yrS&gcHHave$site=="hd"],
col=colH, pch=19, cex=3, type="b", lwd=3)
points(gcHHave$doy[gcHHave$doy>=xS&gcHHave$doy<=xE&gcHHave$year==yrS&gcHHave$site=="ld"]+
(gcHHave$hour[gcHHave$doy>=xS&gcHHave$doy<=xE&gcHHave$year==yrS&gcHHave$site=="ld"]/24),
gcHHave$gc.mmol.sf[gcHHave$doy>=xS&gcHHave$doy<=xE&gcHHave$year==yrS&gcHHave$site=="ld"],
col=colL, pch=19, cex=3, type="b", lwd=3)
mtext("Canopy", side=2, line=32, cex=5)
mtext("stomatal conductance", side=2, line=22, cex=5)
mtext(expression(paste("(mmol m"^"-2","s"^"-1",")")), side=2, line=12, cex=5)
mtext(paste(yrS), side=3, line=7, cex=5)
axis(2, seq(0,300,by=50), las=2, cex.axis=axisC, lwd.ticks=3)
legend(xS+1,yhG, c("low density", "high density"), col=c(colL,colH), pch=19, lwd=3,bty="n", cex=5)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xS,xE), ylim=c(ylT,yhT),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(EHHave$doy[EHHave$doy>=xS&EHHave$doy<=xE&EHHave$year==yrS&EHHave$site=="hd"]+
(EHHave$hour[EHHave$doy>=xS&EHHave$doy<=xE&EHHave$year==yrS&EHHave$site=="hd"]/24),
EHHave$E.hh[EHHave$doy>=xS&EHHave$doy<=xE&EHHave$year==yrS&EHHave$site=="hd"],
col=colH, pch=19, cex=3, type="b", lwd=3)
points(EHHave$doy[EHHave$doy>=xS&EHHave$doy<=xE&EHHave$year==yrS&EHHave$site=="ld"]+
(EHHave$hour[EHHave$doy>=xS&EHHave$doy<=xE&EHHave$year==yrS&EHHave$site=="ld"]/24),
EHHave$E.hh[EHHave$doy>=xS&EHHave$doy<=xE&EHHave$year==yrS&EHHave$site=="ld"],
col=colL, pch=19, cex=3, type="b", lwd=3)
axis(2, seq(0,.6,by=.1), las=2, cex.axis=axisC, lwd.ticks=3)
mtext("Canopy", side=2, line=32, cex=5)
mtext("transpiration", side=2, line=22, cex=5)
mtext(expression(paste("(mmol m"^"-2","s"^"-1",")")), side=2, line=12, cex=5)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xS,xE), ylim=c(ylD,yhD),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(datLmet$doy[datLmet$doy>=xS&datLmet$doy<=xE&datLmet$year==yrS]+
(datLmet$hour[datLmet$doy>=xS&datLmet$doy<=xE&datLmet$year==yrS]/24),
datLmet$D[datLmet$doy>=xS&datLmet$doy<=xE&datLmet$year==yrS],
col=colL, type="l", lwd=6)
points(datHmet$doy[datHmet$doy>=xS&datHmet$doy<=xE&datHmet$year==yrS]+
(datHmet$hour[datHmet$doy>=xS&datHmet$doy<=xE&datHmet$year==yrS]/24),
datHmet$D[datHmet$doy>=xS&datHmet$doy<=xE&datHmet$year==yrS],
col=colH, type="l", lwd=6)
mtext("Vapor pressure", side=2, line=32, cex=5)
mtext("deficit", side=2, line=22, cex=5)
mtext("(kPa)", side=2, line=12, cex=5)
axis(2, seq(0,2.5, by=.5) , las=2, cex.axis=axisC, lwd.ticks=3)
box(which="plot")
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xS,xE), ylim=c(ylP,yhP),type="n", axes=FALSE, xlab=" ", ylab=" ",
yaxs="i", xaxs="i")
points(datHmet$doy[datHmet$doy>=xS&datHmet$doy<=xE&datHmet$year==yrS]+
(datHmet$hour[datHmet$doy>=xS&datHmet$doy<=xE&datHmet$year==yrS]/24),
datHmet$PAR[datHmet$doy>=xS&datHmet$doy<=xE&datHmet$year==yrS],
col=colH, type="l", lwd=6)
points(datLmet$doy[datLmet$doy>=xS&datLmet$doy<=xE&datLmet$year==yrS]+
(datLmet$hour[datLmet$doy>=xS&datLmet$doy<=xE&datLmet$year==yrS]/24),
datLmet$PAR[datLmet$doy>=xS&datLmet$doy<=xE&datLmet$year==yrS],
col=colLt, type="l", lwd=6)
axis(2, seq(0,1200, by=300) , las=2, cex.axis=axisC, lwd.ticks=3)
axis(1, seq(xS,xE, by=1),rep(" ", length(seq(xS,xE, by=1))), las=2, cex.axis=axisC, lwd.ticks=3)
mtext(seq(xS,xE, by=1),at=seq(xS,xE, by=1), line=4, side=1, cex=3)
box(which="plot")
mtext("Day of year", side=1, line=10, cex=5)
mtext("Photosynthetically", side=2, line=32, cex=5)
mtext("active radiation", side=2, line=22, cex=5)
mtext(expression(paste("(",mu,"mol m"^"-2","s"^"-1",")")), side=2, line=12, cex=5)
dev.off()
#################################################################
####make a panel of daily met #######
#################################################################
plotDI
wd <- 45
hd <-25
#day range for x axis
xl2016 <- 182
xh2016 <- 245
xl2017 <- 155
xh2017 <- 230
Tmin <- 0
Tmax<- 30
Dmin <- 0
Dmax <- 2.75
TDmin <- 90
TDmax <- 0
cx.p <- 7
lwp <- 6
cx.a <- 7
lwt <- 6
cx.m <- 6
#subset precip
prec2016 <- datAirP[datAirP$doy<=xh2016&datAirP$doy>=xl2016&datAirP$year==2016,]
prec2017 <- datAirP[datAirP$doy<=xh2017&datAirP$doy>=xl2017&datAirP$year==2017,]
#just plot day L for the presentation
jpeg(paste0(plotDI , "\\daily_met_fig_for_Utica.jpg"), width=3300, height=2600, units="px",quality=100)
layout(matrix(seq(1,6), ncol=2, byrow=TRUE), width=rep(lcm(wd),6), height=rep(lcm(hd),6))
#plot 1
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016, xh2016), ylim=c(Tmin,Tmax), xaxs="i", yaxs="i", axes=FALSE, xlab =" ", ylab=" ")
points(datLmet$doy[datLmet$year==2016]+(datLmet$hour[datLmet$year==2016]/24), datLmet$Temp[datLmet$year==2016], col="royalblue3", pch=19, cex=cx.p, lwd=lwp, type="l")
box(which="plot")
axis(2,seq(Tmin,Tmax,by=5), cex.axis=cx.a,las=2,lwd.ticks=lwt)
mtext("Average daily", side=2, cex=cx.m, line=25)
mtext("air temperature (C)", side=2, cex=cx.m, line=14)
mtext("2016",side=3,cex=cx.m,line=5)
#plot 2
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017, xh2017), ylim=c(Tmin,Tmax), xaxs="i", yaxs="i", axes=FALSE, xlab =" ", ylab=" ")
points(datLmet$doy[datLmet$year==2017]+(datLmet$hour[datLmet$year==2017]/24), datLmet$Temp[datLmet$year==2017], col="royalblue3", pch=19, cex=cx.p, lwd=lwp, type="l")
box(which="plot")
mtext("2017",side=3,cex=cx.m,line=5)
#plot 3
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016, xh2016), ylim=c(Dmin,Dmax), xaxs="i", yaxs="i", axes=FALSE, xlab =" ", ylab=" ")
points(datLmet$doy[datLmet$year==2016]+(datLmet$hour[datLmet$year==2016]/24), datLmet$D[datLmet$year==2016], col="royalblue3", pch=19, cex=cx.p, lwd=lwp, type="l")
box(which="plot")
axis(2,seq(Dmin+.5,Dmax-0.25,by=.5), cex.axis=cx.a,las=2,lwd.ticks=lwt)
mtext("Average daily vapor", side=2, cex=cx.m, line=25)
mtext("pressure deficit (KPa)", side=2, cex=cx.m, line=14)
#plot 4
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017, xh2017), ylim=c(Dmin,Dmax), xaxs="i", yaxs="i", axes=FALSE, xlab =" ", ylab=" ")
points(datLmet$doy[datLmet$year==2017]+(datLmet$hour[datLmet$year==2017]/24), datLmet$D[datLmet$year==2017], col="royalblue3", pch=19, cex=cx.p, lwd=lwp, type="l")
box(which="plot")
#plot 5
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2016, xh2016), ylim=c(TDmin,TDmax), xaxs="i", yaxs="i", axes=FALSE, xlab =" ", ylab=" ")
points(TDall$doy[TDall$year==2016&TDall$site=="ld"], TDall$TDday[TDall$year==2016&TDall$site=="ld"], col="royalblue3" , lwd=lwp, type="l")
points(TDall$doy[TDall$year==2016&TDall$site=="hd"], TDall$TDday[TDall$year==2016&TDall$site=="hd"], col="tomato3" , lwd=lwp, type="l")
box(which="plot")
axis(2,seq(TDmin,TDmax,by=-10), cex.axis=cx.a,las=2,lwd.ticks=lwt)
axis(1, seq(185,235, by=10),rep(" ",length(seq(185,235, by=10))), cex.axis=cx.a,lwd.ticks=lwt)
mtext(seq(185,235, by=10), at=seq(185,235, by=10), cex=5, side=1,line=5)
mtext("Permafrost", side=2, cex=cx.m, line=25)
mtext("thaw depth(cm)", side=2, cex=cx.m, line=14)
mtext("Day of year",outer=TRUE, side=1, cex=cx.m, line=-10)
legend(215,5, c("low density", "high density"), col=c("royalblue3", "tomato3"), cex=8, lwd=lwp,bty="n")
#plot 6
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), xlim=c(xl2017, xh2017), ylim=c(TDmin,TDmax), xaxs="i", yaxs="i", axes=FALSE, xlab =" ", ylab=" ")
points(TDall$doy[TDall$year==2017&TDall$site=="ld"], TDall$TDday[TDall$year==2017&TDall$site=="ld"], col="royalblue3", lwd=lwp, type="l")
points(TDall$doy[TDall$year==2017&TDall$site=="hd"], TDall$TDday[TDall$year==2017&TDall$site=="hd"], col="tomato3" , lwd=lwp, type="l")
box(which="plot")
axis(1, seq(160,230, by=10),rep(" ",length(seq(160,230, by=10))), cex.axis=cx.a,lwd.ticks=lwt)
mtext(seq(160,230, by=10), at=seq(160,230, by=10), cex=5, side=1,line=5)
dev.off()
########################################################################################################
##### make a plot of leaf area to sapwood area
wd <- 40
hd <- 40
jpeg(paste0(plotDI , "\\leaf_sap_comp.jpg"), width=1500, height=1500, units="px",quality=100)
layout(matrix(c(1),ncol=1), width=lcm(wd), height=lcm(hd))
plot(c(0,1),c(0,1), xlim=c(-.5,5.5), ylim=c(0,2), xlab=" ", ylab=" ", xaxs="i",yaxs="i", axes=FALSE, type="n")
polygon(c(0,0,2,2), c(0,datLSA$LSrat[1],datLSA$LSrat[1],0), col="royalblue3")
polygon(c(3,3,5,5), c(0,datLSA$LSrat[2],datLSA$LSrat[2],0), col="tomato3")
arrows(c(1,4), datLSA$LSrat-(datLSA$LSrat.sd/sqrt(datLSA$LSrat.n)),
c(1,4),datLSA$LSrat+(datLSA$LSrat.sd/sqrt(datLSA$LSrat.n)), code=0, lwd=4)
axis(1, c(-1,1,4,6), c(" ", " "," ", " "), cex.axis=3, lwd.ticks=4)
mtext(c("low density", "high density"), side=1,at=c(1,4), line=2, cex=3)
axis(2, seq(0,2, by=.5), cex.axis=3, lwd.ticks=4,las=2)
legend(3.5,2, c("1 se"), lwd=4, bty="n", cex=4)
mtext("Stand", side=1, line=6, cex=5)
mtext(expression(paste("Leaf area : sapwood area ratio (m"^"2"~"cm"^"-2"~")")), side=2, cex=5, line=7)
dev.off()
|
a4310c6544f253330d218a0d4a696438ab02b6aa | 388bda89408c41d303d4ded330323a0edabfd8cc | /Scripts/server.R | 1e2c0d1f2dc9015ebca3d89daaf92dbdfdcd6976 | [] | no_license | edwardhalimm/DrugSeizure | 51efe4603b09dbdbebe3f42d284496046c39ef1c | 6a262c5f7bc8a7bd5a9de2f8af2d4edae8df8e3c | refs/heads/master | 2020-04-06T17:37:48.704358 | 2019-01-29T21:29:57 | 2019-01-29T21:29:57 | 157,667,264 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,025 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(leaflet)
library(httr)
library(dplyr)
library(readxl)
library(shiny)
library(shinydashboard)
library(graphics)
library(googleVis)
library(ggplot2)
#Read in original data set and set useful columns
data <- suppressWarnings(read_xlsx("data/IDSReport.xlsx", sheet = 6, col_names = TRUE))
data <- select(data, SUBREGION , COUNTRY, SEIZURE_DATE, DRUG_NAME, AMOUNT, DRUG_UNIT, PRODUCING_COUNTRY,
DEPARTURE_COUNTRY, DESTINATION_COUNTRY)
#Read in coordinates data sets
coords <- read.csv("data/coords.csv", stringsAsFactors = FALSE)
names(coords) <- c("iso2c", "lat", "lng", "name")
match_table <- read.csv("data/country_codes.csv", stringsAsFactors = FALSE)
match_table <- select(match_table,country_name,iso2c)
#Join coordinate columns to data
#loses a few that do not exist in both(useful)
match_and_coords <- inner_join(coords, match_table, by = "iso2c") %>% select(country_name, lat, lng)
names(match_and_coords) <- c("COUNTRY","LAT_COUNTRY","LNG_COUNTRY")
data <- left_join(data, match_and_coords, by = c("COUNTRY"))
names(match_and_coords) <- c("COUNTRY","LAT_PRODUCING","LNG_PRODUCING")
data <- left_join(data, match_and_coords, by = c("PRODUCING_COUNTRY" = "COUNTRY"))
names(match_and_coords) <- c("COUNTRY","LAT_DEPARTURE","LNG_DEPARTURE")
data <- left_join(data, match_and_coords, by = c("DEPARTURE_COUNTRY" = "COUNTRY"))
names(match_and_coords) <- c("COUNTRY","LAT_DESTINATION","LNG_DESTINATION")
data <- left_join(data, match_and_coords, by = c("DESTINATION_COUNTRY" = "COUNTRY"))
names(match_and_coords) <- c("country", "lat", "long")
coords <- match_and_coords
#arrow_chart <- read.csv("../data/arrow_chart.csv", stringsAsFactors = FALSE)
#arrow_chart$long <- as.numeric(arrow_chart$long)
#arrow_chart$lat <- as.numeric(arrow_chart$lat)
# token <- pk.eyJ1IjoiamFuZXR0ZWN3ayIsImEiOiJjanA2ZHJwcW0wOHk3M3BvNmNlYWE2dGJ5In0.ZsZjug12tYHP1K_751NFWA
#maptile <- "https://api.mapbox.com/v4/mapbox.emerald/page.html?access_token=pk.eyJ1IjoiamFuZXR0ZWN3ayIsImEiOiJjanA2ZHJwcW0wOHk3M3BvNmNlYWE2dGJ5In0.ZsZjug12tYHP1K_751NFWA"
shinyServer(function(input, output) {
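  # reactive subset of the seizure records for the drug chosen in the UI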
getData <- reactive({
input_data <- filter(data, DRUG_NAME == input$drug)
})
output$seizures_map <- renderLeaflet({
input_data <- getData()
leaflet() %>%
addTiles() %>%
setView(lat = 49.81749 ,lng = 15.47296,zoom = 3) %>%
addCircles( lng = input_data$LNG_COUNTRY, lat = input_data$LAT_COUNTRY,
weight = 1,
radius = input_data$AMOUNT / 5,
color = "#FF2500",
popup = paste("Country: ", input_data$COUNTRY,
"<br>Drug Name: ", input_data$DRUG_NAME,
"<br>Amount: ", input_data$AMOUNT, input_data$DRUG_UNIT)) %>%
#Button to zoom out
addEasyButton((easyButton(
icon = "fa-globe", title = "Zoom to Level 1",
onClick = JS("function(btn, map){map.setZoom(1); }")
))) %>%
#Button to locate user
addEasyButton(easyButton(
icon = "fa-crosshairs", title = "Locate Me",
onClick = JS("function(btn,map){ map.locate({setView:true}); }")
))
})
# delete?
output$relationship_map <- renderLeaflet({
leaflet() %>%
addTiles()
})
output$relationship_map <- renderLeaflet({
drug_data <- read_xlsx("data/IDSReport.xlsx", sheet = 6, col_names = TRUE)
location_data <- read_xlsx("data/Location_longitude_latitude.xlsx", col_names = TRUE)
val <- 0
df <- data.frame(lat=numeric(0), lng=numeric(0), stringsAsFactors=FALSE)
selected_country <- filter(drug_data, input$country2 == COUNTRY)
    selected_country <- as.data.frame(selected_country)
coordinates <- filter(location_data, input$country2 == name)
select_lat <- coordinates$latitude[1]
select_lng <- coordinates$longitude[1]
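    # collect coordinates for every known (non-"Unknown") producing country tied to seizures in
    # the selected country; flag val = 1 if the selected country is itself a producing country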
for(row in 1:nrow(selected_country)) {
if(!is.na(selected_country$PRODUCING_COUNTRY[row]) & selected_country$PRODUCING_COUNTRY[row] != "Unknown") {
coordinates <- filter(location_data, selected_country$PRODUCING_COUNTRY[row] == name)
df[nrow(df)+1,] <- c(coordinates$latitude[1], coordinates$longitude[1])
if (input$country2== selected_country$PRODUCING_COUNTRY[row]) {
val <- 1
}
}
}
icon <- awesomeIcons(icon = 'flag', iconColor = 'red')
if(val == 0) {
      df <- na.omit(df)
df %>%
leaflet() %>%
addTiles() %>%
addMarkers(popup="Producing Country") %>%
addAwesomeMarkers(lat = select_lat, lng = select_lng, icon = icon, popup="Seizure Country")
} else {
df %>%
leaflet() %>%
addTiles() %>%
addMarkers(popup="Producing Country") %>%
addAwesomeMarkers(lat = select_lat, lng = select_lng, icon = icon, popup="Seizure and Producing Country")
}
})
#Subregion Data
subregionCoords <- read.csv("data/subregion_coords.csv", stringsAsFactors = FALSE)
#Icon
skullIcon <- iconList(
skull = makeIcon("skull.png", "data/skull.png", 40, 40)
)
#Amount of drug seizure each subregion
count_subregion <- group_by(data, SUBREGION) %>%
summarise(count = n())
#Amount of drug type each subregion
drug_type_count <- reactive({
drug_in_each_subregion <- filter(data, data$SUBREGION == input$subregion)
drug_type_in_subregion <- filter(drug_in_each_subregion, drug_in_each_subregion$DRUG_NAME == input$drugType)
count_drug_in_subregion <- group_by(drug_type_in_subregion, DRUG_NAME) %>%
summarise(count = n())
number_of_the_drug <- count_drug_in_subregion$count
number_of_the_drug
})
#Number of total drug seizure in each subregion (There is this much of drug seizure in this subregion)
content <- paste(sep = "<br/>", count_subregion$count)
#Subregion Set View LATITUDE
view_latitude <- reactive({
finding_lat <- filter(subregionCoords, subregionCoords$subregion == input$subregion)
the_lat <- finding_lat$latitude
})
#Subregion Set View LONGITUDE
view_longitude <- reactive({
finding_long <- filter(subregionCoords, subregionCoords$subregion == input$subregion)
the_long <- finding_long$longitude
})
#Leaflet most_region_map
output$most_region_map <- renderLeaflet({
drug_in_the_region <- paste("The number of ", input$drugType, " seizure in this region: ", drug_type_count(), sep="")
num_long <- view_longitude()
num_lat <- view_latitude()
leaflet(data = subregionCoords[1:13,]) %>%
addTiles() %>%
setView(lng = num_long, lat = num_lat, zoom = 5) %>%
addMarkers(lng = subregionCoords$longitude, lat = subregionCoords$latitude,
icon = ~skullIcon,
label = "Press Me",
labelOptions = labelOptions(direction = "bottom",
style = list(
"color" = "red",
"font-family" = "serif",
"font-style" = "italic",
"box-shadow" = "3px 3px rgba(0,0,0,0.25)",
"font-size" = "12px",
"border-color" = "rgba(0,0,0,0.5)")),
popup = paste ("<b>", subregionCoords$subregion,"</b>", "<br>",
"Amount of drug seizure in this region: ", content, "<br>",
drug_in_the_region)
) %>%
addEasyButton((easyButton(
icon = "fa-globe", title = "Zoom out",
onClick = JS("function(btn, map){map.setZoom(1); }")
))) %>%
addMeasure(
position = "bottomleft",
primaryLengthUnit = "meters",
primaryAreaUnit = "sqmeters",
activeColor = "#3D535D",
completedColor = "#7D4479")
})
drug_data <- data
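  # helper: returns a two-column data frame pairing the selected country with each reported
  # producing (origin) or destination country, optionally restricted to one drug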
get_filtered <- function(current_country, relationship, current_drug) {
if (current_drug == "ALL"){
selected_country <- filter(drug_data, COUNTRY == current_country)
} else {
selected_country <- filter(drug_data, COUNTRY == current_country & DRUG_NAME == current_drug)
}
if(relationship == "Country of Origin") {
df <- data.frame(COUNTRY = as.character(), PRODUCING_COUNTRY = as.character(), stringsAsFactors=FALSE)
for(row in 1:nrow(selected_country)) {
if(!is.na(selected_country$PRODUCING_COUNTRY[row]) & selected_country$PRODUCING_COUNTRY[row] != "Unknown") {
df[nrow(df)+1,] <- c(current_country, selected_country$PRODUCING_COUNTRY[row] %>% as.character())
}
}
df
} else if (relationship == "Destination Country") {
df <- data.frame(COUNTRY = as.character(), DESTINATION_COUNTRY = as.character(), stringsAsFactors=FALSE)
for(row in 1:nrow(selected_country)) {
if(!is.na(selected_country$DESTINATION_COUNTRY[row]) & selected_country$DESTINATION_COUNTRY[row] != "Unknown") {
df[nrow(df)+1,] <- c(current_country, selected_country$DESTINATION_COUNTRY[row] %>% as.character())
}
}
df
}
}
get_country_plot <- function(input) {
data_for_plot <- get_filtered(input$country, input$relationship, input$drug)
my_angle <- numeric()
if(input$angle == "Vertical") {
my_angle <- 90
} else if (input$angle == "Horizontal") {
my_angle <- 0
}
if (input$relationship == "Country of Origin") {
ggplot(data_for_plot, aes(x = PRODUCING_COUNTRY)) +
geom_bar(stat = "count", fill = "darkgreen") +
labs(title = "Counts of Seizures for a Given Country Produced in or Destined for Another Country") +
theme(text = element_text(size=15),
axis.text.x = element_text(angle=my_angle, hjust=1),
plot.margin=unit(c(1,1,1.5,1.5),"cm"))
} else if (input$relationship == "Destination Country") {
ggplot(data_for_plot, aes(x = DESTINATION_COUNTRY)) +
geom_bar(stat = "count", fill = "orangered") +
labs(title = "Counts of Seizures for a Given Country Produced in or Destined for Another Country") +
theme(text = element_text(size=15),
axis.text.x = element_text(angle=my_angle, hjust=1),
plot.margin=unit(c(1,1,1.5,1.5),"cm"))
}
}
output$country_chart <- renderPlot({
get_country_plot(input)
})
output$summary <- renderPrint({
"Navigate the draggable menu to investigate drug trafficking trends based on data from the United Nations Office on Drugs and Crime. NOTE: Data is not available for all countries."
})
})
|
3f16eaf6f9c0b378410efa25960e3925060354fe | 65d97a10d91455337de2d04160a2d88e32fef34f | /package/pathtemp/R/pathsample.R | 40569ccb108c0b6e580079b7dc0fffe410a221e6 | [] | no_license | yao-yl/path-tempering | 1552c9da8575c8af45350c39a45c6966d9085e94 | 21778401020b6820d7c38bbc062534f50d5efd8b | refs/heads/master | 2022-12-20T19:12:24.064577 | 2020-10-05T04:06:43 | 2020-10-05T04:06:43 | 286,168,339 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,037 | r | pathsample.R | #' Adaptive path sampling
#'
#' Run path sampling with adaptations.
#'
#' @export
#' @param sampling_model The stan model generated from \code{\link{code_temperature_augmented}}.
#' @param data_list The list of data used in the original stan model.
#' @param N_loop The max adaptations. The default is 10.
#' @param max_sample_store The max number of samples to store from previous adaptations. The default is
#' 4000.
#' @param iter The number of iterations for the augmented stan program.
#' @param iter_final The number of iterations during the final adaptation.
#' @param max_treedepth The max tree depth for the augmented stan program. A smaller
#' max_treedepth may increase exploration efficiency in early adaptations.
#' @param thin The number of thining for the augmented stan program.
#' @param a_lower The lower bound of the quadrature. When a < a_lower, the inverse
#' temperature is 0 and the sample is from the base.
#' @param a_upper The upper bound of the quadrature. When a > a_upper, the inverse
#' temperature is 1 and the sample is from the target.
#' @param K_logit An integer, the length of the logit kernels. The default is 20.
#' @param K_gaussian An integer, the length of the Gaussian kernels. The default is 20.
#' @param N_grid The number of internal interpolations in parametric estimation
#' The default is 100.
#' @param visualize_progress whether to visualize the progress.
#'
#' @details The function fits adaptations of path sampling with the given
#' parameters and pseudo priors specified by \code{b}. In each adaptation it runs the stan program,
#' runs path sampling to compute the log normalization constant and log marginal,
#' and regularizes these two estimations with extra parametric regularization.
#'
#'
#'
#' @examples
#' \dontrun{
#' library(rstan)
#' rstan_options(auto_write = TRUE)
#' sampling_model=stan_model(file_new)
#' path_sample_fit=path_sample(sampling_model=sampling_model,
#' iter_final=6000, iter=2000,
#' data=list(gap=10), N_loop = 6,
#' visualize_progress = TRUE)
#' sim_cauchy=extract(path_sample_fit$fit_main)
#' in_target= sim_cauchy$lambda==1
#' in_prior = sim_cauchy$lambda==0
#' # sample from the target
#' hist(sim_cauchy$theta[in_target])
#' # sample from the base
#' hist(sim_cauchy$theta[in_prior])
#' # the joint "path"
#' plot(sim_cauchy$a, sim_cauchy$theta)
#' # the normalization constant
#' plot(g_lambda(path_sample_fit$path_post_a), path_sample_fit$path_post_z)
#'}
#'
#'
#' @return a \code{\link{path_fit}} object.
#'
#' @seealso \code{\link{path_quadrature}}, \code{\link{path_gradients}}, \code{\link{path_fit}}.
#'
#'
#
path_sample=function(sampling_model,data_list, visualize_progress=FALSE,
N_loop =10, max_sample_store=4000,
a_lower =.1, a_upper = .8, K_logit = 20, K_gaussian=20, N_grid=100,
thin=2, iter=2000, max_treedepth=10, iter_final=4000){
# initialization:
b <- rep(0, 1 + K_logit + K_gaussian)
mu_logit <- a_lower + (a_upper - a_lower)*seq(1,2*K_logit-1,2)/(2*K_logit-1)
sigma_logit <- 2*rep((a_upper - a_lower)/K_logit, K_logit)
mu_gaussian <- mu_logit
sigma_gaussian <- sigma_logit
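	# flat start: b = 0 gives a uniform pseudo prior, and the logit/Gaussian kernel centers
	# are spread evenly across (a_lower, a_upper)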
all_a=NULL
all_log_lik=NULL
if(visualize_progress==TRUE)
par(mfrow=c(N_loop,3), mar=c(2,2,2,1), mgp=c(1.5,.5,0), tck=-.01)
for (i in 1:N_loop){
fit <- path_fit(sampling_model=sampling_model, data_list=data_list,
a_lower, a_upper, b, K_logit, mu_logit, sigma_logit, K_gaussian, mu_gaussian, sigma_gaussian,
all_a, all_log_lik, N_grid=N_grid,iter=ifelse(i==N_loop, iter_final, iter),
max_treedepth=max_treedepth, thin=thin, visualize_progress=visualize_progress)
b <- -fit$b
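		# carry the fitted coefficients forward (sign-flipped) as the pseudo prior for the next adaptation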
all_a <- fit$all_a
all_log_lik <- fit$all_log_lik
if(length(all_a)> max_sample_store )
{
discard=length(all_a)- max_sample_store
all_a=all_a[-c(1:discard)]
all_log_lik=all_log_lik[-c(1:discard)]
}
print( paste("------ adaptation=", i,"---------fit measure = ", fit$fit_measure,"---------"))
}
return(fit)
}
|
2168fe9518ce3ced757013ff1a2c5bd0d94617d1 | 6d9ab08f20be79379b2975f7162789229a4d838d | /R/con_zs_holland.R | 0f0878cd8a582a25d632d5145627071c15558b83 | [] | no_license | cran/holland | 7189a58a79d78614158fc5ca0a7433734422a870 | 9d49ebdc45998936b6934c92b68a5c418cc529a3 | refs/heads/master | 2023-07-15T11:10:44.038514 | 2021-09-01T07:30:02 | 2021-09-01T07:30:02 | 379,600,021 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,646 | r | con_zs_holland.R | #' @title Congruence Index according to Zener & Schnuelle (1976)
#' @keywords congruence
#' @export con_zs_holland
#' @description The function computes the congruence index according to Zener & Schnuelle (1976).
#' @details The function finds the congruence according to Zener & Schnuelle (1976) between the three-letter Holland-codes given in argument a, which is the person code, and argument b, which is the environment code. The Index as defined by Zener & Schnuelle (1976) targets (only) three letters from the Holland code. The degree of congruence is output, according to its definition by Zener & Schnuelle (1976), as a reciprocal value of a distance. This means, for example, that a value of '6' is the result for a perfect fit of two three-letter codes !
#' @param a a character vector with person Holland codes.
#' @param b a character vector with environment Holland codes.
#' @return a numeric with value for congruence.
#' @references Holland, J.L. 1963. A theory of vocational choice. I. Vocational images and choice. \emph{Vocational Guidance Quarterly, 11}(4), 232–239.
#' @references Zener, T. B. & Schnuelle, L. (1976). Effects of the self-directed search on high school students. \emph{Journal of Counseling Psychology, 23}(4), 353–359.
#' @examples
#' con_zs_holland(a="RIA",b="SEC") # max. difference
#' con_zs_holland(a="RIA",b="RIA") # max. similarity
################################################################################
# func. by joerg-henrik heine jhheine(at)googlemail.com
con_zs_holland <- function(a,b)
# kongruenzindex nach Zener-Schnuelle(1976)
# zur Bestimmung der Übereinstimmung für jede zeile der matrix X==Personencodes mit
# dem Vector V==Umweltcode diese Reihenfolge ist bei der Eingabe W I C H T I G !!!
# (vgl. Joerin-Fux, Simone "Persö. & Berufst.: Theor. & Instrum. v. J. Holland")
# (vgl. Sageder,J. in Abel, J.& Tarnai, Ch. 19xx)
{
a <- toupper(unlist(strsplit(a,split = "",fixed = TRUE))) # so that e.g. "RIA" can be entered as a single string
b <- toupper(unlist(strsplit(b,split = "",fixed = TRUE))) # so that e.g. "RIA" can be entered as a single string
if(length(a) != length(b)) stop("a and b must have the same number of characters in Holland-code")
if(length(a) > 3) stop("the zs index is in this function limited to three-letter Holland-codes")
collapse<-function (x, sep = "") {paste(x, collapse = sep)}# adapted from library(BBmisc)
erg1 <- collapse(as.character(charmatch(a,b,nomatch=0 )))
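# charmatch() returns, for each letter of the person code a, its position in the environment code b
# (0 if absent); collapsing the three positions gives a pattern string such as "123" or "100",
# which is mapped to the congruence value below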
erg1[erg1=="123"]<-6 # alle 3 gleich (in richtiger reihenfolge)
erg1[erg1=="120"]<-5 # erste 2 gleich (in richtiger reihenfolge)
erg1[erg1=="321"]<-4 # 3 gleich bel. reihenfolge (ohne 123)
erg1[erg1=="312"]<-4 # 3 gleich bel. reihenfolge (ohne 123)
erg1[erg1=="231"]<-4 # 3 gleich bel. reihenfolge (ohne 123)
erg1[erg1=="213"]<-4 # 3 gleich bel. reihenfolge (ohne 123)
erg1[erg1=="132"]<-4 # 3 gleich bel. reihenfolge (ohne 123)
erg1[erg1=="100"]<-3 # nur erster gleich
erg1[erg1=="130"]<-2 # ersten 2 gleich bel. reihenfolge (ohne 120)
erg1[erg1=="210"]<-2 # ersten 2 gleich bel. reihenfolge (ohne 120)
erg1[erg1=="230"]<-2 # ersten 2 gleich bel. reihenfolge (ohne 120)
erg1[erg1=="310"]<-2 # ersten 2 gleich bel. reihenfolge (ohne 120)
erg1[erg1=="320"]<-2 # ersten 2 gleich bel. reihenfolge (ohne 120)
erg1[erg1=="100"]<-1 # nur erster an bel. stelle gleich
erg1[erg1=="200"]<-1 # nur erster an bel. stelle gleich
erg1[erg1=="300"]<-1 # nur erster an bel. stelle gleich
erg1[erg1=="000"]<-0 # keine gleich
erg1[erg1!="0"&erg1!="1"&erg1!="2"&erg1!="3"&erg1!="4"&erg1!="5"&erg1!="6"]<-0
erg1<-as.numeric(erg1)
return (erg1)
}
|
25ae077478a2b86bbe959a22d0998cca998f8505 | cf09008185b813e272bbe208120852ebfb277fe8 | /GCD_quiz_4.R | e0353155554548f0f2c8d751f47b73e1ba6ceb5a | [] | no_license | AnkurDesai11/datascienceJHUcoursera | efd1eedd5ab29c8835ac0cf129fa1b9e68b88719 | 4a36448fb2827d4f5048c8b21c210684cf363746 | refs/heads/master | 2023-02-09T21:29:26.288860 | 2021-01-11T04:26:39 | 2021-01-11T04:26:39 | 255,146,982 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,374 | r | GCD_quiz_4.R | #question 1
urlq1<-"https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(urlq1, destfile="GCD_quiz4_dataset1.csv")
ds1<-read.csv("GCD_quiz4_dataset1.csv")
splitnames<-strsplit(names(ds1), "wgtp")
splitnames[[123]]
#question 2
urlq2<-"https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(urlq2, destfile="GCD_quiz4_dataset2.csv")
ds2<-read.csv("GCD_quiz4_dataset2.csv")
names(ds2)
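#GDP values include thousands separators (commas), so strip them before converting to numeric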
ds2gdp<-suppressWarnings(as.numeric(gsub("\\,", "", ds2$X.3)))
sum(ds2gdp, na.rm=TRUE)/nrow(ds2)
#question 4 - using ds2 from before
urlq3<-"https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(urlq3, destfile="GCD_quiz4_dataset3.csv")
ds3<-read.csv("GCD_quiz4_dataset3.csv")
ds4 <- ds2[-(1:4), ]
colnames(ds4) = c("CountryCode", "rank", "x1", "name", "GDP", "abc", "x5", "x6", "x7", "x8")
ds5<-merge(ds3, ds4, by.x = "CountryCode", by.y = "CountryCode")
length(grep("^(Fiscal year end: June)", ds5$Special.Notes))
#question 5
library(lubridate)
library(quantmod)
amzn = getSymbols("AMZN",auto.assign=FALSE)
sampleTimes = index(amzn)
length(grep("^2012", sampleTimes))
sampletimes2012<-sampleTimes[grep("^2012", sampleTimes)]
sampletimes2012<-ymd(sampletimes2012)
sampletimes2012weekday<-wday(sampletimes2012)
length(sampletimes2012weekday[sampletimes2012weekday==2]) |
b761222ea452c2926ac24179f0aded8d4af3f1b4 | 690c3c3e583094011d339d20a819b0fbe11a2bf8 | /stream_flow.R | 2aad1975440142b876e9989a4c0b72d048dcd3f2 | [] | no_license | AllisonVincent/StarFM-code | a0f907e2931460b7867600bd1566cb39a600338b | eac755b6ef61af5d1925b3b65d02269c846e79e1 | refs/heads/master | 2021-06-17T15:02:43.013841 | 2021-04-20T17:19:42 | 2021-04-20T17:19:42 | 194,706,294 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,633 | r | stream_flow.R | setwd('C:/Users/Allison and Brian/Documents/Research/STARFM/STARFMtest/Analysis_Tests/Stream_data')
library(sp)
library(sf)
library(ggplot2)
library(rgdal)
library(raster)
library(dplyr)
library(caret)
library(data.table)
library(stats)
library(fields)
library(hydroTSM)
library(SpatialTools)
library(foreign)
library(rasterVis)
library(RColorBrewer)
library(viridis)
### Read in the csv file
df_2008<- read.csv(file = 'wy_2008.csv')
df_2010<- read.csv(file = 'wy_2010.csv')
df_2012<- read.csv(file = 'wy_2012.csv')
### Convert dates from characters to dates
df_2008$Date<- as.Date(df_2008$Date, format = "%m/%d/%Y")
df_2010$Date<- as.Date(df_2010$Date, format = "%m/%d/%Y")
df_2012$Date<- as.Date(df_2012$Date, format = "%m/%d/%Y")
### Map discharge from individual water years
ggplot(data = df_2008, mapping = aes(x = Date, y = cfs)) +
geom_line() +
labs(
x = "Date",
y = "Discharge (cfs)",
title = "Water Year 2008"
)
ggplot(data = df_2010, mapping = aes(x = Date, y = cfs)) +
geom_line() +
labs(
x = "Date",
y = "Discharge (cfs)",
title = "Water Year 2010"
)
ggplot(data = df_2012, mapping = aes(x = Date, y = cfs)) +
geom_line() +
labs(
x = "Date",
y = "Discharge (cfs)",
title = "Water Year 2012"
)
###### Plot all discharge data together
df_all<- read.csv(file = 'all_wys.csv')
DOWY<- df_all$DOWY
wy2008_q<- df_all$X2008
wy2010_q<- df_all$X2010
wy2012_q<- df_all$X2012
ggplot(df_all) +
geom_line(aes(x = DOWY, y = wy2008_q, color = "blue")) +
geom_line(aes(x = DOWY, y = wy2010_q, color = "black")) +
geom_line(aes(x = DOWY, y = wy2012_q, color = "red")) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
legend.text = element_text(size = 12),
legend.title = element_text(size = 14),
plot.title = element_text(size = 15)) +
scale_color_identity(name = "Water Year",
breaks = c("blue", "black", "red"),
labels = c("WY 2008", "WY 2010", "WY 2012"),
guide = "legend") +
ggtitle("Discharge at Watershed Outlet by Water Year") +
xlab("Day of Water Year") +
ylab("Discharge (cfs)")
##### Create cumulative discharge plots for individual water years
df_2008[, "cum_Q"] <- cumsum(df_2008$cfs)
df_2010[, "cum_Q"] <- cumsum(df_2010$cfs)
df_2012[, "cum_Q"] <- cumsum(df_2012$cfs)
ggplot(data = df_2008, mapping = aes(x = Date, y = cum_Q)) +
geom_line() +
labs(
x = "Date",
y = "Cumulative Discharge (cfs)",
title = "Water Year 2008"
)
ggplot(data = df_2010, mapping = aes(x = Date, y = cum_Q)) +
geom_line() +
labs(
x = "Date",
y = "Cumulative Discharge (cfs)",
title = "Water Year 2010"
)
ggplot(data = df_2012, mapping = aes(x = Date, y = cum_Q)) +
geom_line() +
labs(
x = "Date",
y = "Cumulative Discharge (cfs)",
title = "Water Year 2012"
)
####### Create cumulative sum discharge plot with all water years
## Two of our WYs are leap years, so they contain one extra value than WY 2010. To do a cumulative sum, we need to replace the NA for this day with a value of 0
temp_wy2010<- df_all$X2010
temp_wy2010[is.na(temp_wy2010)] = 0
df_all[, "cum_Q_2008"] <- cumsum(df_all$X2008)
df_all[, "cum_Q_2010"] <- cumsum(temp_wy2010)
df_all[, "cum_Q_2012"] <- cumsum(df_all$X2012)
cumsum_2008<- df_all$cum_Q_2008
cumsum_2010<- df_all$cum_Q_2010
cumsum_2012<- df_all$cum_Q_2012
ggplot(df_all) +
geom_line(aes(x = DOWY, y = cumsum_2008, color = "blue")) +
geom_line(aes(x = DOWY, y = cumsum_2010, color = "black")) +
geom_line(aes(x = DOWY, y = cumsum_2012, color = "red")) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
legend.text = element_text(size = 12),
legend.title = element_text(size = 14),
plot.title = element_text(size = 13)) +
scale_color_identity(name = "Water Year",
breaks = c("blue", "black", "red"),
labels = c("WY 2008", "WY 2010", "WY 2012"),
guide = "legend") +
ggtitle("Discharge Cumulative Sums at Watershed Outlet by Water Year") +
xlab("Day of Water Year") +
ylab("Discharge (cfs)")
### Find the center of mass, or when 50% of discharge has passed (doing this manually because I'm not sure how else to do it)
com_2008<- cumsum_2008[366]/2 ## according to the cumsum values, 50% of discharge passes through on day 253
com_2010<- cumsum_2010[366]/2 ## according to the cumsum values, 50% of discharge passes through on day 246 (subtract 1 for the leap year)
com_2012<- cumsum_2012[366]/2 ## according to the cumsum values, 50% of discharge passes through on day 221
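## A programmatic alternative to scanning the cumsum values by hand (sketch): the first index
## at which the cumulative discharge reaches half of the season total is the day of water year
## (df_all holds one row per DOWY). These should match the days noted above; for WY 2010 the
## leap-year offset mentioned in the comment still applies.
com_day_2008 <- which(cumsum_2008 >= com_2008)[1]
com_day_2010 <- which(cumsum_2010 >= com_2010)[1]
com_day_2012 <- which(cumsum_2012 >= com_2012)[1]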
########### Combine the percent pixel snow covered data with the hydrographs for their respective years
### Load the table that has the total number of snow-covered pixels by date and percentage of the watershed that is snow covered
setwd('C:/Users/Allison and Brian/Documents/Research/STARFM/STARFMtest/Analysis_Tests')
## load the snow percent by day data
wy2008_total<- read.csv("./WY2008/WY2008_snow_sum_by_day.csv")
wy2010_total<- read.csv("./WY2010/WY2010_snow_sum_by_day.csv")
wy2012_total<- read.csv("./WY2012/WY2012_snow_sum_by_day.csv")
wy2008_total$Date<- as.Date(wy2008_total$Date, format = "%m/%d/%Y")
wy2010_total$Date<- as.Date(wy2010_total$Date, format = "%m/%d/%Y")
wy2012_total$Date<- as.Date(wy2012_total$Date, format = "%m/%d/%Y")
ggplot(data = wy2008_total, mapping = aes(x = Date, y = perc.of.watershed)) +
geom_line() +
labs(
x = "Date",
y = "Perc of snow covered pixels",
title = "Water Year 2008"
)
ggplot(data = wy2010_total, mapping = aes(x = Date, y = perc.of.watershed)) +
geom_line() +
labs(
x = "Date",
y = "Perc of snow covered pixels",
title = "Water Year 2010"
)
ggplot(data = wy2012_total, mapping = aes(x = Date, y = perc.of.watershed)) +
geom_line() +
labs(
x = "Date",
y = "Perc of snow covered pixels",
title = "Water Year 2012"
)
##### Use the function below to find the moving average for each water year
# x: the vector
# n: the number of samples
# centered: if FALSE, then average current sample and previous (n-1) samples
# if TRUE, then average symmetrically in past and future. (If n is even, use one more sample from future.)
movingAverage <- function(x, n=1, centered=FALSE) {
if (centered) {
before <- floor ((n-1)/2)
after <- ceiling((n-1)/2)
} else {
before <- n-1
after <- 0
}
# Track the sum and count of number of non-NA items
s <- rep(0, length(x))
count <- rep(0, length(x))
# Add the centered data
new <- x
  # Add to the count wherever the value isn't NA
count <- count + !is.na(new)
# Now replace NA_s with 0_s and add to total
new[is.na(new)] <- 0
s <- s + new
# Add the data from before
i <- 1
while (i <= before) {
# This is the vector with offset values to add
new <- c(rep(NA, i), x[1:(length(x)-i)])
count <- count + !is.na(new)
new[is.na(new)] <- 0
s <- s + new
i <- i+1
}
# Add the data from after
i <- 1
while (i <= after) {
# This is the vector with offset values to add
new <- c(x[(i+1):length(x)], rep(NA, i))
count <- count + !is.na(new)
new[is.na(new)] <- 0
s <- s + new
i <- i+1
}
# return sum divided by count
s/count
}
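## Tiny illustration of the helper on a made-up toy vector (not watershed data): NAs are
## excluded from both the running sum and the sample count, so each window averages only
## its observed values.
# movingAverage(c(2, 4, NA, 8, 10), n = 3, centered = TRUE)   # returns 3 3 6 6 9
# movingAverage(c(2, 4, NA, 8, 10), n = 3)                    # trailing window instead of centered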
#### Plot the daily percent snow values as bars and the moving average as a line on top
perc_all_wy<- read.csv(file = './inter_annual/snow_perc_all_years.csv')
DOWY<- perc_all_wy$DOWY
wy2008_perc<- perc_all_wy$X2008_perc
wy2010_perc<- perc_all_wy$X2010_perc
wy2012_perc<- perc_all_wy$X2012_perc
### create plot for WY 2008
wy_2008_avg<- movingAverage(wy2008_perc, 10, TRUE) ## A 10-day centered moving average seems to be the minimum for a smoother curve
perc_all_wy$mvg_avg_2008<- wy_2008_avg
### find the latest day of the water year when percent snow cover is 50% or greater
which(wy_2008_avg >= 50) ## gives every position where the value is at or above 50%; take the last one for the latest day of 50% snow cover
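## A direct way to pull that last day instead of reading it off the printed vector
## (the same pattern applies to the 2010 and 2012 series below):
max(which(wy_2008_avg >= 50))  # last day of the water year with smoothed SCA of at least 50%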
ggplot(perc_all_wy) +
geom_col(aes(x = DOWY, y = wy2008_perc, fill = "sky blue"), color = "sky blue") + ## snow cover percent
geom_line(aes(x = DOWY, y = wy_2008_avg, color = "firebrick"), size = 1.0) + ## moving avg line
geom_line(aes(x = DOWY, y = wy2008_q/10, color = "black"), size = 1.0) + ## stream discharge
geom_vline(aes(xintercept = 253, color = "purple"), size = 1) + ## the line that indicates the center of mass
  geom_vline(aes(xintercept = 208, color = "dark green"), size = 1) + ## the line that indicates the last day when the mvg avg of snow cover percent is 50% or greater
scale_y_continuous(
name = "Percent",
sec.axis = sec_axis(~.*10, name = "Discharge (cfs)")
) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
legend.text = element_text(size = 11),
legend.title = element_text(size = 14),
plot.title = element_text(size = 13)) +
scale_fill_identity(name = NULL,
breaks = c("sky blue"),
labels = c("SCA(%)"),
guide = "legend") +
scale_color_identity(name = NULL,
breaks = c("firebrick", "black", "purple", "dark green"),
labels = c("SCA Moving Avg", "Stream Discharge", "Q center of mass", "Last day of 50% SCA"),
guide = "legend") +
ggtitle("Percent Snow Cover and Stream Discharge for WY 2008") +
xlab("Day of Water Year")
### create plot for WY 2010
temp_wy2010<- wy2010_perc
temp_wy2010[is.na(temp_wy2010)] = 0
wy_2010_avg<- movingAverage(temp_wy2010, 10, TRUE)
perc_all_wy$mvg_avg_2010<- wy_2010_avg
### find the latest day of the water year when percent snow cover is 50% or greater
which(wy_2010_avg >= 50) ## gives every position where the value is at or above 50%; take the last one for the latest day of 50% snow cover
ggplot(perc_all_wy) +
geom_col(aes(x = DOWY, y = wy2010_perc, fill = "sky blue"), color = "sky blue") +
geom_line(aes(x = DOWY, y = wy_2010_avg, color = "firebrick"), size = 1.0) +
geom_line(aes(x = DOWY, y = wy2010_q/10, color = "black"), size = 1.0) +
geom_vline(aes(xintercept = 246, color = "purple"), size = 1) +
geom_vline(aes(xintercept = 120, color = "dark green"), size = 1) +
scale_y_continuous(
name = "Percent",
sec.axis = sec_axis(~.*10, name = "Discharge (cfs)")
) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
legend.text = element_text(size = 11),
legend.title = element_text(size = 14),
plot.title = element_text(size = 13)) +
scale_fill_identity(name = NULL,
breaks = c("sky blue"),
labels = c("SCA(%)"),
guide = "legend") +
scale_color_identity(name = NULL,
breaks = c("firebrick", "black", "purple", "dark green"),
labels = c("SCA Moving Avg", "Stream Discharge", "Q center of mass", "Last day of 50% SCA"),
guide = "legend") +
ggtitle("Percent Snow Cover and Stream Discharge for WY 2010") +
xlab("Day of Water Year")
### create plot for WY 2012
wy_2012_avg<- movingAverage(wy2012_perc, 30, TRUE)
perc_all_wy$mvg_avg_2012<- wy_2012_avg
which(wy_2012_avg >= 50) ## gives every position where the value is at or above 50%; take the last one for the latest day of 50% snow cover
ggplot(perc_all_wy) +
geom_col(aes(x = DOWY, y = wy2012_perc, fill = "sky blue"), color = "sky blue") +
geom_line(aes(x = DOWY, y = wy_2012_avg, color = "firebrick"), size = 1.0) +
geom_line(aes(x = DOWY, y = wy2012_q/10, color = "black"), size = 1.0) +
geom_vline(aes(xintercept = 221, color = "purple"), size = 1) +
geom_vline(aes(xintercept = 165, color = "dark green"), size = 1) +
scale_y_continuous(
name = "Percent",
sec.axis = sec_axis(~.*10, name = "Discharge (cfs)")
) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
legend.text = element_text(size = 11),
legend.title = element_text(size = 14),
plot.title = element_text(size = 13)) +
scale_fill_identity(name = NULL,
breaks = c("sky blue"),
labels = c("SCA(%)"),
guide = "legend") +
scale_color_identity(name = NULL,
breaks = c("firebrick", "black", "purple", "dark green"),
labels = c("SCA Moving Avg", "Stream Discharge", "Q center of mass", "Last day of 50% SCA"),
guide = "legend") +
ggtitle("Percent Snow Cover and Stream Discharge for WY 2012") +
xlab("Day of Water Year")
#### Plot the above, but without the legend
ggplot(perc_all_wy) +
geom_col(aes(x = DOWY, y = wy2008_perc), color = "sky blue") + ## snow cover percent
geom_line(aes(x = DOWY, y = wy_2008_avg), color = "firebrick", size = 1.0) + ## moving avg line
geom_line(aes(x = DOWY, y = wy2008_q/10), color = "black", size = 1.0) + ## stream discharge
geom_vline(aes(xintercept = 253), color = "purple", size = 1) + ## the line that indicates the center of mass
  geom_vline(aes(xintercept = 208), color = "dark green", size = 1) + ## the line that indicates the last day when the mvg avg of snow cover percent is 50% or greater
scale_y_continuous(
name = "Percent",
sec.axis = sec_axis(~.*10, name = "Discharge (cfs)")
) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
plot.title = element_text(size = 13)) +
ggtitle("Percent Snow Cover and Stream Discharge for WY 2008") +
xlab("Day of Water Year")
ggplot(perc_all_wy) +
geom_col(aes(x = DOWY, y = wy2010_perc), color = "sky blue") +
geom_line(aes(x = DOWY, y = wy_2010_avg), color = "firebrick", size = 1.0) +
geom_line(aes(x = DOWY, y = wy2010_q/10), color = "black", size = 1.0) +
geom_vline(aes(xintercept = 246), color = "purple", size = 1) +
geom_vline(aes(xintercept = 120), color = "dark green", size = 1) +
scale_y_continuous(
name = "Percent",
sec.axis = sec_axis(~.*10, name = "Discharge (cfs)")
) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
plot.title = element_text(size = 13)) +
ggtitle("Percent Snow Cover and Stream Discharge for WY 2010") +
xlab("Day of Water Year")
ggplot(perc_all_wy) +
geom_col(aes(x = DOWY, y = wy2012_perc), color = "sky blue") +
geom_line(aes(x = DOWY, y = wy_2012_avg), color = "firebrick", size = 1.0) +
geom_line(aes(x = DOWY, y = wy2012_q/10), color = "black", size = 1.0) +
geom_vline(aes(xintercept = 221), color = "purple", size = 1) +
geom_vline(aes(xintercept = 165), color = "dark green", size = 1) +
scale_y_continuous(
name = "Percent",
sec.axis = sec_axis(~.*10, name = "Discharge (cfs)")
) +
theme(axis.text = element_text(size = 11),
axis.title.x = element_text(size = 13),
axis.title.y = element_text(size = 13),
plot.title = element_text(size = 13)) +
ggtitle("Percent Snow Cover and Stream Discharge for WY 2012") +
xlab("Day of Water Year")
|
## ---- udregninger opg5.R ("calculations, exercise 5"; repo: Blikdal/Examchha511) ----
getDataPart(faithful) #the required data
eruption <- faithful[,1] #specifying eruption
KDE(eruption, method="naive" )
KDE(eruption, method="gaussian" )
densityplot(eruption, n=200)
densityplot(eruption, n=200, method="gaussian")
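# A base-R cross-check of the same idea (assumes only the stats/graphics packages, in case the
# course's KDE()/densityplot() helpers are unavailable): stats::density() with a gaussian kernel
# gives a comparable smooth estimate of the eruption durations.
plot(density(eruption, kernel = "gaussian"), main = "Old Faithful eruption durations")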
|
## ---- long_data.R (repo: mt-edwards/data-compression) ----
############################
# Long Data. #
############################
# Command line arguments
# =======================
# - 1) Variable name.
# - 2) Ensemble size.
# - 3) AR order.
# - 4) MA order.
args = commandArgs(TRUE)
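# Example invocation (sketch; the argument values are made up for illustration):
#   Rscript scripts/long/long_data.R tas 10 2 1
# would load data/tas/smf.r10.p2.q1.R and data/tas/spec.r10.p2.q1.R and write
# data/tas/nspec.r10.p2.q1.R, following the paste0() patterns below.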
# Libraries.
# =======================
package_names = c("tidyverse", "plyr")
lapply(package_names, library, character.only = TRUE)
# Set Working Directory (bash scripts).
# ========================
setwd("/Users/matthewedwards/Sync/Projects/data-compression")
# Source functions.
# =======================
source("scripts/long/long_fun.R")
# Load data.
# =======================
load(paste0("data/", args[1], "/smf.r", args[2], ".p", args[3], ".q", args[4], ".R"))
load(paste0("data/", args[1], "/spec.r", args[2], ".p", args[3], ".q", args[4], ".R"))
# Normalised spectrum.
# ========================
nspec = aaply(spec, 1:2, long_nspec, smf = smf)
# Save files.
# =======================
save(nspec, file = paste0("data/", args[1], "/nspec.r", args[2], ".p", args[3], ".q", args[4], ".R"))
# Clear workspace.
# =======================
rm(list = ls())
|
## ---- fars_functions.R (repo: RG9303/Project-RG-Week2) ----
library(roxygen2)
#' @title fars_read
#' @description The function \code{fars_read} read a csv file if it exists and forwards the argument a data frame.
#' @param filename to enter a database with format csv.
#' @return if file exists, this function read the file and return a database as a data frame. If the extension
#' is diferent to csv, it can not read the file.
#' @details you need install packages like dplyr and readr before this or it may result in an error
#' @importFrom readr read_csv
#' @importFrom dplyr tbl_df
#' @example fars_read(filename = x)
#' @export
fars_read <- function(filename) {
if(!file.exists(filename))
stop("file '", filename, "' does not exist")
data <- suppressMessages({
readr::read_csv(filename, progress = FALSE)
})
dplyr::tbl_df(data)
}
#' @title make_filename
#' @description The function \code{make_filename} coerces the year to an integer and returns a character vector
#' containing a formatted combination of text and the year value.
#' @param year the year to be coerced to an integer.
#' @return a character vector of the form "accident_YYYY.csv.bz2".
#' @details you need to enter a number (or something coercible to an integer), otherwise the year becomes NA.
#' @examples make_filename(2014)
#' @export
make_filename <- function(year) {
year <- as.integer(year)
sprintf("accident_%d.csv.bz2", year)
}
#' @title fars_read_years
#' @description The function \code{fars_read_years} builds the file name for each requested year, reads the
#' corresponding database and reduces it to the MONTH and year columns.
#' @param years one or more years; each is passed to \code{make_filename} and the resulting file is read
#' with \code{fars_read}.
#' @return a list with one data frame (MONTH, year) per requested year. For a year whose file is not
#' available the element is NULL and a warning is issued.
#' @details you need to enter years whose accident files exist (2013, 2014 or 2015 in this project),
#' otherwise a warning message "invalid year" is returned.
#' @importFrom dplyr mutate select
#' @examples fars_read_years(2014)
#' @export
fars_read_years <- function(years) {
lapply(years, function(year) {
file <- make_filename(year)
tryCatch({
dat <- fars_read(file)
dplyr::mutate(dat, year = year) %>%
dplyr::select(MONTH, year)
}, error = function(e) {
warning("invalid year: ", year)
return(NULL)
})
})
}
#' @title fars_summarize_years
#' @description The function \code{fars_summarize_years} binds the yearly data frames, groups them by year and
#' month, counts the accidents and spreads the year/count pairs into a wide table.
#' @param years one or more years passed on to \code{fars_read_years}.
#' @return a data frame with one row per month and one column per year containing the accident counts.
#' @details you need to install the tidyr package and keep the format of the variables unchanged.
#' @importFrom dplyr bind_rows group_by summarize
#' @importFrom tidyr spread
#' @examples fars_summarize_years(2014)
#' @export
fars_summarize_years <- function(years) {
dat_list <- fars_read_years(years)
dplyr::bind_rows(dat_list) %>%
dplyr::group_by(year, MONTH) %>%
dplyr::summarize(n = n()) %>%
tidyr::spread(year, n)
}
#' @title fars_map_state
#' @description The function \code{fars_map_state} filters the yearly database to a single state, cleans
#' out-of-range coordinates and plots the accident locations on a state map.
#' @param state.num the number that identifies a state in the STATE variable.
#' @param year the year to be read, converted to a file name through \code{make_filename}.
#' @return if the state number is contained in the STATE variable of the data, the function filters the data
#' to that state, sets out-of-range LONGITUD and LATITUDE values to NA and draws a map of the accident
#' locations. If there are no rows to draw it prints the message "no accidents to plot" and returns an
#' invisible NULL. An invalid state number stops the function with an error.
#' @details you need to install the "maps" package and specify the number of a state.
#' @importFrom dplyr filter
#' @importFrom maps map
#' @importFrom graphics points
#' @examples fars_map_state(19, 2014)
#' @export
fars_map_state <- function(state.num, year) {
filename <- make_filename(year)
data <- fars_read(filename)
state.num <- as.integer(state.num)
if(!(state.num %in% unique(data$STATE)))
stop("invalid STATE number: ", state.num)
data.sub <- dplyr::filter(data, STATE == state.num)
if(nrow(data.sub) == 0L) {
message("no accidents to plot")
return(invisible(NULL))
}
is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
with(data.sub, {
maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
xlim = range(LONGITUD, na.rm = TRUE))
graphics::points(LONGITUD, LATITUDE, pch = 46)
})
}
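## Minimal end-to-end sketch of how the functions above chain together (not run here;
## it assumes the accident_YYYY.csv.bz2 files are present in the working directory):
# fars_summarize_years(c(2013, 2014, 2015))  # wide table of monthly accident counts per year
# fars_map_state(19, 2014)                   # accident locations for one state in one year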
|
## ---- xfp_boxplot.R (repo: lbuckheit/nfl) ----
library(tidyverse)
library(ggrepel)
library(ggimage)
library(nflfastR)
library(dplyr)
library(ggplot2)
library(ggrepel)
library(stringr)
options(scipen = 9999)
source("utils/nfl_utils.R")
### Generate boxplots of expected FP ###
# TODO - Split this into RB and WR files
### Background Work ###
# Define variables
SEASON_TO_ANALYZE <- 2020
START_WEEK <- 1
# TODO - Remember that now seasons have 18 weeks
END_WEEK <- 17
PTS_PER_RECEPTION <- 1
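# Sketch for the TODO above: the regular season grew to 18 weeks starting in 2021, so the
# final week could be derived from the season rather than hard-coded (left commented so the
# 2020 run above is unchanged).
# END_WEEK <- ifelse(SEASON_TO_ANALYZE >= 2021, 18, 17)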
# Load ADP data
adp_data <- read.csv(file = "helpful_csvs/2021_clean_adp_data.csv") %>%
select(gsis_id, adp)
# Grab the rosters for use in filtering by position
players <- nflfastR::fast_scraper_roster(SEASON_TO_ANALYZE) %>%
subset(select = c(team, position, first_name, last_name, gsis_id))
# Load annual PBP Data
pbp_df <- load_pbp(SEASON_TO_ANALYZE) %>%
filter(season_type == "REG", week >= START_WEEK, week <= END_WEEK)
### Expected points from rushing, grouped by game ###
xfp_rushes <- calculate_rush_xfp_by_game(pbp_df)
### Expected points from receiving ###
# Add xyac data to pbp
pbp_with_xyac <- add_xyac_to_pbp(pbp_df)
# Calculate xfp using xyac data
xfp_targets <- calculate_rec_xfp_by_game(pbp_with_xyac, PTS_PER_RECEPTION)
# Prune the dataframe only to what's necessary
concise_xfp_targets <- xfp_targets %>%
select(
game_id,
player = receiver,
gsis_id = receiver_id,
rec_games = games,
exp_rec_pts = exp_pts,
actual_rec_pts = pts
)
## Boxplots for receivers
# Grab only the receivers above a certain season points threshold
relevant_players <- concise_xfp_targets %>%
group_by(gsis_id) %>%
summarize(total_xfp = sum(exp_rec_pts)) %>%
filter(total_xfp > 50)
# Filter by receiver type if you wish
relevant_receivers <- merge(relevant_players, players) %>%
filter(position == "TE")
# Create list of season-long/other data to merge with the game-by-game data
relevant_receivers_with_adp <- merge(relevant_receivers, adp_data)
# Create a df of all the games by relevant receivers
receivers_to_plot = merge(concise_xfp_targets, relevant_receivers_with_adp)
# Plot
# To order by avg. xfp per game use reorder(player, -exp_rec_pts)
# To order by total season xfp, use reorder(player, -total_xfp)
# To order by IQR size use reorder(player, exp_rec_pts, IQR)
# To order by ADP use reorder(player, adp)
ggplot(receivers_to_plot, aes(x=reorder(player, adp), y=exp_rec_pts, label=player)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = -90)) +
labs(x = "Player",
y = str_glue("Exp.{PTS_PER_RECEPTION}PPR Pts."),
title = str_glue("{SEASON_TO_ANALYZE} Expected {PTS_PER_RECEPTION}PPR Pts. Boxplots"),
caption = "Via nflFastR"
)
## Boxplots for RBs
# Prune the dataframe only to what's necessary
concise_xfp_rushes <- xfp_rushes %>%
select(
player = rusher,
gsis_id,
game_id,
rush_games = games,
exp_rush_pts,
actual_rush_pts
)
# Get the total (season-long) combined rush/rec xfp for players (for use in determining relevant players and graph ordering)
combined_xfp_aggregate <- dplyr::bind_rows(concise_xfp_rushes, concise_xfp_targets) %>%
group_by(gsis_id, player) %>%
summarise(total_xfp = sum(exp_rec_pts, exp_rush_pts, na.rm=TRUE))
# Capture only players above a certain threshold for eventual graphing
players_meeting_points_threshold <- combined_xfp_aggregate %>%
filter(total_xfp > 125) %>%
select(player, total_xfp)
# Create list of season-long/other data to merge with the game-by-game data
rbs_to_merge <- merge(players_meeting_points_threshold, adp_data)
# Build a list of each player's combined rush/rec xfp on a game-by-game basis
combined_xfp_by_game <- dplyr::bind_rows(concise_xfp_rushes, concise_xfp_targets) %>%
group_by(gsis_id, player, game_id) %>%
summarise(
xfp = sum(exp_rec_pts, exp_rush_pts, na.rm=TRUE)
)
# Combine a list of all running back with a list of all players meeting the graphing threshold
# to produce a list of all running backs that will be graphed
relevant_rbs <- merge(rbs_to_merge, players) %>%
filter(position == "RB") %>%
select(gsis_id, player, total_xfp, adp)
# Then merge the above list with the list of all games to get all games played by relevant RBs
rb_xfp_by_game <- merge(combined_xfp_by_game, relevant_rbs)
# Plot
# To order by avg. xfp per game use reorder(player, -xfp)
# To order by total season xfp, use reorder(player, -total_xfp)
# To order by IQR size use reorder(player, xfp, IQR)
# To order by ADP use reorder(player, adp)
ggplot(rb_xfp_by_game, aes(x=reorder(player, -xfp), y=xfp, label=player)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = -90)) +
labs(x = "Player",
y = str_glue("Exp.{PTS_PER_RECEPTION}PPR Pts."),
title = str_glue("{SEASON_TO_ANALYZE} Expected {PTS_PER_RECEPTION}PPR Pts. Boxplots"),
caption = "Via nflFastR"
)
|
## ---- prophet_test.R (repo: m0hits/prophet_exploration_R) ----
#### Session Setup ----
rm(list = ls())
gc()
set.seed(786)
Time = Sys.time()
#### Packages ----
list.of.packages <- c("tidyverse",
"forecast",
"purrr",
"broom",
"readxl",
"writexl",
"lubridate",
"prophet",
"dygraphs")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"] )]
if(length(new.packages)) install.packages(new.packages)
for(i in list.of.packages){
library(i, character.only = TRUE)
}
#### Input variables ----
test_period = 5
#### Read Input data ----
raw <- read_xlsx("Raw_Data.xlsx")
#### Data Processing ----
# raw <- raw %>%
# spread(key = Period, value = Sales, fill = 0) %>%
# gather(key = "Period", value = "Sales", 2:ncol(.))
raw_nest <- raw %>%
group_by(Id) %>%
nest(-Id)
#### Generate ts objects ----
ts_create <- function(x){
ts(x$Sales,
start = c(str_sub(x$Period[[1]], 1, 4) %>% as.numeric(), str_sub(x$Period[[1]], 5, 6) %>% as.numeric()),
frequency = 12)
}
raw_nest <- raw_nest %>%
mutate(data_ts = map(data, ts_create))
#### Model Definition ----
f_prophet <- function(x, h){
dat = data.frame(ds = as_date(time(x)), y = as.matrix(x))
prophet(dat)
}
#### Apply model ----
raw_nest <- raw_nest %>%
mutate(prophet_fit = map(data_ts, f_prophet))
#### Forecasting using the model ----
f_predict <- function(x){
df <- make_future_dataframe(x, 24, freq = 'month', include_history = T)
predict(x, df)
}
raw_nest <- raw_nest %>%
mutate(forecast = map(prophet_fit, f_predict))
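## Sketch: inspect one series' point forecasts and uncertainty intervals from the nested
## forecasts (ds, yhat, yhat_lower and yhat_upper are standard columns of prophet's predict output).
# raw_nest$forecast[[1]] %>%
#   select(ds, yhat, yhat_lower, yhat_upper) %>%
#   tail(24)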
#### Visualization ----
dyplot.prophet(raw_nest$prophet_fit[[1]], raw_nest$forecast[[1]])
dyplot.prophet(raw_nest$prophet_fit[[2]], raw_nest$forecast[[2]])
dyplot.prophet(raw_nest$prophet_fit[[3]], raw_nest$forecast[[3]])
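#### Optional component plots (sketch) ----
## prophet also provides prophet_plot_components() for a trend/seasonality breakdown of a fit,
## e.g. for the first series:
# prophet_plot_components(raw_nest$prophet_fit[[1]], raw_nest$forecast[[1]])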
|