blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | filename | content
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5846b59c404700653dc838e18f7b9275f4db2267 | 5e8bb97ee1805d7c68aeb2ebcd61718ed20ae282 | /odds_ratio.R | 14ccf7e611aef5736cf9275587ccec0eb940d59b | [] | no_license | crglaser/swing-miss-percentages | 2879eaca136be6f59789ff83289b215da6997e9a | 629129682d4a3568f5310175c9f5323e5df55096 | refs/heads/master | 2021-01-10T13:16:28.265996 | 2016-01-14T00:21:40 | 2016-01-14T00:21:40 | 49,166,906 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 370 | r | odds_ratio.R | odds_ratio <- function(hitter_probability, pitcher_probability, league_probability){
hitter_odds <- probability_to_odds(hitter_probability)
pitcher_odds <- probability_to_odds(pitcher_probability)
league_odds <- probability_to_odds(league_probability)
ratio <- (hitter_odds * pitcher_odds) / league_odds
percentage <- ratio / (ratio + 1)
return(percentage)
} |
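# --- Hedged usage sketch (added; not part of odds_ratio.R) -------------------
# odds_ratio() relies on a probability_to_odds() helper defined elsewhere in
# the repository; a minimal definition consistent with how it is used above:
probability_to_odds <- function(p) p / (1 - p)
# Illustrative call (numbers are placeholders): combine a hitter's and a
# pitcher's swing-and-miss probabilities against the league-average rate.
odds_ratio(hitter_probability = 0.25, pitcher_probability = 0.30, league_probability = 0.20)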
28d5e177bfe16c127b7c81f176eb47969d3894f1 | 92cf452f280a949e9d8163bbcd2c07a12c045688 | /shinyRcode/R/reloadData.R | 846a596fdeb0fc9c514628e8907a99b3ac9d394a | [] | no_license | SWS-Methodology/faoswsFisheriesSUAFBSdocumentation | 8b59d60b16431ef4688e23dcdc3f44889a09a486 | c5a42a5c2b3dc7d25aeda15d1d3245a9ae910dfb | refs/heads/master | 2021-05-19T10:53:08.144166 | 2020-11-02T14:46:12 | 2020-11-02T14:46:12 | 251,660,525 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,006 | r | reloadData.R |
reloadData <- function(data, keycountry, minyear, maxyear, keydomain, keydataset,
keygeo = 'geographicAreaM49_fi', keytime = 'timePointYears',
keyelement= 'measuredElementSuaFbs',
keyitem = 'measuredItemFaostat_L2'){
sel_years <- as.character(as.numeric(minyear):as.numeric(maxyear))
if(nrow(data) == 0){
Key <- DatasetKey(domain = keydomain,
dataset = keydataset,
dimensions = list(geographicAreaM49_fi = Dimension(name = keygeo, keys = keycountry),
measuredElementSuaFbs = Dimension(name = keyelement,
GetCodeList(keydomain,
keydataset,
keyelement )[,code]),
measuredItemFaostat_L2 = Dimension(name = keyitem,
GetCodeList(keydomain,
keydataset,
keyitem )[,code]),
timePointYears = Dimension(name = keytime, keys = sel_years )))
withProgress(message = 'Data loading in progress',
value = 0, {
Sys.sleep(0.25)
incProgress(0.25)
dataKey <- GetData(Key)
Sys.sleep(0.75)
incProgress(0.95)
})
data <- dataKey
return(data)
  } else if(nrow(data) > 0 &
            (unique(data$geographicAreaM49_fi) != keycountry |
             min(unique(data$timePointYears)) != minyear |
             max(unique(data$timePointYears)) != maxyear)){
Key <- DatasetKey(domain = keydomain,
dataset = keydataset,
dimensions = list(geographicAreaM49_fi = Dimension(name = keygeo, keys = keycountry),
measuredElementSuaFbs = Dimension(name = keyelement,
GetCodeList(keydomain,
keydataset,
keyelement )[,code]),
measuredItemFaostat_L2 = Dimension(name = keyitem,
GetCodeList(keydomain,
keydataset,
keyitem )[,code]),
timePointYears = Dimension(name = keytime, keys = sel_years )))
withProgress(message = 'Data loading in progress',
value = 0, {
Sys.sleep(0.25)
incProgress(0.25)
dataKey <- GetData(Key)
Sys.sleep(0.75)
incProgress(0.95)
})
data <- dataKey
return(data)
} else {data <- NULL}
}
reloadDataToken <- function(data, keycountry, minyear, maxyear, keydomain, keydataset, keytoken,
keygeo = 'geographicAreaM49_fi', keytime = 'timePointYears',
keyelement= 'measuredElementSuaFbs',
keyitem = 'measuredItemFaostat_L2'){
sel_years <- as.character(as.numeric(minyear):as.numeric(maxyear))
if(nrow(data) == 0){
if(localrun){
if(CheckDebug()){
library(faoswsModules)
SETTINGS = ReadSettings("sws.yml")
R_SWS_SHARE_PATH = SETTINGS[["share"]]
SetClientFiles(SETTINGS[["certdir"]])
GetTestEnvironment(baseUrl = SETTINGS[["server"]],
token = keytoken)
}
} else {
R_SWS_SHARE_PATH = "Z:"
SetClientFiles("/srv/shiny-server/.R/QA/")
GetTestEnvironment(baseUrl = "https://swsqa.aws.fao.org:8181",
token = keytoken)
}
Key <- DatasetKey(domain = keydomain,
dataset = keydataset,
dimensions = list(geographicAreaM49_fi = Dimension(name = keygeo, keys = keycountry),
measuredElementSuaFbs = Dimension(name = keyelement,
GetCodeList(keydomain,
keydataset,
keyelement )[,code]),
measuredItemFaostat_L2 = Dimension(name = keyitem,
GetCodeList(keydomain,
keydataset,
keyitem )[,code]),
timePointYears = Dimension(name = keytime, keys = sel_years )))
withProgress(message = 'Data loading in progress',
value = 0, {
Sys.sleep(0.25)
incProgress(0.25)
dataKey <- GetData(Key)
Sys.sleep(0.75)
incProgress(0.95)
})
data <- dataKey
return(data)
  } else if(nrow(data) > 0 &
            (unique(data$geographicAreaM49_fi) != keycountry |
             min(unique(data$timePointYears)) != minyear |
             max(unique(data$timePointYears)) != maxyear)){
if(localrun){
if(CheckDebug()){
library(faoswsModules)
SETTINGS = ReadSettings("sws.yml")
R_SWS_SHARE_PATH = SETTINGS[["share"]]
SetClientFiles(SETTINGS[["certdir"]])
GetTestEnvironment(baseUrl = SETTINGS[["server"]],
token = keytoken)
}
} else {
R_SWS_SHARE_PATH = "Z:"
SetClientFiles("/srv/shiny-server/.R/QA/")
GetTestEnvironment(baseUrl = "https://swsqa.aws.fao.org:8181",
token = keytoken)
}
Key <- DatasetKey(domain = keydomain,
dataset = keydataset,
dimensions = list(geographicAreaM49_fi = Dimension(name = keygeo, keys = keycountry),
measuredElementSuaFbs = Dimension(name = keyelement,
GetCodeList(keydomain,
keydataset,
keyelement )[,code]),
measuredItemFaostat_L2 = Dimension(name = keyitem,
GetCodeList(keydomain,
keydataset,
keyitem )[,code]),
timePointYears = Dimension(name = keytime, keys = sel_years )))
withProgress(message = 'Data loading in progress',
value = 0, {
Sys.sleep(0.25)
incProgress(0.25)
dataKey <- GetData(Key)
Sys.sleep(0.75)
incProgress(0.95)
})
data <- dataKey
return(data)
} else {data <- NULL}
}
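# --- Hedged usage sketch (added; not part of reloadData.R) -------------------
# Both functions assume an authenticated FAO SWS session (faosws supplies
# DatasetKey, Dimension, GetCodeList and GetData) and a running Shiny session
# for withProgress(); reloadDataToken() additionally needs a valid SWS token.
# The codes below are hypothetical placeholders, not real domain/dataset names.
# sua <- reloadData(data       = data.table(),   # zero rows -> fresh pull
#                   keycountry = "840",          # M49 country code (placeholder)
#                   minyear    = "2010",
#                   maxyear    = "2018",
#                   keydomain  = "Fisheries",    # placeholder
#                   keydataset = "fi_sua")       # placeholder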
|
3f7cec553c5a3c59ab6aa2d3b6b70c90c55cc122 | 23691014ae69404742f014d19109b0d3075f871a | /man/fit-method.Rd | 21a85a49335786ae5f5021152d6a77870493876c | [] | no_license | zrmacc/CICs | d43c14c451344adcb6ff86be63b2195616031d4c | 28ad3929a27da57046e054bc7f07818f5199c521 | refs/heads/master | 2023-04-17T08:49:18.313907 | 2021-05-02T19:54:19 | 2021-05-02T19:54:19 | 274,303,685 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 348 | rd | fit-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class.R
\name{show,compCICs-method}
\alias{show,compCICs-method}
\title{Show Method for Compare CICs Object}
\usage{
\S4method{show}{compCICs}(object)
}
\arguments{
\item{object}{An object of class \code{compCICs}.}
}
\description{
Show Method for Compare CICs Object
}
|
ae758e1315c6f30a978b5829ab600e2faa43f5ec | e2af2bedf379072f012ef2847be39118192fe161 | /SFrestaurantscores/global.R | f863abc222413f2cb3ed9a9823c881b433f8ca25 | [] | no_license | hlau117/Project1-ShinyApp | de23cca85d8b3f3a65223ab90c8b558c168a3491 | 5023b98a8fcd2d54faa231824bbbcb9fa99f91d7 | refs/heads/master | 2021-07-10T16:43:56.665856 | 2017-10-13T14:53:46 | 2017-10-13T14:53:46 | 106,331,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,757 | r | global.R | library(xts)
library(dygraphs)
library(plyr)
library(dplyr)
load("total_postcode.rda")
load("Spatial_Zipcodes.rda")
SF_rest= read.csv("Restaurant_Scores.csv",sep=',')
#rename columns
SF_rest= SF_rest%>%select(rest_id=business_id,
name=business_name,
address=business_address,
postcode= business_postal_code,
lat=business_latitude,
long=business_longitude,
ins_id= inspection_id,
fulldate=inspection_date,
score= inspection_score,
descr= violation_description,
risk_cat= risk_category)
#remove rows with missing score values & position(lat & long) values
SF_rest=SF_rest %>% filter(is.na(lat)!=TRUE)
SF_rest=SF_rest %>% filter(is.na(score)!=TRUE)
#change date to date object and extract day, month, year into each separate column
SF_rest$fulldate=as.Date(SF_rest$fulldate, format = "%m/%d/%Y")
SF_rest$mmyydate=format(SF_rest$fulldate, "%Y-%m")
SF_rest$day= format(SF_rest$fulldate, "%d")
SF_rest$month= format(SF_rest$fulldate, "%m")
SF_rest$year= format(SF_rest$fulldate, "%Y")
#rename risk factor
SF_rest$risk_cat= gsub("^$","No Risk", SF_rest$risk_cat)
#clean post code factor
SF_rest= SF_rest %>% filter (!(postcode %in% c("00000","Ca","CA","","941"))) %>% droplevels()
SF_rest$postcode=gsub(".*2019.*", "94110", SF_rest$postcode)
#add a none feature for the day date
unique_days=unique(sort(SF_rest[,"day"], decreasing=FALSE))
unique_days=append(unique_days,"None")
#add NA for violation description of No risks category level
levels(SF_rest$descr)[levels(SF_rest$descr)==""] = "NA"
save(SF_rest, file = "Clean_SF_rest.rda")
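# --- Optional sanity checks (added; not part of the original script) ---------
# table(SF_rest$risk_cat)                                             # should now include "No Risk"
# intersect(unique(SF_rest$postcode), c("00000","Ca","CA","","941"))  # should be empty after the filter above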
|
fb8cabd40f68cacb57e320e8cd746e0448aedd49 | 3e8ad7252429b4f19795dab539308410804d056a | /functions.R | 71f1238240d2eeeae226c10368630576f0ee3110 | [] | no_license | dainiuxt/RprogrammingLectures | e0d0e5e79830b8265a43d58358cacc38b369b011 | e71ba3b83ef2628881f66a66a2fc0b8847487cdf | refs/heads/master | 2020-12-25T18:23:09.717279 | 2014-06-14T20:56:10 | 2014-06-14T20:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 296 | r | functions.R | #lapply
x <- list(a = 1:5, b = rnorm(10))
lapply(x, mean)
x <- list(a = 1:4, b = rnorm(10), c = rnorm(20, 1), d = rnorm(100, 5))
x<- 1:10
lapply(x, runif)
lapply(x, runif, min=0, max =10)
x <- list(a = matrix(1:4, 2, 2), b = matrix(1:6, 3, 2))
lapply(x, function(elt) elt[,1])
sapply(x, mean) |
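# Added study note: lapply() always returns a list, while sapply() simplifies
# to a vector/matrix when the element lengths allow it. With the list of
# matrices defined above:
lapply(x, nrow)   # list(a = 2, b = 3)
sapply(x, nrow)   # named integer vector a = 2, b = 3
sapply(x, function(elt) elt[,1])  # lengths differ (2 vs 3), so no simplification: still a list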
0e1473cf40623abf7d050c54ed45e59898505086 | 2efe10652a9d1d4f01219a5d7f8d429896660cfb | /signit-pipeline/signit_summary_table.R | 47cf1f1761c7c8fc164229e3e41a022666cde25e | [] | no_license | eyzhao/bio-pipelines | 9416b33f280e33ecf56c916833ee13e36f64c56d | 52e4e40e7b3f2da24675c422d58b7b373b7024a3 | refs/heads/master | 2021-03-24T09:48:16.445178 | 2019-12-04T17:25:10 | 2019-12-04T17:25:10 | 105,455,771 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,240 | r | signit_summary_table.R | ' signit_summary_table.R
Usage: signit_summary_table.R -i INPUT -o OUTPUT [ -s SIGNIT --fraction ]
Options:
-i --input INPUT Path to serialized output from SignIT (.Rds file)
-o --output OUTPUT Path to output summary stats table (.tsv file)
-s --signit SIGNIT Path to SignIT R library files. If not provided, will assume that SignIT is installed
and load the package using library(signit) instead.
--fraction If using this flag, summary table will report all values as exposure fractions instead
of number of mutations.
' -> doc
library(docopt)
args <- docopt(doc)
if (is.null(args[['signit']])) {
library(signit)
} else {
library(devtools)
library(tidyverse)
library(rjags)
library(nnls)
library(dbscan)
library(Rtsne)
load_all(args[['signit']])
}
message('Reading SignIT data')
exposures <- readRDS(args[['input']])
summary_table <- get_exposure_summary_table(exposures, alpha = c(0, 0.05, 0.5), fraction = args[['fraction']])
message('Created summary table.')
summary_table %>% write_tsv(args[['output']])
print(paste0('Results saved as TSV at ', args[['output']]))
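# --- Hedged invocation example (added; file names are placeholders) ----------
#   Rscript signit_summary_table.R -i exposures.Rds -o summary.tsv --fraction
# If SignIT is not installed as a package, point -s at a local checkout:
#   Rscript signit_summary_table.R -i exposures.Rds -o summary.tsv -s /path/to/signit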
|
9b6641683cedfcc3b8739b243376eb8e29a6c72c | bffd64b5c5f55b0b9791c99e7f245a49a7a05b7a | /Movie Recommender System/AppDeployCode/server.R | 998b348b0bd24a4e5fa16a815b55c97456654785 | [] | no_license | vkk2/schoolprojects | dc8dbe0915437098c2264af3c1978674b2016324 | 1a97a62bda724abe2aa6e07603471665cbc79f84 | refs/heads/main | 2023-02-28T01:32:10.800144 | 2021-02-04T16:50:28 | 2021-02-04T16:50:28 | 322,464,282 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,519 | r | server.R | ## server.R
library(recommenderlab)
library(Matrix)
# load functions
#source('functions/cf.R')
#source('functions/cf_algorithm.R') # collaborative filtering
#source('functions/similarity_measures.R') # similarity measures
# define functions
get_user_ratings = function(value_list) {
dat = data.table(MovieID = sapply(strsplit(names(value_list), "_"),
function(x) ifelse(length(x) > 1, x[[2]], NA)),
Rating = unlist(as.character(value_list)))
dat = dat[!is.null(Rating) & !is.na(MovieID)]
dat[Rating == " ", Rating := 0]
dat[, ':=' (MovieID = as.numeric(MovieID), Rating = as.numeric(Rating))]
dat = dat[Rating > 0]
}
# read in data
myurl = "https://liangfgithub.github.io/MovieData/"
movies = readLines(paste0(myurl, 'movies.dat?raw=true'))
movies = strsplit(movies, split = "::", fixed = TRUE, useBytes = TRUE)
movies = matrix(unlist(movies), ncol = 3, byrow = TRUE)
movies = data.frame(movies, stringsAsFactors = FALSE)
colnames(movies) = c('MovieID', 'Title', 'Genres')
movies$MovieID = as.integer(movies$MovieID)
movies$Title = iconv(movies$Title, "latin1", "UTF-8")
small_image_url = "https://liangfgithub.github.io/MovieImages/"
movies$image_url = sapply(movies$MovieID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
####
genre_ratingdf=read.csv("data/genre_rating.csv",stringsAsFactors = FALSE)
genre_list = c("Action", "Adventure", "Animation",
"Children's", "Comedy", "Crime",
"Documentary", "Drama", "Fantasy",
"Film-Noir", "Horror", "Musical",
"Mystery", "Romance", "Sci-Fi",
"Thriller", "War", "Western")
shinyServer(function(input, output, session) {
  # show the movies to be rated
output$ratings <- renderUI({
num_rows <- 20
num_movies <- 6 # movies per row
lapply(1:num_rows, function(i) {
list(fluidRow(lapply(1:num_movies, function(j) {
list(box(width = 2,
div(style = "text-align:center", img(src = movies$image_url[(i - 1) * num_movies + j], height = 150)),
#div(style = "text-align:center; color: #999999; font-size: 80%", books$authors[(i - 1) * num_books + j]),
div(style = "text-align:center", strong(movies$Title[(i - 1) * num_movies + j])),
div(style = "text-align:center; font-size: 150%; color: #f0ad4e;", ratingInput(paste0("select_", movies$MovieID[(i - 1) * num_movies + j]), label = "", dataStop = 5)))) #00c0ef
})))
})
})
# Calculate recommendations when the sbumbutton is clicked
df <- eventReactive(input$btn, {
withBusyIndicatorServer("btn", { # showing the busy indicator
# hide the rating container
useShinyjs()
jsCode <- "document.querySelector('[data-widget=collapse]').click();"
runjs(jsCode)
# get the user's rating data
value_list <- reactiveValuesToList(input)
user_ratings <- get_user_ratings(value_list)
user_results = (1:10)/10
user_predicted_ids = 1:10
user_ratings=na.omit(user_ratings)
#userid=rep(9999,nrow(user_ratings))
#print(nrow(user_ratings))
# print(user_ratings[1:5])
#user_ratings=data.frame("MovieID"=user_ratings$MovieID[1:nrow(user_ratings)],"Rating"=user_ratings$Rating[1:nrow(user_ratings)])
#userdf=data.frame("UserID"=rep(9999,nrow(user_ratings)))
#newuser=cbind(userdf,user_ratings)
#print(head(newuser))
#print(tail(newuser))
#print(nrow(newuser))
#print(ncol(newuser))
#print(newuser$Rating[1:5])
Rmat_ex=matrix(NA,3706)
for (i in 1:nrow(user_ratings)){
Rmat_ex[user_ratings$MovieID[i]]=user_ratings$Rating[i]
}
#Rmat_ex[1]=5
#Rmat_ex[2]=5
# print(Rmat_ex[1:5])
Rmat_ex <- as(t(Rmat_ex), "realRatingMatrix")
trainedmodel=readRDS("data/trainedmodel.rds")
#Rmat_ex=gettestmatrix(newuser)
model3_predtop_ex <- predict(object = trainedmodel, newdata = Rmat_ex, n = 10,
type = "topNList")
recom_list=as(model3_predtop_ex,"list")
# print(recom_list)
recom_movieid=as.integer(substring(recom_list[[1]],first=2))
#print(recom_movieid)
#recom_movieid=as.integer(recom_list[[1]])
recom_results <- data.table(MovieID=recom_movieid)
}) # still busy
}) # clicked on button
# display the recommendations
output$results <- renderUI({
num_rows <- 2
num_movies <- 5
recom_result <- df()
lapply(1:num_rows, function(i) {
list(fluidRow(lapply(1:num_movies, function(j) {
box(width = 2, status = "success", solidHeader = TRUE, title = paste0("Rank ", (i - 1) * num_movies + j),
div(style = "text-align:center",
a(img(src = paste0(small_image_url, recom_result$MovieID[(i - 1) * num_movies + j], '.jpg?raw=true'), height = 150))
),
div(style="text-align:center; font-size: 100%",
strong(movies$Title[which(movies$MovieID==recom_result$MovieID[(i - 1) * num_movies + j])])
)
)
})))
}) # rows
}) # renderUI function
#adding
# category <- c("Action", "Adventure", "Animation")
# population <- c(3,8,4)
#
# df2 <- data.frame(category,population)
#
# df_subset <- reactive({
# a <- subset(df2, category == input$state)
# return(a)
#})
#output$table1 <- renderTable(df_subset()) #Note how df_subset() was used and not df_subset
##########################################################
# This is the By genre portion for server.r
# output name is genreresults
#
###########################################################
output$genreresults <- renderUI({
num_rows <- 2
num_movies <- 5
#genre_ratingdf=genre_ratingdf %>% filter(Action==1) %>% top_n(5, ave_ratings)
genre_selected=input$state
if(genre_selected=="Action")
genre_ratingdf=genre_ratingdf %>% filter(Action==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Adventure")
genre_ratingdf=genre_ratingdf %>% filter(Adventure==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Animation")
genre_ratingdf=genre_ratingdf %>% filter(Animation==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Children's")
genre_ratingdf=genre_ratingdf %>% filter(Children.s==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Comedy")
genre_ratingdf=genre_ratingdf %>% filter(Comedy==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Crime")
genre_ratingdf=genre_ratingdf %>% filter(Crime==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Documentary")
genre_ratingdf=genre_ratingdf %>% filter(Documentary==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Drama")
genre_ratingdf=genre_ratingdf %>% filter(Drama==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Fantasy")
genre_ratingdf=genre_ratingdf %>% filter(Fantasy==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Film-Noir")
genre_ratingdf=genre_ratingdf %>% filter(Film.Noir==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Horror")
genre_ratingdf=genre_ratingdf %>% filter(Horror==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Musical")
genre_ratingdf=genre_ratingdf %>% filter(Musical==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Mystery")
genre_ratingdf=genre_ratingdf %>% filter(Mystery==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Romance")
genre_ratingdf=genre_ratingdf %>% filter(Romance==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Sci-Fi")
genre_ratingdf=genre_ratingdf %>% filter(Sci.Fi==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Thriller")
genre_ratingdf=genre_ratingdf %>% filter(Thriller==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="War")
genre_ratingdf=genre_ratingdf %>% filter(War==1) %>% top_n(5, ave_ratings)
else if (genre_selected=="Western")
genre_ratingdf=genre_ratingdf %>% filter(Western==1) %>% top_n(5, ave_ratings)
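    # A more compact equivalent of the if/else chain above (a sketch, assuming
    # the CSV columns are the make.names() forms of the genre labels, e.g.
    # "Children's" -> "Children.s", "Sci-Fi" -> "Sci.Fi"):
    # genre_col <- make.names(genre_selected)
    # genre_ratingdf <- genre_ratingdf %>% filter(.data[[genre_col]] == 1) %>% top_n(5, ave_ratings)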
list(fluidRow(lapply(1:5, function(j) {
box(width = 2, status = "success", solidHeader = TRUE, title = paste0("Rank ", j ),
div(style = "text-align:center",
a(img(src = paste0(small_image_url, genre_ratingdf$MovieID[j], '.jpg?raw=true'), height = 150))
),
div(style="text-align:center; font-size: 100%",
strong(genre_ratingdf$Title[j])
)
)
})))
# lapply(1:num_rows, function(i) {
# list(fluidRow(lapply(1:num_movies, function(j) {
# box(width = 2, status = "success", solidHeader = TRUE, title = paste0("Rank ", (i - 1) * num_movies + j),
#
# div(style = "text-align:center",
# a(img(src = paste0(small_image_url, genre_ratingdf$MovieID[i], '.jpg?raw=true'), height = 150))
# ),
# div(style="text-align:center; font-size: 100%",
# strong(genre_ratingdf$Title[i])
# )
#
# )
# }))) # columns
# })
#df1=genre_ratingdf %>% filter(Action==1))
#recom_result <- genre_ratingdf
# fluidRow({
# div(style = "text-align:center",
# a(img(src = paste0(small_image_url, 318, '.jpg?raw=true', height = 150)))
# )
# })
# lapply(1:num_rows, function(i) {
# list(fluidRow(lapply(1:num_movies, function(j) {
# box(width = 2, status = "success", solidHeader = TRUE, title = paste0("Rank ", (i - 1) * num_movies + j),
#
# div(style = "text-align:center",
# a(img(src = movies$image_url[recom_result$MovieID[(i - 1) * num_movies + j]], height = 150))
# ),
# div(style="text-align:center; font-size: 100%",
# strong(movies$Title[recom_result$MovieID[(i - 1) * num_movies + j]])
# )
#
# )
# }))) # columns
# }) # rows
}) # renderUI function
}) # server function |
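# --- Hedged standalone sketch (added; not part of the app) -------------------
# The core recommenderlab pattern used in df(): build a realRatingMatrix for
# the new user, then ask a trained recommender for a topNList. The model type
# behind "data/trainedmodel.rds" is not shown here, so UBCF below is only an
# illustration.
library(recommenderlab)
set.seed(1)
m <- matrix(sample(c(NA, 1:5), 200, replace = TRUE, prob = c(0.6, rep(0.08, 5))),
            nrow = 20, ncol = 10,
            dimnames = list(paste0("u", 1:20), paste0("m", 1:10)))
rmat <- as(m, "realRatingMatrix")
rec  <- Recommender(rmat[1:19, ], method = "UBCF")
top5 <- predict(rec, rmat[20, ], n = 5, type = "topNList")
as(top5, "list")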
8399feac9c382590f36bdcdfd3255d5c3b0122ea | 9509cefb9198144bde21774fdd5b3c7a68a005a7 | /man/targetsmet.Rd | 73c63104701d3b9fd0c66ffd379f66fee041d2ca | [] | no_license | jeffreyhanson/marxan | cc5d39f4a0980f8a7c4d1cf041a500721b03c198 | fff42df08ac0a8ad1f762f6402d15698babf1dff | refs/heads/master | 2021-08-07T12:54:25.036732 | 2016-11-03T04:53:28 | 2016-11-03T04:53:28 | 29,377,383 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 929 | rd | targetsmet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/MarxanResults.R, R/MarxanSolved.R
\name{targetsmet}
\alias{targetsmet}
\alias{targetsmet.MarxanResults}
\alias{targetsmet.MarxanSolved}
\title{Extract information on whether solutions have met targets}
\usage{
targetsmet(x, ...)
\method{targetsmet}{MarxanResults}(x, y = NULL)
\method{targetsmet}{MarxanSolved}(x, y = NULL)
}
\arguments{
\item{x}{"MarxanResults" or "MarxanSolved" object.}
\item{...}{not used.}
\item{y}{"NULL" to return all values, "integer" 0 to return values for best solution, "integer" value greater than 0 for \code{y}'th solution value.}
}
\value{
"matrix" or "logical" vector depending on arguments.
}
\description{
This function reports whether a solution has met the targets for each species in a solution.
}
\seealso{
\code{\link{MarxanResults-class}}, \code{\link{MarxanSolved}}, \code{\link{marxan}}
}
|
60f5446d2c582e88e333f677ce3b6dc9a9cb609b | 9a7b9c3dfcffd0c0562650d5c5a0d6fa9890c1cb | /R/single_term_digital_filter.R | 850552271e6b4f7378d6c003bf92019ccbbf8c3d | [
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | smwesten-usgs/recharge | 8ea2470e024a1451b46b030e3ee489797d81a871 | aee45d90d83e6ae1646d391d4c7a26af562fc407 | refs/heads/master | 2021-07-16T05:12:33.133309 | 2021-03-02T17:25:36 | 2021-03-02T17:25:36 | 101,231,984 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,504 | r | single_term_digital_filter.R | #' Baseflow Separation by use of a single-term digital filter.
#'
#' Extract baseflow from a daily streamflow record using the method described by
#'Nathan and McMahon (1990).
#'
#' @param date vector of dates corresponding to each \code{discharge}, should be of class "Date."
#' Missing values are not permitted.
#' @param discharge the daily streamflow to be separated; missing values are not permitted
#'within the time specified by \code{Start} and \code{end}.
#' @param alpha filter parameter.
#' @param STAID the station identifier for the data.
#'
#' @return an object of class "baseflow" and inherits class "data.frame" of the selected data,
#'a data frame of the baseflow information, and other information about the analysis.
#'
#' @keywords baseflow
#' @examples
#'
#'\dontrun{
#'}
#'@export
bf_single_term_filter <- function(date, discharge, alpha, STAID="Unknown") {
## Start of code: initial processing
STAID <- as.character(STAID[1L])
discharge <- pmax(discharge, 0.000001 ) # Convert 0 to a small number
if(any(is.na(discharge)))
stop("Missing values in discharge vector.")
if(any(diff(as.double(date)) != 1))
stop("Date data are not continuous.")
n <- length( discharge )
baseflow <- rep(0.0, n)
quickflow <- rep(0.0, n)
forward_run <- function( quickflow, discharge, alpha ) {
n <- length( discharge )
for ( i in 2:n) {
quickflow[i] <- alpha * quickflow[i-1] + ( 1. + alpha ) / 2. * ( discharge[i] - discharge[i-1] )
}
quickflow <- pmin( quickflow, discharge )
quickflow <- pmax( quickflow, rep(0.0, n) )
return(quickflow)
}
reverse_run <- function( quickflow, discharge, alpha ) {
n <- length( discharge )
for ( i in (n-1):1) {
quickflow[i] <- alpha * quickflow[i+1] + ( 1. + alpha ) / 2. * ( discharge[i] - discharge[i+1] )
}
quickflow <- pmin( quickflow, discharge )
quickflow <- pmax( quickflow, rep(0.0, n) )
return(quickflow)
}
quickflow <- forward_run( quickflow, discharge, alpha )
quickflow <- reverse_run( quickflow, discharge, alpha )
quickflow <- forward_run( quickflow, discharge, alpha )
baseflow <- discharge - quickflow
retval <- data.frame( date=date, discharge=discharge, baseflow=round( baseflow, 3),
quickflow = round( quickflow, 3 ) )
if(!is.null(STAID))
attr(retval, "STAID") <- STAID
attr(retval, "type") <- "digital"
class(retval) <- c("single_term_digital", "data.frame")
return(retval)
}
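# --- Hedged usage sketch (added; not part of the original function file) -----
# The 0.925 filter parameter is the value commonly quoted for this filter
# (Nathan and McMahon, 1990); treat it as illustrative, not a recommendation.
# dates <- seq(as.Date("2020-01-01"), as.Date("2020-12-31"), by = "day")
# flows <- 10 + 5 * sin(seq_along(dates) / 20) + runif(length(dates), 0, 3)
# bf    <- bf_single_term_filter(dates, flows, alpha = 0.925, STAID = "demo")
# head(bf)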
|
0b54b064fe14e372d68cc58cb89229024a79336c | 7917fc0a7108a994bf39359385fb5728d189c182 | /paws/tests/testthat/test_efs.R | ce38c6ef58b9ab70241f869a0be531f882dd2e8b | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | false | 396 | r | test_efs.R | svc <- paws::efs()
test_that("describe_access_points", {
expect_error(svc$describe_access_points(), NA)
})
test_that("describe_access_points", {
expect_error(svc$describe_access_points(MaxResults = 20), NA)
})
test_that("describe_file_systems", {
expect_error(svc$describe_file_systems(), NA)
})
test_that("describe_mount_targets", {
expect_error(svc$describe_mount_targets(), NA)
})
|
553f23fd33be69696b0236fab3a02110c7df9df2 | 6ccbeb28582657306ee2a5500ec4396bafad06e4 | /Generation0/EggToAdultViability/DeltaEggToAdultViability.R | c9c7fde10a1fb2ab0b5fca67d2581d79e1e2f753 | [] | no_license | KKLund-Hansen/SexChromCoAdapt | ac890836f047e1363459db2d4bd0b2b2071721b1 | 34712cefaa8b257b2c279969a377b4bbf7174d21 | refs/heads/master | 2021-07-20T10:26:06.996833 | 2021-02-06T19:47:25 | 2021-02-06T19:47:25 | 242,177,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,654 | r | DeltaEggToAdultViability.R | ################################################################################################
############################### ΔEGG-TO-ADULT OFFSPRING VIABILITY ##############################
################################################################################################
#Set up environment
library(Hmisc)
#Read in csv file with data
deltaEtA.data <- read.table(file = "EggToAdultViability.csv", h = T, sep = ",")
########################################## STATISTIC #########################################
### CALCULATE THE PROPORTION OF ECLOSED OFFSPRING ###
deltaEtA.data$prop_eclose <- deltaEtA.data$eclosed / 100
### ΔEGG-TO-ADULT CALCULATIONS ###
#To test if the change of sex chromosomes is significantly different from the wild-type population,
#we do a bootstrap model to generate CIs. If these don't overlap 0 there has been a significant change
Delta.EtA <- as.numeric(tapply(deltaEtA.data$prop_eclose, deltaEtA.data$population, mean, na.rm = T))
#View Delta.EtA
Delta.EtA
#ΔEtA Inn-Lx - Innisfail
Delta.EtA[3] - Delta.EtA[2]
# 0.012
#ΔEtA Inn-Ly - Innisfail
Delta.EtA[4] - Delta.EtA[2]
# 0.06522222
#ΔEtA Inn-Ox - Innisfail
Delta.EtA[5] - Delta.EtA[2]
# -0.021
#ΔEtA Inn-Oy - Innisfail
Delta.EtA[6] - Delta.EtA[2]
# 0.045
#ΔEtA Odd-Ix - Odder
Delta.EtA[8] - Delta.EtA[7]
# -0.041
#ΔEtA Odd-Iy - Odder
Delta.EtA[9] - Delta.EtA[7]
# 0.083
#ΔEtA Odd-Dx - Odder
Delta.EtA[10] - Delta.EtA[7]
# -0.027
#ΔEtA Odd-Dy - Odder
Delta.EtA[11] - Delta.EtA[7]
# 0.082
#TEST OF PROBABILITY OF SUCCESS. THE PROBABILITY OF POSITIVE VALUE
binom.test(5, 8, p = 0.5, alternative = "two.sided")
#Not significant, P = 0.7266
#First we make new vector to collect the data
LxI <- numeric(10000)
LyI <- numeric(10000)
OxI <- numeric(10000)
OyI <- numeric(10000)
IxO <- numeric(10000)
IyO <- numeric(10000)
DxO <- numeric(10000)
DyO <- numeric(10000)
#Then we set up a bootstrap that resamples the data from 10 data points to calculate a new mean each time, repeated 10000 times
for (i in 1:10000){
DATA <- do.call(rbind, lapply(split(deltaEtA.data, deltaEtA.data$population), function(x) x[sample(10, replace = T),]))
Delta.EtA <- as.numeric(tapply(DATA$prop_eclose, DATA$population, mean, na.rm = T))
LxI[i] <- Delta.EtA[3] - Delta.EtA[2]
LyI[i] <- Delta.EtA[4] - Delta.EtA[2]
OxI[i] <- Delta.EtA[5] - Delta.EtA[2]
OyI[i] <- Delta.EtA[6] - Delta.EtA[2]
IxO[i] <- Delta.EtA[8] - Delta.EtA[7]
IyO[i] <- Delta.EtA[9] - Delta.EtA[7]
DxO[i] <- Delta.EtA[10] - Delta.EtA[7]
DyO[i] <- Delta.EtA[11] - Delta.EtA[7] }
#Run the calculation
#95% CI for LxI
mean(LxI)
# 0.0120167
mean(LxI) - (1.96 * sd(LxI))
# -0.07538602
mean(LxI) + (1.96 * sd(LxI))
# 0.09941942
#95% CI for LyI
mean(LyI)
# 0.0649472
mean(LyI) - (1.96 * sd(LyI))
# -0.001384755
mean(LyI) + (1.96 * sd(LyI))
# 0.1312792
#95% CI for OxI
mean(OxI)
# -0.0209501
mean(OxI) - (1.96 * sd(OxI))
# -0.09146467
mean(OxI) + (1.96 * sd(OxI))
# 0.04956447
#95% CI for OyI
mean(OyI)
# 0.0449297
mean(OyI) - (1.96 * sd(OyI))
# -0.03566782
mean(OyI) + (1.96 * sd(OyI))
# 0.1255272
#95% CI for IxO
mean(IxO)
# -0.0417557
mean(IxO) - (1.96 * sd(IxO))
# -0.1708264
mean(IxO) + (1.96 * sd(IxO))
# 0.08731496
#95% CI for IyO
mean(IyO)
# 0.0826641
mean(IyO) - (1.96 * sd(IyO))
# -0.01776238
mean(IyO) + (1.96 * sd(IyO))
# 0.1830906
#95% CI for DxO
mean(DxO)
# -0.0279631
mean(DxO) - (1.96 * sd(DxO))
# -0.1436605
mean(DxO) + (1.96 * sd(DxO))
# 0.08773426
#95% CI for DyO
mean(DyO)
# 0.0812175
mean(DyO) - (1.96 * sd(DyO))
# -0.023429
mean(DyO) + (1.96 * sd(DyO))
# 0.185864
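#A compact equivalent of the repeated mean +/- 1.96*sd blocks above (added sketch;
#the numbers it prints should match those recorded above)
#boot.ci <- function(v) c(mean = mean(v), lower = mean(v) - 1.96 * sd(v), upper = mean(v) + 1.96 * sd(v))
#sapply(list(LxI = LxI, LyI = LyI, OxI = OxI, OyI = OyI, IxO = IxO, IyO = IyO, DxO = DxO, DyO = DyO), boot.ci)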
######################################### PLOT DATA #########################################
#To be able to plot the bootstrap data as boxplots I create a new data frame with the data
#First I make a vector with the populations
population <- c( rep("aLHmX-Inn", 10000), rep("bLHmY-Inn", 10000), rep("cOddX-Inn", 10000), rep("dOddY-Inn", 10000),
rep("eInnX-Odd", 10000), rep("fInnY-Odd", 10000), rep("gDahX-Odd", 10000), rep("hDahY-Odd", 10000))
#Then I collcet all the bootstrap data in a new vector
deltaPropEclose <- c(LxI, LyI, OxI, OyI, IxO, IyO, DxO, DyO)
#Then it's all collceted in a new data frame
DEtA <- data.frame(population, deltaPropEclose)
#And write it into a new file
write.csv(DEtA, file = "DeltaEggToAdultViabilityplot.csv")
#Read in csv file with data
DEtAplot.data <- read.table(file = "DeltaEggToAdultViabilityplot.csv", h = T, sep = ",")
#MEAN
meanDEtA <- tapply(DEtAplot.data$deltaPropEclose, DEtAplot.data$population, mean, na.rm = T)
#SD
sdDEtA <- tapply(DEtAplot.data$deltaPropEclose, DEtAplot.data$population, sd, na.rm = T)
# Plot
par(mar = c(6, 5, 2, 2))
#Plot errorbars
xDEtA <- c(0.5, 1, 1.5, 2, 3, 3.5, 4, 4.5)
errbar(xDEtA, meanDEtA, meanDEtA + (1.96 * sdDEtA), meanDEtA - (1.96 * sdDEtA),
xlim = c(0.3, 4.7), xlab = "", xaxt = "n", ylim = c(-0.2, 0.2),
ylab = expression(Delta~"Proportion of eclosed offspring"),
cex.axis = 1.2, cex.lab = 1.6, las = 1, pch = c(17, 18), cex = c(3, 3.5), lwd = 3)
#AXIS
axis(1, at = c(0.5, 1, 1.5, 2, 3, 3.5, 4, 4.5), cex.axis = 1.5,
labels = c(expression("L"["X"]), expression("L"["Y"]), expression("O"["X"]), expression("O"["Y"]),
expression("I"["X"]), expression("I"["Y"]), expression("D"["X"]), expression("D"["Y"])))
#Add line at 0
abline(h = 0, lty = 2, lwd = 2)
#And add text on top
mtext(expression(italic("Innisfail")), side = 1, line = 3, at = 1.25, cex = 1.6)
mtext(expression(italic("Odder")), side = 1, line = 3, at = 3.75, cex = 1.6)
#Now add arrows to show significance
points(1, 0.19, pch = "*", bg = "black", cex = 2.5)
|
3b7c1fa3c4d4e5bbe16936f7e1dca758c420fe24 | 9b564709ac525bbce6cd9e88beb73eddd65bbff2 | /COVID_webscraping.R | 8ca5592bf595294cf0ee0967df043b3e009551d8 | [] | no_license | jordanjasuta/webscraping_tools | 21ba15a4469a015701014e53c36a4ac82f4d9344 | e3255c880fa97394b00401b83803da2bb8a4755c | refs/heads/master | 2022-06-20T13:18:28.007158 | 2020-05-10T21:15:39 | 2020-05-10T21:15:39 | 147,696,255 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,187 | r | COVID_webscraping.R | library(rvest)
library(dplyr)
url <- "https://covid19.bz/"
# For a one-time datapull:
covid_table <-as.data.frame(matrix(0, ncol = 6, nrow = 1))
for (column in 1:5){
table_val <- url %>%
read_html() %>%
html_nodes(xpath=paste('//*[@id="content"]/div/div/div/section[7]/div/div/div[',column,']/div/div/div/div/div/div[1]/span[2]')) %>%
html_attr("data-to-value")
covid_table[column] <-table_val
var_name <- url %>%
read_html() %>%
html_nodes(xpath=paste('//*[@id="content"]/div/div/div/section[7]/div/div/div[',column,']/div/div/div/div/div/div[2]')) %>%
html_text()
names(covid_table)[column] <- var_name
}
names(covid_table)[6] <- 'Date Pulled'
covid_table[6] <-Sys.Date()
covid_table
# to add new rows with subsequent datapulls:
vals <- c()
for (column in 1:5){
table_val <- url %>%
read_html() %>%
html_nodes(xpath=paste('//*[@id="content"]/div/div/div/section[7]/div/div/div[',column,']/div/div/div/div/div/div[1]/span[2]')) %>%
html_attr("data-to-value")
vals <- append(vals, table_val)
}
date <- Sys.Date()
vals = append(vals, as.character(date))
covid_table <- rbind(covid_table, 'date' = vals)
covid_table
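# Possible refinement (added sketch, not part of the original pull): parse the
# page once and reuse it instead of calling read_html(url) on every iteration.
# page <- read_html(url)
# vals <- sapply(1:5, function(column) {
#   page %>%
#     html_nodes(xpath=paste('//*[@id="content"]/div/div/div/section[7]/div/div/div[',column,']/div/div/div/div/div/div[1]/span[2]')) %>%
#     html_attr("data-to-value")
# })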
|
a86cf7fa779b4bb47096706fb3703073f85f4000 | 1ee9dbee0d344056920cd62f052df8e7360da768 | /smuf_runf_0919_KO_randomlinesIR.R | e8a7352ae5a3f696932d73e9ab096b745f6f0bfa | [] | no_license | efsalvarenga/smuf_rdev | f7f6d1e1bab6e246073d855723db29cf92943370 | 648407b0cec07b8bd9c83f085d9409dcc5a57fc8 | refs/heads/master | 2022-03-03T22:16:27.054629 | 2019-10-09T19:03:28 | 2019-10-09T19:03:28 | 88,738,275 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,983 | r | smuf_runf_0919_KO_randomlinesIR.R | #===========================================
# Smart Metering Uncertainty Forecasting
#
# Author Estevao "Steve" Alvarenga
# [email protected]
# Created in 10/Feb/17
#-------------------------------------------
# smuf_main combined functions optim & fcst
#===========================================
#===========================================
# Initialising
#===========================================
setwd("~/GitRepos/smuf_rdev")
# library(tidyr)
# library(dplyr)
source("smuf_main-fxs.R")
savfile = "smuf_runf_0919_KO_randomlinesIR.rds"
wm01_00 <- readRDS("smuf_import-completeIRhour.rds")
importpar <- readRDS("smuf_import-parameter.rds")
s01 <- importpar[1]
s02 <- importpar[2]
s03 <- importpar[3]
sum_of_h <- importpar[4]
data_size <- importpar[5]
#===========================================
# Integrated Parameters
#===========================================
#cus_list to 1000, stp to 150 (detectcores), hrz_lim larger (0:167)*113), turn on CV
cus_list <- seq(1,200)
frontierstp <- 16 # Number of demand bins (Stepwise frontier for portfolio optimisation)
frontierexp <- 1 # Exponentiality of frontier steps
max.gen <- 100 # For genetic opt
waitgen <- 10 # For genetic opt
win_size <- c(4,24) # Small and large win_size (select only 2)
win_selec <- win_size[2]
cross_overh <- 4 # Cross-over forced for fx_fcst_kds_quickvector
ahead_t <- seq(1,72) # Up to s02
hrz_lim <- seq(1,167)*29 # Rolling forecasts steps {seq(0:167)*113} is comprehensive
in_sample_fr <- 1/6 # Fraction for diving in- and out-sample
crossvalsize <- 1 # Number of weeks in the end of in_sample used for crossvalidation
crossvalstps <- 16 # Steps used for multiple crossvalidation (Only KDE)
crossvalfocus <- c(1) # What period is focused when running crossvalidation
is_wins_weeks <- 12 # Number of weeks used for in-sample (KDE uses win_size) & seasonality
sampling <- 1024 # For monte-carlo CRPS calculation
armalags <- c(5,5) # Max lags for ARIMA fit in ARMA-GARCH model (use smuf_lags.R)
gof.min <- 0.05 # GoF crossover value to change ARMA-GARCH to KDS
#===========================================
# Call simulator
#===========================================
bigrndno <- data.frame(V1=double(),
hor=character(),
CRPS=double())
rndres_big <- list()
for (h in hrz_lim){
ptm <- proc.time()
runkey <- Sys.time()
cat("\n\nStep",match(h,hrz_lim), "of",length(hrz_lim),"| Running BIG [h] LOOP with h =",h,"\n")
#===========================================
# Individual customers forecast
#===========================================
cat("[Ind] ")
wm01_01 <- wm01_00[min(cus_list):max(cus_list),]
wl06 <- fx_int_fcstgeneric_kdss(wm01_01,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
# wv45 <- rowMeans(wl06[[1]])
# sd01 <- as.numeric(fx_sd_mymat(wl06[[3]]))
# wv46 <- seq(0,frontierstp)^frontierexp/frontierstp^frontierexp * sum(wv45)
wv51 <- colMeans(wl06[[2]])
rndres <- wv51
#===========================================
# Random groups & evaluation
#===========================================
cat("[Rnd] ")
rnd.names <- c(10,100,200)
for (c in rnd.names){
cat(c," ")
matmult <- matrix(0,length(cus_list),length(cus_list))
for (j in 1:length(cus_list)){
vecmult <- rep(0,length(cus_list))
vecmult[sample(cus_list,c,replace=F)] = 1/c
matmult[j,] = vecmult
}
wm01_02 <- matmult %*% wm01_01
wl06_02 <- fx_int_fcstgeneric_kdss(wm01_02,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
rndres <- rbind(rndres,colMeans(wl06_02[[2]]))
}
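# (Added note: each row of matmult holds 1/c in c randomly chosen columns, so
# matmult %*% wm01_01 returns, per row, the average profile of c random
# customers. Toy example with 4 customers and c = 2:
# rbind(c(.5,.5,0,0), c(0,0,.5,.5)) %*% matrix(1:8, nrow = 4) )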
# # Previous Rndgrp implementation [slow]
# wm01_02l <- fx_rndgrp(wm01_01,frontierstp)
# wm01_02 <- wm01_02l[[1]] / rowSums(wm01_02l[[2]])
# wl06rnd <- fx_int_fcstgeneric_kdss(wm01_02,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
# wv45rnd <- as.numeric(rowMeans(wl06rnd[[1]]) * rowSums(wm01_02l[[2]]))
# sd01rnd <- as.numeric(fx_sd_mymat(wl06rnd[[3]]))
# cr01rnd <- rowMeans(wl06rnd[[2]])
# rnd_per_no <- as.data.frame(cbind(as.numeric(rowSums(wm01_02l[[2]])),wl06rnd[[2]]))
# rndnosimpl <- rnd_per_no %>%
# gather(hor,crps,-V1) %>%
# group_by(V1,hor) %>%
# summarise(CRPS=mean(crps))
# bigrndno <- rbind(bigrndno,as.data.frame(rndnosimpl))
# saveRDS(bigrndno, file=savfile)
#
# plt.names <- c(1,5,10,20,30,40,50,60,70,80,90,100,120,140,160,180,200)
# mymat <- as.matrix(
# bigrndno %>%
# group_by(V1,hor) %>%
# summarise(CRPS=mean(CRPS)) %>%
# filter(V1 %in% plt.names) %>%
# spread(hor,CRPS)
# )
fx_plt_mymat(rndres,c(0,0.2))
legend('topright', inset=c(0,0), legend = c(1,rnd.names),
lty=1, col=rainbow(1+length(rnd.names)), bty='n', cex=.75, title="Method")
rndres_big[[match(h,hrz_lim)]] <- rndres
cat("\n")
print(proc.time() - ptm)
}
rndres_sum <- Reduce("+", rndres_big) / length(rndres_big)
fx_plt_mymat(rndres_sum,c(0,0.5))
legend('topright', inset=c(0,0), legend = c(1,rnd.names),
lty=1, col=rainbow(1+length(rnd.names)), bty='n', cex=.75, title="Method")
saveRDS(rndres_sum,'rndres_sum.rds')
|
b2befbda5bb77f2e868e27e0961b73e26c72eafe | 1339e9fa22cd678ce3e778acdeb388e884271a6c | /project/costFunction.R | 17f7e408f61a9a05b1115a96d166cda9d7d55958 | [
"MIT"
] | permissive | nkafr/Adult-dataset-analysis | 67718ae3d3390295775bb1c3ecdc722834deae8e | e78ab13d9938e94c524a1750150214982f6eb6cc | refs/heads/master | 2021-01-20T18:36:11.369520 | 2016-07-15T01:36:54 | 2016-07-15T01:36:54 | 63,377,724 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 807 | r | costFunction.R | costFunction <- function(X, y) {
#costfunction Computes cost for logistic regression
function(theta) {
m <- length(y); # m is the number of training examples
#initialize J
J <- 0
#h is the hypothesis function
h <- sigmoid(X %*% theta)
    #use a vectorized implementation instead of a for loop for faster computation
J <- (t(-y) %*% log(h) - t(1 - y) %*% log(1 - h)) / m
J
}
}
grad <- function(X, y) {
# grad computes the gradient of the cost w.r.t. to the parameters theta.
function(theta) {
m <- length(y); # m is the number of training examples
#initialize grad vector
grad <- matrix(0,dim(as.matrix(theta)))
h <- sigmoid(X %*% theta)
# calculate grads
grad <- (t(X) %*% (h - y)) / m
grad
}
} |
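# --- Hedged usage sketch (added; not part of costFunction.R) -----------------
# costFunction() and grad() assume a sigmoid() helper not defined in this file;
# a standard definition consistent with their use is:
sigmoid <- function(z) 1 / (1 + exp(-z))
# Minimal end-to-end illustration on simulated (placeholder) data:
set.seed(42)
X <- cbind(1, matrix(rnorm(200), ncol = 2))                  # intercept + 2 features
y <- as.numeric(runif(100) < sigmoid(X %*% c(-0.5, 1, -2)))  # simulated labels
fit <- optim(par = rep(0, 3), fn = costFunction(X, y), gr = grad(X, y), method = "BFGS")
fit$par  # fitted coefficients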
c1f3f4f2fb05157919712bd700502a6a3a9d78a2 | fff9ee52053ff5acd4d358add0793bf4ed6b2aba | /Uppercase_Speaker.R | 9778013431dc1355483d56a7f0c9bd8a4acb0556 | [] | no_license | jfedgerton/Cleaning_CHAMP_Speakers | a6baabb378521c0cdd0d6e930b977cb2a36ff480 | b57c3e376a0c49d2fff361027b5f20adaa90ba7b | refs/heads/master | 2020-04-21T13:09:12.997216 | 2019-02-07T14:59:05 | 2019-02-07T14:59:05 | 169,588,961 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 716 | r | Uppercase_Speaker.R | csv_1278 <- list.files(path = "C:/Users/Jared/Dropbox/CHAMP-Net/Data/Show-Year CSVs/format_csv_1278", pattern = NULL, all.files = FALSE,
full.names = FALSE, recursive = FALSE,
ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
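library(readr)  # read_delim() below comes from readr (assumed; the original script does not load it)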
for (i in 1:length(csv_1278)){
import_file_pathway <- paste0("C:/Users/Jared/Dropbox/CHAMP-Net/Data/Show-Year CSVs/format_csv_1278/", csv_1278[i])
temp <- suppressMessages(read_delim(import_file_pathway,
delim = "\t", escape_double = FALSE, trim_ws = TRUE))
temp$Speaker <- toupper(temp$Speaker)
write.table(temp, file = import_file_pathway, sep = "\t", row.names = F)
}
|
9120bc2c571b82ac6ac3f5070d5a9a2df5cd0fb0 | b139d2dbfcac7b96cdb61737962f2a90f76ddb6b | /R/harmonizeCols.R | 16a150798d5452c7d28028fc2a1c4c4ea8e5c483 | [] | no_license | michaelrahija/FAOSDGdata | ef681a7ad3f715a239425f21d7cea537ef9b1177 | 59e76d4c935161596c3b855c5172675234e99011 | refs/heads/master | 2021-08-22T13:07:05.875370 | 2020-06-11T04:55:40 | 2020-06-11T04:55:40 | 193,698,740 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,678 | r | harmonizeCols.R | #' harmonizeCols
#'
#' This is a function which takes as input a data frame containing data
#' for a particular indicator, and outputs a data frame with SDMX concepts as column names,
#' each followed by an attribute name. Example: REF_AREACode
#'
#' @param sdgdf is a data frame referring to a specific SDG
#'
#' @return This function returns a dataframe containing the cleaned data frame
#'
#' @importFrom dplyr select
#' @export
harmonizeCols <- function(sdgdf){
#filter SDG 2.1.2 for mid bounds
if("2.1.2" %in% unique(sdgdf$Indicator)){
sdgdf <- filter(sdgdf, Bounds == "Mid-point")
}
#get rid of any spaces in a column name
colnames(sdgdf) <- gsub(pattern = " ",
replacement = "",
colnames(sdgdf))
#harmonize footnote column name
colnames(sdgdf)[grepl("foot", colnames(sdgdf), ignore.case = T)] <- "Footnote"
if(!("Age.Group" %in% colnames(sdgdf))) {
sdgdf$Age.Group <- NA
}
if(!("Disaggregation" %in% colnames(sdgdf))) {
sdgdf$Disaggregation <- NA
}
if(!("Time_Detail" %in% colnames(sdgdf))) {
sdgdf$Time_Detail <- NA
}
if(!("ReportingType" %in% colnames(sdgdf))) {
sdgdf$ReportingType <- NA
}
if(!("Bounds" %in% colnames(sdgdf))) {
sdgdf$Bounds <- NA
}
#select relevant columns, and provide simple names
relcols <- c("GeoAreaCode",
"GeoAreaName",
"RefAreaType_InternalUseOnly",
"SeriesCode",
"SeriesDescription",
"Indicator",
"TimePeriod",
"Time_Detail",
"Nature",
"Units",
"ReportingType",
"Source",
"Value",
"Age.Group",
"Bounds",
"Disaggregation",
"Footnote",
"version")
df <- select(sdgdf, relcols)
#recode w/ SDMX concept names when possible, when concept
colnames(df) <- c("REF_AREACode",
"REF_AREADesc",
"RefAreaType_InternalUseOnly",
"SERIESCode",
"SERIESDescription",
"SERIESInd",
"TIME_PERIOD",
"TIME_DETAIL",
"NATURE",
"UNIT_MEASURE",
"REPORTING_TYPE",
"SOURCE_DETAIL",
"OBS_VALUE",
"AGE",
"Bounds",
"Disaggregation",
"Footnote",
"version")
#clean classes
df[] <- lapply(df, as.character)
df
} |
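# --- Added sketch (not part of the original file) ----------------------------
# The repeated "add the column if it is missing" blocks inside harmonizeCols()
# could be collapsed into one loop; a behaviour-equivalent version would be:
# for (col in c("Age.Group", "Disaggregation", "Time_Detail", "ReportingType", "Bounds")) {
#   if (!(col %in% colnames(sdgdf))) sdgdf[[col]] <- NA
# }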
eee5e4882bf0ffb154bf626ea2bdff1d12f19b02 | df562e5ef9ea2846cb05319114009c3de7e4dee1 | /MasterR/crea_print_table.R | bd1b52111a93c759bb670bc9f4b61da4da83f425 | [] | no_license | SCelisV/R | a05d9dc1b0bcb2bfabfbe83703db8364edd8a9ab | 0aa0a984dae0c0466addbf6dc0dd629d863f7cf5 | refs/heads/master | 2022-12-23T23:23:40.878996 | 2020-09-30T20:10:21 | 2020-09-30T20:10:21 | 286,298,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 317 | r | crea_print_table.R |
TabA = as.table(cbind(c("A","B","C"),c(1,2,3)))
# > TabA
# A B
# A A 1
# B B 2
# C C 3
TabB = as.table(cbind(c("D","E","F"),c(1,2,3)))
# > TabB
# A B
# A D 1
# B E 2
# C F 3
nams = c("TabA","TabB")
# > nams
# [1] "TabA" "TabB"
for (i in nams){
print (i)
  tab = get(i)
  print(tab)
  print(get(i))
}
|
bb853f6a9de34eb91b696f6ecdfca9ea92598268 | c2ecf5c58b195b5999c7e0c32f726f894f8723a0 | /MUpdaters/man/MDataUpdater.AddNewField.Rd | ee890ce42f2ed3c3f88672377cebc7e3954452c7 | [] | no_license | pashkovds/mdlibs | d67fef9b021e7b0b20ec3b0eeaa2f099d8eff87c | 8cb0a4a2f12e5f472d039e9ea55ecdbf26202046 | refs/heads/master | 2021-01-01T03:43:41.411779 | 2016-04-23T10:06:55 | 2016-04-23T10:06:55 | 56,423,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 380 | rd | MDataUpdater.AddNewField.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MDataUpdater.AddNewField.R
\docType{data}
\name{MDataUpdater.AddNewField}
\alias{MDataUpdater.AddNewField}
\title{MDataUpdater.AddNewField}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
MDataUpdater.AddNewField
}
\description{
MDataUpdater.AddNewField
}
\keyword{datasets}
|
569a88c3b1fd0355523319f5b4be5f4608f14d7c | cc3beea2feb5d66b4df71a96f42129687a1296e7 | /draft/from_R_tips_folder/decimal_position.R | 1f490fd30ce76990c64c210358b4db7ce2991a6d | [] | no_license | YulongXieGitHub/YulongR_Code | 133c90b708c33c447737aaa0b6d01f5c9cb33818 | e1f68c1564fb4036df9500297fbd36548e3b8014 | refs/heads/master | 2021-01-23T15:03:12.427516 | 2015-07-16T01:52:35 | 2015-07-16T01:52:35 | 39,168,963 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,089 | r | decimal_position.R | #
# Profile_thermalSetpoint.R
#
#
# October 28, 2009
# -------------------------------------------------------------------------
#
# eliminate all stuff
#
rm(list = ls(all = TRUE))
start_time <- date();
# format(Sys.time(), "%a %b %d %X %Y %Z")
Start.time <- Sys.time()
set.seed(12345, kind = NULL) # set seed of random number
# -------------------------------------------------------------------------------------------------
# change to the script directory
# -------------------------------------------------------------------------------------------------
if(.Platform$OS.type == "unix")
{
Path.Current <- "/phome/d3l143/FY2010_NAP_DataAnalysis/0_scripts"
}else{
Path.Current <- "C:/YuLong_Projects/FY2010_NAP_DataAnalysis/0_scripts"
}
setwd(Path.Current)
# -------------------------------------------------------------------------------------------------
# include needed objects
# -------------------------------------------------------------------------------------------------
source(paste(Path.Current,"0_CrownePlaza_Function.R",sep="/"))
# -------------------------------------------------------------------------------------------------
# setup output and log directory
# -------------------------------------------------------------------------------------------------
Path.out <- "../Profile_thermalSetpoint" # OUTPUT processed result directory
Path.log <- "../0_log" # OUTPUT log directory
Path.Data <- "../1_CrownePlaza_ReturnAirT" # INPUT data folder retrieved from "nac" on "nac.pnl.gov" server
if (!file.exists(Path.out)){print(paste("NOT existing:",Path.out));dir.create(Path.out,showWarnings=TRUE,recursive=TRUE)}
if (!file.exists(Path.log)){print(paste("NOT existing:",Path.log));dir.create(Path.log,showWarnings=TRUE,recursive=TRUE)}
if (!file.exists(Path.Data)){stop(paste(" INPUT data folder retrieved from \"nac\" on \"nac.pnl.gov\" server does NOT existing\n",sep=""))}
# -------------------------------------------------------------------------------------------------
# create a LOG file and a TIME Recording file
# -------------------------------------------------------------------------------------------------
FL.LOG <- paste(Path.log,"Profile_thermalSetpoint.log",sep="/") # OUTPUT Log file
if (file.exists(FL.LOG)){print(paste(FL.LOG,"exist.Delete it!"));file.remove(FL.LOG)}
cat(paste("Log file for data processing script [Profile_thermalSetpoint.R]!\n",sep=""),file=FL.LOG, append=TRUE)
cat(paste("\n***************************************************************************",
"* [Profile_thermalSetpoint.R] *",
"***************************************************************************",sep="\n"),file=FL.LOG, append=TRUE)
# -------------------------------------------------------------------------------------------------
# define OUTPUT files
# -------------------------------------------------------------------------------------------------
FL.rawData.OBJ <- paste(Path.Data,paste("CrownePlaza_ReturnAirT.Rdata",sep=""),sep="/") # INPUT DATA in Rdata Objects
FL.aggData.OBJ <- paste(Path.Data,paste("CrownePlaza_ReturnAirT_Sum.Rdata",sep=""),sep="/") # INPUT SUM in Rdata Objects
FL.profile.OBJ <- paste(Path.out, paste("CrownePlaza_Profile_ThermalSetPoint.Rdata",sep=""),sep="/") # OUTPUT Profile of Thermal SetPoint
FL.profile.CSV <- paste(Path.out, paste("CrownePlaza_Profile_ThermalSetPoint.CSV",sep=""),sep="/") # OUTPUT Profile of Thermal SetPoint
FL.profile.PDF <- paste(Path.out, paste("CrownePlaza_Profile_ThermalSetPoint.PDF",sep=""),sep="/") # OUTPUT Profile of Thermal SetPoint
FL.profile.IDF <- paste(Path.out, paste("CrownePlaza_Profile_ThermalSetPoint.IDF",sep=""),sep="/") # OUTPUT Profile of Thermal SetPoint
if (!file.exists(FL.rawData.OBJ)){stop(paste(FL.rawData.OBJ," should exist. Find out why it is missing!"));file.remove(FL.rawData.OBJ)} # remove existing OUTPUT files
if (!file.exists(FL.aggData.OBJ)){stop(paste(FL.aggData.OBJ," should exist. Find out why it is missing!"));file.remove(FL.aggData.OBJ)} # remove existing OUTPUT files
if (file.exists(FL.profile.OBJ)){print(paste(FL.profile.OBJ," exist. Delete it!"));file.remove(FL.profile.OBJ)} # remove existing OUTPUT files
if (file.exists(FL.profile.CSV)){print(paste(FL.profile.CSV," exist. Delete it!"));file.remove(FL.profile.CSV)} # remove existing OUTPUT files
if (file.exists(FL.profile.PDF)){print(paste(FL.profile.PDF," exist. Delete it!"));file.remove(FL.profile.PDF)} # remove existing OUTPUT files
if (file.exists(FL.profile.IDF)){print(paste(FL.profile.IDF," exist. Delete it!"));file.remove(FL.profile.IDF)} # remove existing OUTPUT files
cat(paste("\nAll paths and files are defined\n"),file=FL.LOG,append=TRUE)
cat(paste("\nAll paths and files are defined\n"))
# -------------------------------------------------------------------------------------------------
# load the data files
# -------------------------------------------------------------------------------------------------
load(FL.rawData.OBJ) # raw data
load(FL.aggData.OBJ) # hourly aggregated data objects
cat(paste("\nLoaded the data retrieved from the database and the subsequently hourly aggregated data!\n"),file=FL.LOG,append=TRUE)
cat(paste("\nLoaded the data retrieved from the database and the subsequently hourly aggregated data!\n"))
myData[,"day.week"] <- factor(myData[,"day.week"],levels=week.label, labels=week.names) # convert 1-7 to Sun,Mon,...,Sat
myData[,"day.type"] <- factor(myData[,"day.type"], levels=dayType.label,labels=dayType.names) # convert 1 & 7 to "wkend and 2-6 to "wkday"
# -------------------------------------------------------------------------------------------------
# calculating the quantile and output 25%, 50% and 75% to the CSV file
# -------------------------------------------------------------------------------------------------
#
# calculating the quantile on the raw data
#
p25 <- tapply(myData[,"Value"],as.factor(as.character(myData[,"hour"])),quantile,0.25,na.rm = TRUE)
p50 <- tapply(myData[,"Value"],as.factor(as.character(myData[,"hour"])),quantile,0.50,na.rm = TRUE)
p75 <- tapply(myData[,"Value"],as.factor(as.character(myData[,"hour"])),quantile,0.75,na.rm = TRUE)
Profile.raw <- data.frame(p25 = p25, p50 = p50, p75= p75)
Profile.raw <- data.frame(hour = as.numeric(as.character(row.names(Profile.raw))),Profile.raw)
Profile.raw <- Profile.raw[order(Profile.raw[,"hour"]),]
cat(paste("\n\nProfile.raw on the raw data\n"),file=FL.profile.CSV,append=TRUE)
write.table(Profile.raw,sep=",",row.names = FALSE,col.names = TRUE,file=FL.profile.CSV,append=TRUE)
#
# calculating the quantile on the hourly aggregated data
#
p25 <- tapply(Data.daily.roomWise[,"Value"],as.factor(as.character(Data.daily.roomWise[,"hour"])),quantile,0.25,na.rm = TRUE)
p50 <- tapply(Data.daily.roomWise[,"Value"],as.factor(as.character(Data.daily.roomWise[,"hour"])),quantile,0.50,na.rm = TRUE)
p75 <- tapply(Data.daily.roomWise[,"Value"],as.factor(as.character(Data.daily.roomWise[,"hour"])),quantile,0.75,na.rm = TRUE)
Profile.aggHourly <- data.frame(p25 = p25, p50 = p50, p75= p75)
Profile.aggHourly <- data.frame(hour = as.numeric(as.character(row.names(Profile.aggHourly))),Profile.aggHourly)
Profile.aggHourly <- Profile.aggHourly[order(Profile.aggHourly[,"hour"]),]
cat(paste("\n\nProfile.aggHourly on the hourly aggregated data\n"),file=FL.profile.CSV,append=TRUE)
write.table(Profile.aggHourly,sep=",",row.names = FALSE,col.names = TRUE,file=FL.profile.CSV,append=TRUE)
#
# calculating the quantile on the daytype-hourly aggregated data
#
p25 <- tapply(Data.dayType.roomWise[,"Value"],as.factor(as.character(Data.dayType.roomWise[,"hour"])),quantile,0.25,na.rm = TRUE)
p50 <- tapply(Data.dayType.roomWise[,"Value"],as.factor(as.character(Data.dayType.roomWise[,"hour"])),quantile,0.50,na.rm = TRUE)
p75 <- tapply(Data.dayType.roomWise[,"Value"],as.factor(as.character(Data.dayType.roomWise[,"hour"])),quantile,0.75,na.rm = TRUE)
Profile.aggDayType <- data.frame(p25 = p25, p50 = p50, p75= p75)
Profile.aggDayType <- data.frame(hour = as.numeric(as.character(row.names(Profile.aggDayType))),Profile.aggDayType)
Profile.aggDayType <- Profile.aggDayType[order(Profile.aggDayType[,"hour"]),]
cat(paste("\n\nProfile.aggDayType on the daytype-hourly aggregated data\n"),file=FL.profile.CSV,append=TRUE)
write.table(Profile.aggDayType,sep=",",row.names = FALSE,col.names = TRUE,file=FL.profile.CSV,append=TRUE)
# -------------------------------------------------------------------------------------------------
# output Thermal Setpoint schedule at (P25, P50 & P75) in Eplus idf format
# -------------------------------------------------------------------------------------------------
#
# output in Eplus idf format (P25)
#
cat(paste("Output the 25% Thermal Setpoint Schedule!\n",sep=""))
cat(paste("Output the 25% Thermal Setpoint Schedule!\n",sep=""),file=FL.LOG,append=TRUE)
cat(paste("\n\nSchedule:Compact,", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" CLGSETP_SCH_P25, !- Name", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Temperature, !- Schedule Type Limits Name","\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Through: 12/31, !- Field 1", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" For: Weekdays SummerDesignDay Saturday WinterDesignDay Sunday Holiday AllOtherDays !- Field 2", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
for (idx in seq(from=1,to=(dim(Profile.raw)[1]-1),by=1))
{
idx.field <- 2 + idx
T <- Profile.raw[idx,"p25"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",idx,":00, ",T.3dec,", !- Field ",idx.field, "\n",sep=""),file=FL.profile.IDF,append=TRUE)
}
idx.field <- idx.field + 1
T <- Profile.raw[dim(Profile.raw)[1],"p25"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",dim(Profile.raw)[1],":00, ",T.3dec,"; !- Field ",idx.field,"\n",sep=""),file=FL.profile.IDF,append=TRUE)
#
# output in Eplus idf format (P50)
#
cat(paste("Output the 50% Thermal Setpoint Schedule!\n",sep=""))
cat(paste("Output the 50% Thermal Setpoint Schedule!\n",sep=""),file=FL.LOG,append=TRUE)
cat(paste("\n\nSchedule:Compact,", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" CLGSETP_SCH_p50, !- Name", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Temperature, !- Schedule Type Limits Name","\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Through: 12/31, !- Field 1", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" For: Weekdays SummerDesignDay Saturday WinterDesignDay Sunday Holiday AllOtherDays !- Field 2", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
for (idx in seq(from=1,to=(dim(Profile.raw)[1]-1),by=1))
{
idx.field <- 2 + idx
T <- Profile.raw[idx,"p50"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",idx,":00, ",T.3dec,", !- Field ",idx.field, "\n",sep=""),file=FL.profile.IDF,append=TRUE)
}
idx.field <- idx.field + 1
T <- Profile.raw[dim(Profile.raw)[1],"p50"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",dim(Profile.raw)[1],":00, ",T.3dec,"; !- Field ",idx.field,"\n",sep=""),file=FL.profile.IDF,append=TRUE)
#
# output in Eplus idf format (p75)
#
cat(paste("Output the 75% Thermal Setpoint Schedule!\n",sep=""))
cat(paste("Output the 75% Thermal Setpoint Schedule!\n",sep=""),file=FL.LOG,append=TRUE)
cat(paste("\n\nSchedule:Compact,", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" CLGSETP_SCH_p75, !- Name", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Temperature, !- Schedule Type Limits Name","\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Through: 12/31, !- Field 1", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" For: Weekdays SummerDesignDay Saturday WinterDesignDay Sunday Holiday AllOtherDays !- Field 2", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
for (idx in seq(from=1,to=(dim(Profile.raw)[1]-1),by=1))
{
idx.field <- 2 + idx
T <- Profile.raw[idx,"p75"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",idx,":00, ",T.3dec,", !- Field ",idx.field, "\n",sep=""),file=FL.profile.IDF,append=TRUE)
}
idx.field <- idx.field + 1
T <- Profile.raw[dim(Profile.raw)[1],"p75"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",dim(Profile.raw)[1],":00, ",T.3dec,"; !- Field ",idx.field,"\n",sep=""),file=FL.profile.IDF,append=TRUE)
#
# output in Eplus idf format for each room
#
for (room.lab in as.character(levels(Data.allDays.roomWise[,"room.name"])))
{
cat(paste("Output the Thermal Setpoint Schedule at room ",room.lab,"!\n",sep=""))
cat(paste("Output the Thermal Setpoint Schedule at room ",room.lab,"!\n",sep=""),file=FL.LOG,append=TRUE)
cat(paste("\n\nSchedule:Compact,", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" ",paste("CLGSETP_SCH_",room.lab,sep="")," !- Name", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Temperature, !- Schedule Type Limits Name","\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" Through: 12/31, !- Field 1", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
cat(paste(" For: Weekdays SummerDesignDay Saturday WinterDesignDay Sunday Holiday AllOtherDays !- Field 2", "\n",sep=""),file=FL.profile.IDF,append=TRUE)
for (idx.hour in seq(from=0,to=22,by=1))
{
idx.field <- 2 + idx.hour
idx <- seq(from=1,to=dim(Data.allDays.roomWise)[1],by=1)[as.character(Data.allDays.roomWise[,"room.name"]) == room.lab & Data.allDays.roomWise[,"hour"] == idx.hour]
T <- Data.allDays.roomWise[idx,"Value"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",idx.hour+1,":00, ",T.3dec,", !- Field ",idx.field, "\n",sep=""),file=FL.profile.IDF,append=TRUE)
}
idx.field <- idx.field + 1
idx <- seq(from=1,to=dim(Data.allDays.roomWise)[1],by=1)[as.character(Data.allDays.roomWise[,"room.name"]) == room.lab & Data.allDays.roomWise[,"hour"] == 23]
T <- Data.allDays.roomWise[idx,"Value"]
ltrs <- substring(T,1:nchar(T),1:nchar(T))
T.3dec <- substr(T,1,which(ltrs==".")+3)
cat(paste(" Until: ",24,":00, ",T.3dec,"; !- Field ",idx.field,"\n",sep=""),file=FL.profile.IDF,append=TRUE)
}
# -------------------------------------------------------------------------------------------------
# plotting
# -------------------------------------------------------------------------------------------------
cat(paste("Plot the profiles!\n",sep=""))
cat(paste("Plot the profiles!\n",sep=""),file=FL.LOG,append=TRUE)
pdf(file = FL.profile.PDF,paper="a4r", width=0, height=0)
# -------------------------------------------------------------------------------------------------
# Variation Across Rooms [myData]
# -------------------------------------------------------------------------------------------------
cat(paste("\nprofile across all rooms and all days based on raw data!\n"),file=FL.LOG,append=TRUE)
cat(paste("\nprofile across all rooms and all days based on raw data!\n"))
# get the y limit for plotting
y.limit <- range(myData[,"Value"])
# round to tenth place
y.limit <- c(floor(floor(min(y.limit))/10)*10,ceiling(ceiling(max(y.limit))/10)*10)
y.limit <- c(50,100) # overwrite with this fixed limits for return air temperature
# weekend
idx.row <- as.character(myData[,"day.type"]) == "wkend"
boxplot(myData[idx.row,"Value"] ~ factor(myData[idx.row,"hour"]),
boxwex = 0.15, notch=FALSE, at = c(0:23) - 0.20,cex=1.0,
xaxt="n", #
main=paste("Variation Across Rooms ","(raw data)",sep=""),
xlab="Hour",
ylab = expression("Return Air T F"^{o}),
ylim=y.limit,
outcex=0.7,outcol="blue",staplecol="blue",whiskcol="blue",boxcol="blue",boxfill="white",medlwd=2.0,medcol="green")
# weekday
idx.row <- as.character(myData[,"day.type"]) == "wkday"
boxplot(myData[idx.row,"Value"] ~ factor(myData[idx.row,"hour"]), add = TRUE,
boxwex = 0.15, notch=FALSE, at = c(0:23) + 0.20,cex=1.0,
col="red",outcex=0.7,outcol="red",staplecol="red",whiskcol="red",boxcol="red",boxfill="white",medlwd=2.0,medcol="green")
# all days combined (plot the full data set, not the weekday subset)
boxplot(myData[,"Value"] ~ factor(myData[,"hour"]), add = TRUE,
boxwex = 0.15, notch=FALSE, at = c(0:23) + 0.00,cex=1.0,
xaxt="n",
col="black",outcex=0.7,outcol="black",staplecol="black",whiskcol="black",boxcol="black",boxfill="white",medlwd=2.0,medcol="green")
abline(h=c(50,55,60,65,70,75,80,85,90,95),lty=2)
# if (flag.hor){abline(h=value.hor,lty=2)}
# legends (from package gplots)
smartlegend(x="right",y="top", inset = 0.05,
c("wkend","all","wkday"),
col=c("blue","black","magenta"),
fill = c("blue","black","magenta"))
# -------------------------------------------------------------------------------------------------
# Variation Across Rooms
# -------------------------------------------------------------------------------------------------
cat(paste("\nprofile based on hourly aggregated data at each room across all days!\n"),file=FL.LOG,append=TRUE)
cat(paste("\nprofile based on hourly aggregated data at each room across all days!\n"))
# get the y limit for plotting
y.limit <- range(Data.daily.roomWise[,"Value"])
# round to tenth place
y.limit <- c(floor(floor(min(y.limit))/10)*10,ceiling(ceiling(max(y.limit))/10)*10)
y.limit <- c(50,100) # overwrite with this fixed limits for return air temperature
# weekend
idx.row <- as.character(Data.daily.roomWise[,"day.type"]) == "wkend"
boxplot(Data.daily.roomWise[idx.row,"Value"] ~ factor(Data.daily.roomWise[idx.row,"hour"]),
boxwex = 0.15, notch=FALSE, at = c(0:23) - 0.20,cex=1.0,
xaxt="n", #
main=paste("Variation Across Rooms ","(hourly aggregated data for each day)",sep=""),
xlab="Hour",
ylab = expression("Return Air T F"^{o}),
ylim=y.limit,
outcex=0.7,outcol="blue",staplecol="blue",whiskcol="blue",boxcol="blue",boxfill="white",medlwd=2.0,medcol="green")
# weekday
idx.row <- as.character(Data.daily.roomWise[,"day.type"]) == "wkday"
boxplot(Data.daily.roomWise[idx.row,"Value"] ~ factor(Data.daily.roomWise[idx.row,"hour"]), add = TRUE,
boxwex = 0.15, notch=FALSE, at = c(0:23) + 0.20,cex=1.0,
col="red",outcex=0.7,outcol="red",staplecol="red",whiskcol="red",boxcol="red",boxfill="white",medlwd=2.0,medcol="green")
# all days combined (plot the full data set, not the weekday subset)
boxplot(Data.daily.roomWise[,"Value"] ~ factor(Data.daily.roomWise[,"hour"]), add = TRUE,
boxwex = 0.15, notch=FALSE, at = c(0:23) + 0.00,cex=1.0,
xaxt="n",
col="black",outcex=0.7,outcol="black",staplecol="black",whiskcol="black",boxcol="black",boxfill="white",medlwd=2.0,medcol="green")
abline(h=c(50,55,60,65,70,75,80,85,90,95),lty=2)
# if (flag.hor){abline(h=value.hor,lty=2)}
# legends (from package gplots)
smartlegend(x="right",y="top", inset = 0.05,
c("wkend","all","wkday"),
col=c("blue","black","magenta"),
fill = c("blue","black","magenta"))
# -------------------------------------------------------------------------------------------------
# Variation Across Rooms
# -------------------------------------------------------------------------------------------------
cat(paste("\nprofile based on hourly aggregated data across all weekends or weekdays across all rooms!\n"),file=FL.LOG,append=TRUE)
cat(paste("\nprofile based on hourly aggregated data across all weekends or weekdays across all rooms!\n"))
# get the y limit for plotting
y.limit <- range(Data.dayType.roomWise[,"Value"])
# round to tenth place
y.limit <- c(floor(floor(min(y.limit))/10)*10,ceiling(ceiling(max(y.limit))/10)*10)
y.limit <- c(50,100) # overwrite with this fixed limits for return air temperature
# weekend
idx.row <- as.character(Data.dayType.roomWise[,"day.type"]) == "wkend"
boxplot(Data.dayType.roomWise[idx.row,"Value"] ~ factor(Data.dayType.roomWise[idx.row,"hour"]),
boxwex = 0.15, notch=FALSE, at = c(0:23) - 0.20,cex=1.0,
xaxt="n", #
main=paste("Variation Across Rooms ","(hourly aggregated data for all weekdays and weekends)",sep=""),
xlab="Hour",
ylab = expression("Return Air T F"^{o}),
ylim=y.limit,
outcex=0.7,outcol="blue",staplecol="blue",whiskcol="blue",boxcol="blue",boxfill="white",medlwd=2.0,medcol="green")
# weekday
idx.row <- as.character(Data.dayType.roomWise[,"day.type"]) == "wkday"
boxplot(Data.dayType.roomWise[idx.row,"Value"] ~ factor(Data.dayType.roomWise[idx.row,"hour"]), add = TRUE,
boxwex = 0.15, notch=FALSE, at = c(0:23) + 0.20,cex=1.0,
col="red",outcex=0.7,outcol="red",staplecol="red",whiskcol="red",boxcol="red",boxfill="white",medlwd=2.0,medcol="green")
# all day types combined (plot the full data set, not the weekday subset)
boxplot(Data.dayType.roomWise[,"Value"] ~ factor(Data.dayType.roomWise[,"hour"]), add = TRUE,
boxwex = 0.15, notch=FALSE, at = c(0:23) + 0.00,cex=1.0,
xaxt="n",
col="black",outcex=0.7,outcol="black",staplecol="black",whiskcol="black",boxcol="black",boxfill="white",medlwd=2.0,medcol="green")
abline(h=c(50,55,60,65,70,75,80,85,90,95),lty=2)
# if (flag.hor){abline(h=value.hor,lty=2)}
# legends (from package gplots)
smartlegend(x="right",y="top", inset = 0.05,
c("wkend","all","wkday"),
col=c("blue","black","magenta"),
fill = c("blue","black","magenta"))
dev.off()
# -------------------------------------------------------------------------------------------------
# time used for completing this script
# -------------------------------------------------------------------------------------------------
End.time <- Sys.time()
Diff.time <- End.time - Start.time
Diff.time
cat(paste("\nProfile_thermalSetpoint.R is finished successfully at ",End.time,"!\n",sep=" "))
cat(paste("\nProfile_thermalSetpoint.R is finished successfully at ",End.time,"!\n",sep=" "),file=FL.LOG,append=TRUE)
cat(paste("\nProcessing time for [Profile_thermalSetpoint.R] is ",as.numeric(Diff.time, units="mins")," minutes\n",sep=" "))
cat(paste("\nProcessing time for [Profile_thermalSetpoint.R] is ",as.numeric(Diff.time, units="mins")," minutes\n",sep=" "),file=FL.LOG,append=TRUE)
|
a6dad4b699c72a5d8a63c861ecbc0cfd5af84f19 | 57965d63586beb192af1a2f8974fdd5630a3964b | /man/np.deneqtest.Rd | 62bf0477c127ec7acccc2f699a8646c1abb65a41 | [] | no_license | JeffreyRacine/R-Package-np | 6fee493cbd555cabe976d2f9c14cd10aef99c665 | 525db82ebc67423728888daf66ce0d9fdd70bbc7 | refs/heads/master | 2023-08-31T13:32:00.925187 | 2023-08-27T13:08:45 | 2023-08-27T13:08:45 | 1,957,067 | 41 | 23 | null | 2022-08-12T15:40:15 | 2011-06-26T20:09:34 | C | UTF-8 | R | false | false | 3,959 | rd | np.deneqtest.Rd | % $Id: np.cmstest.Rd,v 1.58 2006/11/03 21:17:20 tristen Exp $
\name{npdeneqtest}
\alias{npdeneqtest}
\title{ Kernel Consistent Density Equality Test with Mixed Data Types }
\description{
\code{npdeneqtest} implements a consistent integrated squared
difference test for equality of densities as described in Li, Maasoumi,
and Racine (2009).
}
\usage{
npdeneqtest(x = NULL,
y = NULL,
bw.x = NULL,
bw.y = NULL,
boot.num = 399,
random.seed = 42,
\dots)
}
\arguments{
\item{x,y}{
data frames for the two samples for which one wishes to
test equality of densities. The variables in each data
frame must be the same (i.e. have identical names).
}
\item{bw.x,bw.y}{
optional bandwidth objects for \code{x,y}
}
\item{boot.num}{
an integer value specifying the number of bootstrap
replications to use. Defaults to \code{399}.
}
\item{random.seed}{
an integer used to seed R's random number generator. This is to
ensure replicability. Defaults to 42.
}
\item{\dots}{ additional arguments supplied to specify the bandwidth
type, kernel types, and so on. This is used if you do not pass in
bandwidth objects and you do not desire the default behaviours. To
do this, you may specify any of \code{bwscaling}, \code{bwtype},
\code{ckertype}, \code{ckerorder}, \code{ukertype},
\code{okertype}.}
}
\value{
\code{npdeneqtest} returns an object of type \code{deneqtest} with the
following components
\item{Tn}{ the (standardized) statistic \code{Tn} }
\item{In}{ the (unstandardized) statistic \code{In} }
\item{Tn.bootstrap}{ contains the bootstrap replications of \code{Tn} }
\item{In.bootstrap}{ contains the bootstrap replications of \code{In} }
\item{Tn.P}{ the P-value of the \code{Tn} statistic }
\item{In.P}{ the P-value of the \code{In} statistic }
\item{boot.num}{ number of bootstrap replications }
  \code{\link{summary}} supports objects of type \code{deneqtest}.
}
\references{
Li, Q. and E. Maasoumi and J.S. Racine (2009), \dQuote{A Nonparametric
Test for Equality of Distributions with Mixed Categorical and
Continuous Data,} Journal of Econometrics, 148, pp 186-200.
}
\author{
Tristen Hayfield \email{[email protected]}, Jeffrey S. Racine
\email{[email protected]}
}
\details{
\code{npdeneqtest} computes the integrated squared density difference
between the estimated densities/probabilities of two samples having
identical variables/datatypes. See Li, Maasoumi, and Racine (2009) for
details.
}
\section{Usage Issues}{
If you are using data of mixed types, then it is advisable to use the
\code{\link{data.frame}} function to construct your input data and not
\code{\link{cbind}}, since \code{\link{cbind}} will typically not work as
intended on mixed data types and will coerce the data to the same
type.
It is crucial that both data frames have the same variable names.
}
\seealso{
\code{\link{npdeptest},\link{npsdeptest},\link{npsymtest},\link{npunitest}}
}
\examples{
\dontrun{
set.seed(1234)
## Distributions are equal
n <- 250
sample.A <- data.frame(x=rnorm(n))
sample.B <- data.frame(x=rnorm(n))
npdeneqtest(sample.A,sample.B,boot.num=99)
Sys.sleep(5)
## Distributions are unequal
sample.A <- data.frame(x=rnorm(n))
sample.B <- data.frame(x=rchisq(n,df=5))
npdeneqtest(sample.A,sample.B,boot.num=99)
## Mixed datatypes, distributions are equal
sample.A <- data.frame(a=rnorm(n),b=factor(rbinom(n,2,.5)))
sample.B <- data.frame(a=rnorm(n),b=factor(rbinom(n,2,.5)))
npdeneqtest(sample.A,sample.B,boot.num=99)
Sys.sleep(5)
## Mixed datatypes, distributions are unequal
sample.A <- data.frame(a=rnorm(n),b=factor(rbinom(n,2,.5)))
sample.B <- data.frame(a=rnorm(n,sd=10),b=factor(rbinom(n,2,.25)))
npdeneqtest(sample.A,sample.B,boot.num=99)
} % enddontrun
}
\keyword{ nonparametric }
|
5cf217615541810e5c41d0258996ec5d73dedf19 | 465ad1d280890bf23acf6a4e473d0aff03bbef0c | /code/run_flash_drift_rand.R | 293e1811b65f3c4dcfa3a0f3f5b96c7beb5f5a00 | [] | no_license | jhmarcus/drift-workflow | c6fef2ea3392296266fb72d15aee0ae77a30a8c5 | da05b47f313f183f994155384ca6790f7b865d6e | refs/heads/master | 2022-12-17T14:55:06.367786 | 2020-09-18T18:24:11 | 2020-09-18T18:24:11 | 170,155,451 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | r | run_flash_drift_rand.R | library(tidyverse)
library(ebnm)
library(flashier)
library(drift.alpha)
library(softImpute)
library(lfa)
options(extrapolate.control=list(beta.max=1.0))
args <- commandArgs(trailingOnly=TRUE)
bed_prefix <- args[1]
rds_path <- args[2]
K <- as.integer(args[3])
KCOMPLETE <- 30
# read the genotype matrix
Y <- t(lfa:::read.bed(bed_prefix))
# complete missing data before running
fit <- softImpute(Y, rank=KCOMPLETE, lambda=0.0, type="als")
Y_imp <- complete(Y, fit)
# random init
n <- nrow(Y_imp)
EL <- matrix(runif(n * K), nrow=n, ncol=K)
EL[, 1] <- 1
EF <- t(solve(crossprod(EL), crossprod(EL, Y)))
dr <- drift(init_from_EL(Y, EL, EF), miniter=20, maxiter=20, extrapolate=FALSE, verbose=TRUE)
dr <- drift(dr, miniter=2, maxiter=2500, tol=1e-4, extrapolate=TRUE, verbose=TRUE)
# save the fitted drift object as an RDS file
saveRDS(dr, rds_path)
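# Intended invocation, inferred from the commandArgs() calls above:
#   Rscript run_flash_drift_rand.R <plink_bed_prefix> <output_rds_path> <K>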
|
0de611082a2bddc0227d74dfd7757850ab6f53df | 0479b5e809beae1d18a9c6b603305d674fd5b12e | /man/combine_pvalue.Rd | 5f1f2312c02333b38e78346b5b32e5c9b476cfae | [] | no_license | huerqiang/GeoTcgaData | ecbd292e37df065ae4697c7dd07027c1e665853d | cc85914f2a17177164c7ae426f8f0f09f91e98c1 | refs/heads/master | 2023-04-12T10:04:20.034688 | 2023-04-04T05:57:04 | 2023-04-04T05:57:04 | 206,305,770 | 6 | 0 | null | null | null | null | UTF-8 | R | false | true | 835 | rd | combine_pvalue.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SNP.R
\name{combine_pvalue}
\alias{combine_pvalue}
\title{Combine p-values of SNP difference analysis results}
\usage{
combine_pvalue(snpResult, snp2gene, combineMethod = min)
}
\arguments{
\item{snpResult}{data.frame of SNP difference analysis result.}
\item{snp2gene}{data frame with two columns: snp and gene.}
\item{combineMethod}{Method for combining the p-values of
multiple SNPs within a gene.}
}
\value{
data.frame
}
\description{
Combine p-values of SNP difference analysis results.
}
\examples{
snpResult <- data.frame(pvalue = runif(100), estimate = runif(100))
rownames(snpResult) <- paste0("snp", seq_len(100))
snp2gene <- data.frame(snp = rownames(snpResult),
gene = rep(paste0("gene", seq_len(20)), 5))
result <- combine_pvalue(snpResult, snp2gene)
}
|
20ce30e4de48ce65df7a3ad3c6ccb6609a2a0788 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/neatmaps/examples/formatCluster.Rd.R | b1b5cb3d738ad51f53b2561652aaf9d15bbe0199 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | formatCluster.Rd.R | library(neatmaps)
### Name: formatCluster
### Title: Format Cluster Output
### Aliases: formatCluster
### ** Examples
# dummy cluster results
clustList <- list(c("A", "B"), c("C", "D", "E"))
formatCluster(clusterList = clustList)
|
ef1c23c631398b1e199f9c5cafe121ce32b0e7df | cc1b9747506561c5f306415307c0862704c526b0 | /secr-analysis.R | 3787c9b3fd4d4f9c426a5c708a191c048198c6f9 | [] | no_license | cwsjitu/SECR-Simulation-Analysis | b4062f465ba0c2df6dceb2e5a3708391835289d8 | 52708ee520ba2bed4c42a1c1111494ab15beae45 | refs/heads/master | 2021-01-20T09:16:35.868909 | 2017-05-04T10:01:40 | 2017-05-04T10:01:40 | 90,232,064 | 0 | 0 | null | 2017-05-04T07:03:34 | 2017-05-04T07:03:34 | null | UTF-8 | R | false | false | 579 | r | secr-analysis.R |
##set your working directory with
# setwd()
library(secr)
library(raster)
library(rgdal)
library(sp)
library(maptools)
library(rgeos)
library(dplyr)
## coordinate system
latlong = "+init=epsg:4326" ## LatLon Projection
ingrid = "+init=epsg:32643" ## UTM Projection
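## (sketch) to move spatial data between these systems once loaded, e.g.:
## proj_obj <- sp::spTransform(obj_latlong, sp::CRS(ingrid))  # 'obj_latlong' is a hypothetical Spatial* object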
## assumptions to generate capture history
N <- NA        ## Number of individuals/activity centers (set before running)
traploc <- NA  ## Number of trap locations (set before running)
g0 <- NA       ## value between 0.1 and 0.3 (set before running)
sigma <- NA    ## depends on home range of species (set before running)
occasions <- 10 ## 10 sampling occasions |
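## A possible continuation, shown only as a sketch; the original script stops here.
## Object names and the grid layout below are hypothetical -- adapt them to your survey design.
# traps_xy <- secr::make.grid(nx = 10, ny = 10, spacing = <trap spacing in m>, detector = "proximity")
# pop      <- secr::sim.popn(D = <density per hectare>, core = traps_xy, buffer = 4 * sigma)
# capthist <- secr::sim.capthist(traps_xy, popn = pop,
#                                detectpar = list(g0 = g0, sigma = sigma),
#                                noccasions = occasions)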
bf31c6a02ca8150d62a921d566b1ec8a55e4ba40 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.end.user.computing/man/appstream_describe_application_fleet_associations.Rd | 3bd33cd440d610fe81433a1abe76ba4fa8d4f025 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,001 | rd | appstream_describe_application_fleet_associations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_describe_application_fleet_associations}
\alias{appstream_describe_application_fleet_associations}
\title{Retrieves a list that describes one or more application fleet
associations}
\usage{
appstream_describe_application_fleet_associations(
FleetName = NULL,
ApplicationArn = NULL,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{FleetName}{The name of the fleet.}
\item{ApplicationArn}{The ARN of the application.}
\item{MaxResults}{The maximum size of each page of results.}
\item{NextToken}{The pagination token used to retrieve the next page of results for this
operation.}
}
\description{
Retrieves a list that describes one or more application fleet associations. Either ApplicationArn or FleetName must be specified.
See \url{https://www.paws-r-sdk.com/docs/appstream_describe_application_fleet_associations/} for full documentation.
}
\keyword{internal}
|
9b7f1ca2ab9da28e5156620557882d392ea96a20 | 881d461d3ca9c3acf2d4076e7ea042053fd0d6e6 | /alldeaths_2016.R | 82ef7ac94f79e0155c63f6b09e076eff8f2e2ae7 | [] | no_license | markocherrie/SpatialClustering | f35326bb0f4d1a29feaf70a84da59faa5a7686f6 | 2f5c1fc1c8b99d15846a51a5a660ac3165069ed2 | refs/heads/master | 2022-11-22T03:17:48.172696 | 2020-07-24T10:46:56 | 2020-07-24T10:46:56 | 282,066,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,560 | r | alldeaths_2016.R | ############### STEP 1: PRE_PROCESSING
# read in the data
library(readr)
library(sf)
dz<-read_sf("boundaries/DZ/SG_DataZoneBdry_2011/SG_DataZone_Bdry_2011.shp")
simd<-read_csv("data/SIMD2016indicators.csv")
# pre-processing
library(dplyr)
dzsimd<- dz %>%
left_join(simd, by= c("DataZone" = "Data_Zone")) %>%
filter(Council_area%in%c("Glasgow City")) %>%
mutate(SMR=as.numeric(as.character(SMR))) %>%
mutate(SMR= ifelse(is.na(SMR), median(SMR, na.rm=TRUE), SMR)) %>%
select(DataZone, Name, SMR)
# plot with sf
plot(dzsimd["SMR"])
### plot with tmap
library(tmap)
tmap_mode("plot")
tmap_dzsimd<-tm_shape(dzsimd) +
tm_fill("SMR", style = "quantile", palette = "Blues") +
tm_borders(alpha = 0.1) +
tm_layout(main.title = "SMR in Glasgow 2016", main.title.size = 0.7 ,
legend.position = c("right", "bottom"), legend.title.size = 0.8)
tmap_dzsimd
tmap_mode("view")
tmap_dzsimd
# construct neighbours list from polygon list
library(sp)
library(spdep)
# convert to sp object
dzsimd_sp <- as(dzsimd, "Spatial")
w <- poly2nb(dzsimd_sp, row.names=dzsimd_sp$DataZone)
summary(w)
# Plot the boundaries
plot(dzsimd_sp, col='white', border='gray', lwd=2)
# Get polygon centroids
xy <- coordinates(dzsimd_sp)
# Draw lines between the polygons centroids for neighbours
# that are listed as linked in w
plot(w, xy, col='red', lwd=2, add=TRUE)
############### STEP 2: CLUSTER PROCESSING
# listw type spatial weights object
ww <- nb2listw(w, style='B')
ww
# calculate moran's I
moran(dzsimd$SMR, ww, n=length(ww$neighbours), S0=Szero(ww))
# Calculate whether significant using monte carlo method
set.seed(1234)
dzsimdmc_results <- moran.mc(dzsimd$SMR, ww, nsim=1000)
dzsimdmc_results
plot(dzsimdmc_results, main="", las=1)
# we need a weights matrix for this
wm <- nb2mat(w, style='B')
# row standardisation of weights matrix
rwm <- mat2listw(wm, style='W')
mat <- listw2mat(rwm)
moran.plot(dzsimd$SMR, rwm, las=1)
# Decomposition of global indicators
locm_dzsimd <- localmoran(dzsimd$SMR, ww)
summary(locm_dzsimd)
# scale the SMR
dzsimd$s_SMR <- scale(dzsimd$SMR) %>% as.vector()
# Generate the lag
dzsimd$lag_s_SMR <- lag.listw(ww, dzsimd$s_SMR)
# create a dataframe with SMR + spatial laggged SMR
x <- dzsimd$s_SMR
y <- dzsimd$lag_s_SMR
xx <- tibble::data_frame(x,y)
############### STEP 3: CLUSTER RESULTS
# Identify significant clusters
dzsimdsp <- st_as_sf(dzsimd) %>%
mutate(quad_sig = ifelse(dzsimd$s_SMR > 0 &
dzsimd$lag_s_SMR > 0 &
locm_dzsimd[,5] <= 0.05,
"high-high",
ifelse(dzsimd$s_SMR <= 0 &
dzsimd$lag_s_SMR <= 0 &
locm_dzsimd[,5] <= 0.05,
"low-low",
ifelse(dzsimd$s_SMR> 0 &
dzsimd$lag_s_SMR <= 0 &
locm_dzsimd[,5] <= 0.05,
"high-low",
ifelse(dzsimd$s_SMR <= 0 &
dzsimd$lag_s_SMR > 0 &
locm_dzsimd[,5] <= 0.05,
"low-high",
"non-significant")))))
# plot the significant clusters
qtm(dzsimdsp, fill="quad_sig", fill.title="LISA for SMR 2016", fill.palette = c("#DC143C","#87CEFA","#DCDCDC"))
# Summary table of clusters
table(dzsimdsp$quad_sig)
# let's get the data out so we can use later
smrclusterchange <-
dzsimdsp %>%
select(DataZone, quad_sig) %>%
left_join(smr2020cluster, by="DataZone") %>%
mutate(quad_sigchange = ifelse(quad_sigSMR2020 == quad_sig,"No change",
ifelse(quad_sig == "non-significant" & quad_sigSMR2020 =="high-high", "Non-sig to high-high",
ifelse(quad_sig == "non-significant" & quad_sigSMR2020 =="low-low", "Non-sig to low-low",
ifelse(quad_sig == "low-low" & quad_sigSMR2020 =="non-significant", "Low-low to non-sig",
ifelse(quad_sig == "high-high" & quad_sigSMR2020 =="non-significant", "High-high to non-sig",
"Non-classified"))))))
qtm(smrclusterchange, fill="quad_sigchange", fill.title="Change in cluster 2016-2020", fill.palette = c("#ffc0cb","#32CD32","#DCDCDC", "#DC143C", "#87CEFA"))
|
c3a9a3b7ff73b669087bd3ba47f57d40b2c78c10 | 399f7bc329848e396ca8faa449a3bd23aec7b35f | /man/has_name.Rd | cb7e82b1029dbfeb4861643d2e5ea198c8c55f3f | [] | no_license | cran/container | c9cbe63c8aee7be8b0406cadfb4bcabf4031333b | c3c3fff6cc67740d4d1820f338285c04cf80c92d | refs/heads/master | 2022-12-24T03:30:41.848446 | 2022-12-11T10:20:02 | 2022-12-11T10:20:02 | 145,894,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 928 | rd | has_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/has_name.R
\name{has_name}
\alias{has_name}
\alias{has_name.Container}
\alias{has_name.dict.table}
\title{Check for Name}
\usage{
has_name(x, name)
\method{has_name}{Container}(x, name)
\method{has_name}{dict.table}(x, name)
}
\arguments{
\item{x}{any \code{R} object.}
\item{name}{\code{character} the name to be found.}
}
\value{
\code{TRUE} if name is in \code{x} and otherwise \code{FALSE}.
For \code{dict.table} \code{TRUE} if the dict.table objects has the given
column name, otherwise \code{FALSE}.
}
\description{
Check for Name
}
\examples{
co = container(a = 1, 2, f = mean)
has_name(co, "a") # TRUE
has_name(co, "f") # TRUE
has_name(co, "2") # FALSE
dit = dict.table(a = 1:2, b = 3:4)
has_name(dit, "a") # TRUE
has_name(dit, "x") # FALSE
}
\seealso{
\code{\link[=has]{has()}}
}
|
22a12698f640d626de21251a22f6b82bd10f83f9 | 8cffdd5f866185e8529376326b9c3accac583930 | /man/getL-methods.Rd | 7497467c4fda0c4cd8e2804c8d43fb5a29c48314 | [] | no_license | cran/simctest | fc4a187ae7ad926dbbae9a71967b44795f3de85d | eadc866013858443cbea9a6acd999b1143207373 | refs/heads/master | 2021-01-16T18:32:13.653593 | 2019-11-04T12:20:02 | 2019-11-04T12:20:02 | 17,699,678 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 571 | rd | getL-methods.Rd | \name{getL-methods}
\alias{getL}
\docType{methods}
\alias{getL-methods}
\alias{getL,sampalgPrecomp-method}
\title{Methods for Function getL in Package `simctest'}
\description{
Returns the lower boundary for the stopping rule
}
\usage{
##S4 method
getL(alg,ind)
}
\arguments{
\item{alg}{the sampling algorithm}
\item{ind}{a vector of indices at which the lower stopping boundary should
be returned}
}
\section{Methods}{
\describe{
\item{alg = "sampalgPrecomp"}{ the sampling algorithm to be used }
}}
\examples{
getL(getalgprecomp(),1:100)
}
\keyword{methods}
|
cb7961b9aa636ee81f6a767954341d58d80192c7 | 9cfd9c26181fef9a29cdb0934a7609c45305054c | /hrda/ui.R | 4bcbe7c456c49a231904c0b55eecd8fd214a8986 | [] | no_license | suntreeshl/hrda | 08106507bb6158114865dad8e57b6daf27fcd171 | fe01a1948e5f7a45040ed0334670076a12f4af27 | refs/heads/master | 2022-11-11T20:31:03.639049 | 2020-07-03T13:22:35 | 2020-07-03T13:22:35 | 269,610,537 | 0 | 0 | null | 2020-06-05T11:10:46 | 2020-06-05T11:10:46 | null | UTF-8 | R | false | false | 2,247 | r | ui.R | library(shiny)
library(shinydashboard)
library(tidyverse)
library(ggplot2)
library(DT)
# Define UI for application that draws a histogram
ui <- dashboardPage(skin="blue",
dashboardHeader(
title = "HR 데이터 분석"
),
dashboardSidebar(
sidebarMenu(
# Setting id makes input$tabs give the tabName of currently-selected tab
id = "tabs",
menuItem("근속현황", tabName = "dashboard", icon = icon("dashboard")),
menuItem("상관분석", icon = icon("th"), tabName = "widgets",
badgeColor = "green"),
menuItem("시각화", icon = icon("bar-chart-o"),
menuSubItem("Sub-item 1", tabName = "subitem1"),
menuSubItem("Sub-item 2", tabName = "subitem2")
),
#menuItem("About", tabName = "about")
menuItem("About",
menuSubItem("프로젝트", tabName = "about"),
menuSubItem("raw", tabName = "raw")
)
)
),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
),
tabItems(
tabItem(tabName = "dashboard",
h2("Dashboard tab content")
),
tabItem(tabName = "widgets",
h2("Widgets tab content")
),
tabItem(tabName = "subitem1",
h2("subitem1 tab content")
),
tabItem(tabName = "subitem2",
h2("subitem2 tab content")
),
tabItem(tabName = "about",
h2("개발 계획서"),
includeMarkdown("doc/plan.rmd")
),
tabItem(tabName = "raw",
h2("원본 데이터"),
column(4,
selectInput("att",
"퇴사여부:",
c("All",
unique(as.character(data$Attrition))))
),
DT::dataTableOutput("table")
)
)
)
) |
058bbc179d6f13347ca20837928f975f4fa8293c | cb48a3993cddaf8f8cfcf78b3aa7419a1bb62fc9 | /plot4.R | 87d7b629e006dab699f6285e7732a6904595e169 | [] | no_license | BowenRaymone/ExData_Plotting1 | b7bd6caa05ca1db8032544eac6812c070e3a66e8 | cd3214738f402792904ae9898046659d57ff8d10 | refs/heads/master | 2021-01-24T03:43:18.813627 | 2018-02-26T03:21:44 | 2018-02-26T03:21:44 | 122,902,958 | 1 | 0 | null | 2018-02-26T02:43:40 | 2018-02-26T02:43:40 | null | UTF-8 | R | false | false | 1,433 | r | plot4.R | # run the file
power_consumption <- read.table("./household_power_consumption.txt",header = TRUE,stringsAsFactors=FALSE,sep=";")
sub_power_consumption <- power_consumption[power_consumption$Date %in% c("1/2/2007","2/2/2007") ,]
# remove the total data set to clean some memory
rm("power_consumption")
# convert the date time using strptime
datetime <- strptime(paste(sub_power_consumption$Date, sub_power_consumption$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# plot of Global_active_power into png
png("plot4.png", width=800, height=800)
par(mfrow = c(2, 2))
# plot Global_active_power
plot(datetime, as.numeric(sub_power_consumption$Global_active_power), type="l", xlab="", ylab="Global Active Power", cex=0.2)
# plot Voltage
plot(datetime, as.numeric(sub_power_consumption$Voltage), type="l", xlab="datetime", ylab="Voltage")
# plot energy
plot(datetime, as.numeric(sub_power_consumption$Sub_metering_1), type="l", ylab="Energy Submetering", xlab="")
lines(datetime, as.numeric(sub_power_consumption$Sub_metering_2), type="l", col="red")
lines(datetime, as.numeric(sub_power_consumption$Sub_metering_3), type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
# plot Global_reactive_power
plot(datetime, as.numeric(sub_power_consumption$Global_reactive_power), type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
7d332e4dc0535a35a1bf4cb71ec558dae96fde6e | de936b8365a44c245fae40ab116a42e17997e6a7 | /regtable.R | 38def7af21d4bd8ccd949f1f2eae84e71dbfbe56 | [] | no_license | KoenV/regtable | aa47b60dc1640d7632f386877a4606f8dbcb149c | 2d1c08b7e79c5ba65a3c3028b5efea0b806e55ac | refs/heads/master | 2021-01-18T16:06:44.594349 | 2017-04-24T13:28:20 | 2017-04-24T13:28:20 | 86,712,896 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,034 | r | regtable.R | # ##############################################################################
# function to make tables for lm and glm objects
# [email protected]
# date: 24/04/2017
################################################################################
reg_table = function(fit=fit,data=data,log=FALSE,roundings=3){
require(Hmisc)
require(car)
suppressMessages(require(tibble))
format_pval.table <- function(x){
if (x < .001) return(paste('<', '.001'))
else paste(round(x, 3))
}
model_variables = attr(fit$terms,"term.labels")
summary_table = summary(fit)$coef
if(log==TRUE){
summary_table[,1]=exp(summary_table[,1])
}
table = vector('list',length(model_variables)+1)
for (i in 1:length(model_variables)){
if (is.factor(data[,model_variables[i]])){
table[[i]] = as_tibble(matrix(NA,
nlevels(data[,model_variables[i]])+1,5))
names(table[[i]]) = c('Variable','Value',
ifelse(log==TRUE,'Odds Ratio','Estimate'),'95% CI',
'P-value')
table[[i]][1,1] = ifelse(label(data[,model_variables[i]])=='',
model_variables[i],label(data[,model_variables[i]]))
      # overall p-value for the factor, taken from the ANOVA table
test = if(log==TRUE){
anova(fit,test='LRT')
}else{anova(fit)}
logical_row = grepl(model_variables[i],rownames(test))
table[[i]][1,5] = ifelse(nrow(table[[i]])==3,'',
format_pval.table(test[logical_row,5]))
table[[i]][1,2:4] = ''
table[[i]][2:nrow(table[[i]]),1] = ''
table[[i]][2:nrow(table[[i]]),2] = levels(data[,model_variables[i]])
table[[i]][2,3:5] = '#'
spfc_smmry = grepl(model_variables[i],rownames(summary_table))
table[[i]][3:nrow(table[[i]]),3] = round(
summary_table[spfc_smmry,1],roundings)
ci = suppressMessages(confint(fit))
if(log==TRUE){
ci=exp(ci)
}
lower_ci = unname(round(ci[spfc_smmry,1],roundings))
upper_ci = unname(round(ci[spfc_smmry,2],roundings))
table[[i]][3:nrow(table[[i]]),4] = paste0(lower_ci,';',upper_ci)
table[[i]][3:nrow(table[[i]]),5] = sapply(1:sum(spfc_smmry),
function(x){
format_pval.table(summary_table[spfc_smmry,4][x])
})
}else{
table[[i]] = as_tibble(matrix(NA,1,5))
names(table[[i]]) = c('Variable','Value',
ifelse(log==TRUE,'Odds Ratio','Estimate'),'95% CI',
'P-value')
table[[i]][1,1] = ifelse(label(data[,model_variables[i]])=='',
model_variables[i],label(data[,model_variables[i]]))
table[[i]][1,2] = ''
spfc_smmry = grepl(model_variables[i],rownames(summary_table))
table[[i]][1,3] = round(summary_table[spfc_smmry,1],roundings)
ci = suppressMessages(confint(fit))
if(log==TRUE){
ci=exp(ci)
}
lower_ci = unname(round(ci[spfc_smmry,1],roundings))
upper_ci = unname(round(ci[spfc_smmry,2],roundings))
table[[i]][1,4] = paste0(lower_ci,';',upper_ci)
table[[i]][1,5] = format_pval.table(summary_table[spfc_smmry,4])
}
}
return(do.call('rbind',table))
}
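# Example use (sketch only; 'mtcars' stands in for labelled study data):
# dat <- mtcars
# dat$cyl <- factor(dat$cyl)
# fit_lm  <- lm(mpg ~ wt + cyl, data = dat)
# reg_table(fit = fit_lm, data = dat)               # estimates with 95% CI
# fit_glm <- glm(am ~ wt + cyl, data = dat, family = binomial)
# reg_table(fit = fit_glm, data = dat, log = TRUE)  # odds ratios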
|
850673a39ea7d7d2459c22a8758e98457a4afe73 | 02638637685acc16f9a404d9f589d0c0dd0cb784 | /tests/testthat.R | 4b29043cbec893c70706968b8c9c85ab6be2fc73 | [] | no_license | himiko14122/mdsstat | cada89ba3bfeef3b38d5a0140fb643fb7b521550 | dfa250f58d6a09662062382b24144dc3194524b4 | refs/heads/master | 2022-04-12T22:05:48.970010 | 2020-03-08T14:50:02 | 2020-03-08T14:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(mdsstat)
test_check("mdsstat")
|
7b400881e3bd9eb295fc17ebb0c5643eb64e40b0 | 4970a3f8a4ca8a42a6fb22f454265691544f1810 | /man/galaxy.Rd | 540f5a353af1402a1808af66030ccc89d29b821f | [] | no_license | Penncil/xmeta | d2ee5b14843d88f1b28c3e3755816269103cbbcd | 832b3f244648818cf2df2691ec5dd7bfa21bc810 | refs/heads/master | 2023-04-08T17:04:05.411553 | 2023-04-04T17:05:36 | 2023-04-04T17:05:36 | 249,091,838 | 4 | 1 | null | 2020-03-22T01:27:08 | 2020-03-22T01:27:07 | null | UTF-8 | R | false | true | 3,301 | rd | galaxy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/galaxy.R
\name{galaxy}
\alias{galaxy}
\title{Galaxy Plot: A New Visualization Tool of Bivariate Meta-Analysis Studies}
\usage{
galaxy(data, y1, s1, y2, s2, scale1, scale2, scale.adj,
corr, group, study.label, annotate, xlab, ylab, main, legend.pos)
}
\arguments{
\item{data}{dataset with at least 4 columns for the effect sizes of the two outcomes and their standard errors}
\item{y1}{column name for outcome 1, default is 'y1'}
\item{s1}{column name for standard error of \code{y1}, default is 's1'}
\item{y2}{column name for outcome 2, default is 'y2'}
\item{s2}{column name for standard error of \code{y2}, default is 's2'}
\item{scale1}{parameter for the length of the cross hair: the ellipse width is scale1 / s1 * scale.adj}
\item{scale2}{parameter for the length of the cross hair: the ellipse height is scale2 / s2 * scale.adj}
\item{scale.adj}{a pre-specified parameter to adjust for \code{scale1} and \code{scale2}}
\item{corr}{column name for within-study correlation}
\item{group}{column name for study group}
\item{study.label}{column name for study label}
\item{annotate}{logical specifying whether study label should be added to the plot, default is FALSE.}
\item{xlab}{x axis label, default \code{y1}}
\item{ylab}{y axis label, default \code{y2}}
\item{main}{main title}
\item{legend.pos}{The position of the legend for study groups if \code{group} is specified, see \code{legend}, default is 'bottomright'.}
}
\description{
A new visualization method that simultaneously presents the effect sizes of bivariate outcomes and their standard errors in a two-dimensional space.
}
\details{
This function returns the galaxy plot to visualize bivariate meta-analysis data,
which faithfully retains the information in two separate funnel plots, while providing
useful insights into outcome correlations, between-study heterogeneity and joint asymmetry.
Galaxy plot: a new visualization tool of bivariate meta-analysis studies.
Funnel plots have been widely used to detect small study effects in the results of
univariate meta-analyses. However, there is no existing visualization tool that is
the counterpart of the funnel plot in the multivariate setting. We propose a new
visualization method, the galaxy plot, which can simultaneously present the effect sizes
of bivariate outcomes and their standard errors in a two-dimensional space.
The galaxy plot is an intuitive visualization tool that can aid in interpretation
of results of multivariate meta-analysis. It preserves all of the information presented
by separate funnel plots for each outcome while elucidating more complex features that
may only be revealed by examining the joint distribution of the bivariate outcomes.
}
\examples{
data(sim_dat)
galaxy(data=sim_dat, scale.adj = 0.9, corr = 'corr', group = 'subgroup',
study.label = 'study.id', annotate = TRUE, main = 'galaxy plot')
}
\references{
Hong, C., Duan, R., Zeng, L., Hubbard, R., Lumley, T., Riley, R., Chu, H., Kimmel, S., and Chen, Y. (2020)
Galaxy Plot: A New Visualization Tool of Bivariate Meta-Analysis Studies, American Journal of Epidemiology, https://doi.org/10.1093/aje/kwz286.
}
\author{
Chuan Hong, Chongliang Luo, Yong Chen
}
|
b65ce37adce03922c019afd08d1777bc1323a06a | f6887dbd9e53746a835ab8553bbfd40255ff333e | /bin/analyse_structure.R | 1e9724023e4c848e0172d4a64c424fa2503628f2 | [
"MIT"
] | permissive | amchakra/tosca | 38d1d4e43a917623076430035497447331584f63 | 6a902524c818e104cbcf9bf753828a289ee32905 | refs/heads/main | 2023-06-10T10:18:04.766415 | 2022-10-17T16:11:15 | 2022-10-17T16:11:15 | 260,152,891 | 3 | 2 | MIT | 2023-03-07T13:34:43 | 2020-04-30T08:17:03 | Nextflow | UTF-8 | R | false | false | 4,052 | r | analyse_structure.R | #!/usr/bin/env Rscript
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(toscatools))
suppressPackageStartupMessages(library(rslurm))
suppressPackageStartupMessages(library(tictoc))
suppressPackageStartupMessages(library(parallel))
suppressPackageStartupMessages(library(optparse))
option_list <- list(make_option(c("", "--hybrids"), action = "store", type = "character", help = "Hybrids file"),
make_option(c("", "--fasta"), action = "store", type = "character", help = "Transcript fasta"),
make_option(c("", "--output"), action = "store", type = "character", help = "Output file"),
make_option(c("", "--nodes"), action = "store", type = "integer", default = 100, help = "Number of nodes to allocate [default: %default]"),
make_option(c("", "--shuffled_mfe"), action = "store_true", type = "logical", help = "Calculate shuffled binding energy (100 iterations)", default = FALSE),
make_option(c("", "--clusters_only"), action = "store_true", type = "logical", help = "Analyse structure for hybrids in clusters only", default = FALSE))
opt_parser = OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
# Load genome
message("Loading genome...")
tic()
genome.fa <- Biostrings::readDNAStringSet(opt$fasta)
genome.dt <- data.table(gene_id = names(genome.fa),
sequence = as.character(genome.fa))
toc()
hybrids.dt <- fread(opt$hybrids)
setkey(hybrids.dt, name)
stopifnot(!any(duplicated(hybrids.dt$name))) # Check no duplicates
# Get sequences
message("Getting sequences...")
tic()
hybrids.dt <- get_sequence(hybrids.dt = hybrids.dt, genome.dt = genome.dt)
toc()
stopifnot(!any(is.na(c(hybrids.dt$L_sequence, hybrids.dt$L_sequence))))
sel.hybrids.dt <- hybrids.dt[!(L_seqnames == "rRNA_45S" & R_seqnames == "rRNA_45S")]
sel.hybrids.dt <- sel.hybrids.dt[!(L_seqnames == "rDNA" & R_seqnames == "rDNA")]
sel.hybrids.dt <- sel.hybrids.dt[!(L_seqnames == "rRNA_5S" & R_seqnames == "rRNA_5S")]
if(opt$clusters_only) sel.hybrids.dt <- sel.hybrids.dt[!is.na(cluster)][cluster != "."]
# Getting MFE and structure
message("Analysing structure...")
tic()
# Cluster jobs
sjob <- slurm_apply(analyse_structure, sel.hybrids.dt[, .(name, L_sequence, R_sequence)],
jobname = sapply(strsplit(basename(opt$hybrids), "\\."), "[[", 1),
nodes = opt$nodes,
cpus_per_node = 1,
slurm_options = list(time = "24:00:00"),
submit = TRUE)
Sys.sleep(60)
status <- FALSE
while(status == FALSE) {
squeue.out <- system(paste("squeue -n", sjob$jobname), intern = TRUE) # Get contents of squeue for this job
if(length(squeue.out) == 1) status <- TRUE # i.e. only the header left
Sys.sleep(60)
}
structure.list <- get_slurm_out(sjob)
cleanup_files(sjob) # Remove temporary files
structure.dt <- rbindlist(structure.list)
hybrids.dt <- merge(hybrids.dt, structure.dt, by = "name")
# Do shuffled depending on flag
if(opt$shuffled_mfe) {
sjob <- slurm_apply(get_shuffled_mfe, sel.hybrids.dt[, .(name, L_sequence, R_sequence)],
jobname = sapply(strsplit(basename(opt$hybrids), "\\."), "[[", 1),
nodes = opt$nodes,
cpus_per_node = 1,
slurm_options = list(time = "24:00:00"),
submit = TRUE)
Sys.sleep(60) # To give it enough time to submit before the first check
status <- FALSE
while(status == FALSE) {
squeue.out <- system(paste("squeue -n", sjob$jobname), intern = TRUE) # Get contents of squeue for this job
if(length(squeue.out) == 1) status <- TRUE # i.e. only the header left
Sys.sleep(60)
}
mfe.list <- get_slurm_out(sjob)
cleanup_files(sjob) # Remove temporary files
mfe.dt <- rbindlist(mfe.list)
hybrids.dt <- merge(hybrids.dt, mfe.dt, by = "name")
}
fwrite(hybrids.dt, opt$output, sep = "\t")
message("Completed!") |
0915baaab07dc6de5020e9d0c0089c749772e4fa | 517ac8ca5bef92173ec4d9f62dee3a8075c8291c | /man/compare.results.vs.cqt.Rd | bc6eb29cfbf7a048ce373f8bd3648e31756cbd1d | [] | no_license | cran/CME.assistant | 5d9695b3e31e8916ec59e6d60b45135b144ef8d2 | 9e225c336693abbffd41c6c63e5acdcd024633e1 | refs/heads/master | 2023-03-23T14:28:23.960474 | 2021-03-22T09:30:02 | 2021-03-22T09:30:02 | 317,813,689 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 543 | rd | compare.results.vs.cqt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4.CC_funcs.R
\name{compare.results.vs.cqt}
\alias{compare.results.vs.cqt}
\title{Compare the saved cqt vs results}
\usage{
compare.results.vs.cqt(dt_results, dt_cqt)
}
\arguments{
\item{dt_results}{obtained by `read.all.results.csv(results_dir_list)`}
\item{dt_cqt}{obtained by `get.dt.cqt(dir_cqt_files)`}
}
\description{
Should be the same as comparing CC profiles vs results. It is used to check
whether the cqt file and the CC profile were both updated at the same time.
}
|
935060ae2fcebf6de1d15c1a77f70b4b22f00a57 | 5fb78098a301178b381be6dd3207527aceef34e9 | /install.packages.R | 148ea088b35dd47f2442c9ccd2bf9688d64d68c4 | [] | no_license | wan-yang/cancer_cohort_trends | e38ccbc0befff99643ab9a374d3fdd68b57d8954 | 89a19babfbfd412c3ec37f570893c996ec1f4f2c | refs/heads/master | 2022-04-12T05:22:42.276900 | 2020-02-05T14:42:07 | 2020-02-05T14:42:07 | 219,533,936 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 224 | r | install.packages.R | ## To install R packages for the analysis
## Install multiple packages
install.packages(c("RColorBrewer",
"data.table",
"magrittr",
"stringr",
"ggplot2")) |
c60258247b152517529fdc526b83140cd0f3eb8d | 89b3c5d320e4b0ae9be7d28c9e4cba640e8b069f | /OldExams/20170814/exam_2017-08-14_solutions.R | ad8277b7810312a42425398577cb5fe0b0357f1b | [] | no_license | andrea003/KursRprgm | f5c1cb1dc803052c8d19ec6b176f40ebc2694d0c | 4f707c834038d18b2e65ac74f6eb5c204e4cea71 | refs/heads/master | 2022-11-28T12:29:30.766426 | 2020-08-10T08:07:15 | 2020-08-10T08:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,897 | r | exam_2017-08-14_solutions.R |
#------------------------------------------------------
# 1
# a)
a<-5
b<-2
(factorial(x = a)-b^a)^-0.1
# b)
data(OrchardSprays)
my_df<-OrchardSprays
dim(my_df)[1]
index<-seq(from = 1,to = dim(my_df)[1],by = 2)
my_df<-my_df[index,1:2]
head(my_df)
tail(my_df)
# c)
letters
my_text<-rep(letters,1000)
my_text
# d)
x_vect<-rep(c(1,2,3,2,1),each=3)
A<-matrix(data = x_vect,nrow = 5,ncol = 3,byrow = TRUE)
rownames(A)<-c("w","a","r","y","f")
A
x_vect2<-rep(c(1,2,3,2,1),times=3)
matrix(data = c(1,2,3,2,1),nrow = 5,ncol = 3,byrow = FALSE)
# 2)
# a)
for(i in 1:7){
vec1<-rep("xyz",8-i)
print(vec1)
vec2<-rep("abc",i)
print(vec2)
}
# b)
# för att förstå den kumulativa summan kör:
?cumsum
cbind(1:10,cumsum(1:10))
i<-1
s<-0
while(s+i<50){
s<-s+i
#print(paste("i:",i,"s:",s))
if(s%%2==0){
print("Go!")
}else if(s%%5==0){
print(sin(s))
}else{
print(s)
}
i<-i+1
}
# 3:
library(stringr)
library(lubridate)
robot<-readLines(con = "/home/joswi05/Dropbox/Josef/732G33_VT2020/KursRprgm/OldExams/20170814/wiki_robot.txt")
# b)
a<-str_extract_all(string = robot,pattern = "(^| )[Tt]he ")
b<-str_extract(string = unlist(a),pattern = "[Tt]he")
length(b)
# c)
date1<-ymd("1919-05-24")
date2<-ymd("1921-09-12")
date3<-ymd("1918-11-28")
date4<-ymd("2017-08-14")
# i )
wday(date1,label = TRUE,abbr = FALSE)
# ii )
interval(start = date1,end = date2)/days(1)
# iii )
floor(interval(start = date3,end = date2)/weeks(1))
# iv )
floor(interval(start = date1,end = date4)/months(1))
# 4 )
# a)
my_curve<-function(x,a=3){
no_obs<-length(x)
loop_vect<-1:no_obs
y<-rep(0,no_obs)
for(i in loop_vect){
if(x[i]<=-2){ # fall 1: x<=-2
y[i]<-4
}else if(-2<x[i]&x[i]<1){ # fall 2: -2<x<1
y[i]<-x[i]^2
}else{ # fall 3: 1<=x
y[i]<-6-a*x[i]
}
}
return(y)
}
my_curve(x = c(-10,-20,-2,-1.5))
my_curve(x = c(-2,0,0.99,1))
my_curve(x = c(-1,0,1,2,4.5),a = 5)
curve(expr = my_curve,from = -3,to = 3)
my_curve<-function(x,a=3){
return(ifelse(x<=-2,4,ifelse(-2<x&x<1,x^2,(6-a*x))))
}
my_curve(x = c(-10,-20,-2,-1.5))
my_curve(x = c(-2,0,0.99,1))
my_curve(x = c(-1,0,1,2,4.5),a = 5)
curve(expr = my_curve,from = -3,to = 3)
# b)
my_var<-function(x){
n<-length(x)
a<-sum(x)/n
val<-sum((x-a)^2)/(n-1)
return(val)
}
my_var(x = 1:100)
my_var(x = c(2,4,7,2,2.5,6))
my_var(x = c(-3,4,6,5,-10))
# 5)
# a)
library(ggplot2)
data(iris)
ggplot(data = iris,aes(x=Sepal.Length,y=Sepal.Width))+geom_point(aes(color=Species),shape=2)
# b)
iris$Petal.Length
index1<-iris$Species=="versicolor"
index2<-iris$Species=="virginica"
g<-t.test(x = iris$Petal.Length[index1],y=iris$Petal.Length[index2],alternative = "l",conf.level = 0.9)
g
t_value<-g$statistic
t_value
p_value<-g$p.value
p_value
# c)
var_res<-aggregate(x = iris$Sepal.Width,by=list(iris$Species),FUN=median)
var_res
var_res[order(var_res$x)[1],]
|
bb8e350e76f6c97bd84eef03ddb1e436a0ba080e | 1897ae5489b64fae9aa083d62f51254cfe52d26f | /VII semester/machine-learning/labovi/lv4.R | 8514edf90f6989727344d301bc3bddfbc1e0e6f3 | [
"Unlicense",
"LicenseRef-scancode-proprietary-license"
] | permissive | MasovicHaris/etf-alles | f1bfe40cab2de06a26ceb46bdb5c47de2e6db73e | 0ab1ad83d00fafc69b38266edd875bce08c1fc9e | refs/heads/main | 2022-01-01T18:22:54.072030 | 2021-12-22T09:05:05 | 2021-12-22T09:05:05 | 138,169,714 | 9 | 15 | Unlicense | 2020-03-29T23:36:50 | 2018-06-21T12:50:51 | C++ | UTF-8 | R | false | false | 1,417 | r | lv4.R | library(ISLR)
library(dplyr)
library(MASS)
library(caret)
## Zadatak 1
data("Smarket")
data <- Smarket
# ispis varijabli
names(data)
# dimenzija seta
dim(data)
# deskriptivna statistika
summary(data)
# korelacije
cor(select(data, -Direction))
# podjela seta, test set 2015 godina, trening set sve ostalo
training <- data[data$Year != '2005', ]
train_ind <- sample(seq_len(nrow(training)))
test <- data[data$Year == '2005', ]
# LDA na na osnovu Lag1 i Lag2
lda.model <- lda(Direction ~ Lag1 + Lag2, data = data, subset = train_ind)
# predikcija
lda.pred <- predict(lda.model, test)
# lda.pred$class
# konfuzion matrix
confusionMatrix(lda.pred$class, test$Direction)
## Zadatak 2
# QDA na na osnovu Lag1 i Lag2
qda.model <- qda(Direction ~ Lag1 + Lag2, data = data, subset = train_ind)
# predikcija
qda.pred <- predict(qda.model, test)
# lda.pred$class
# konfuzion matrix
confusionMatrix(qda.pred$class, test$Direction)
## Zadatak 3
# LDA na na osnovu svih varijalbi
lda.model <- lda(Direction ~ ., data = data, subset = train_ind)
# predikcija
lda.pred <- predict(lda.model, test)
# lda.pred$class
# konfuzion matrix
confusionMatrix(lda.pred$class, test$Direction)
# QDA na na osnovu svih varijabli
qda.model <- qda(Direction ~ ., data = data, subset = train_ind)
# predikcija
qda.pred <- predict(qda.model, test)
# lda.pred$class
# konfuzion matrix
confusionMatrix(qda.pred$class, test$Direction)
|
42480249822817162ec14dd1fec4ecb3033a7c1f | 2fdf31dceb15a4932c3775e877b56f76c3aeb87b | /man/queue_endpoint.Rd | 0d955c6f44e5a5199ea93f9ca303b607eae5f14f | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | cloudyr/AzureQstor | d135f1cca754157f22227f6f73ac5c6778c7bfa4 | 218afec46676ce689c72959c174291cff0a84fad | refs/heads/master | 2021-05-24T07:45:42.830059 | 2021-01-12T19:36:42 | 2021-01-12T19:36:42 | 253,456,783 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,889 | rd | queue_endpoint.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/endpoint.R
\name{queue_endpoint}
\alias{queue_endpoint}
\title{Create a queue endpoint object}
\usage{
queue_endpoint(
endpoint,
key = NULL,
token = NULL,
sas = NULL,
api_version = getOption("azure_storage_api_version")
)
}
\arguments{
\item{endpoint}{The URL (hostname) for the endpoint, of the form \verb{http[s]://\{account-name\}.queue.\{core-host-name\}}. On the public Azure cloud, endpoints will be of the form \verb{https://\{account-name\}.queue.core.windows.net}.}
\item{key}{The access key for the storage account.}
\item{token}{An Azure Active Directory (AAD) authentication token. This can be either a string, or an object of class AzureToken created by \link[AzureRMR:reexports]{AzureRMR::get_azure_token}. The latter is the recommended way of doing it, as it allows for automatic refreshing of expired tokens.}
\item{sas}{A shared access signature (SAS) for the account.}
\item{api_version}{The storage API version to use when interacting with the host. Defaults to \code{"2019-07-07"}.}
}
\value{
An object of class \code{queue_endpoint}, inheriting from \code{storage_endpoint}.
}
\description{
Create a queue endpoint object
}
\details{
This is the queue storage counterpart to the endpoint functions defined in the AzureStor package.
}
\examples{
\dontrun{
# obtaining an endpoint from the storage account resource object
AzureRMR::get_azure_login()$
get_subscription("sub_id")$
get_resource_group("rgname")$
get_storage_account("mystorage")$
get_queue_endpoint()
# creating an endpoint standalone
queue_endpoint("https://mystorage.queue.core.windows.net/", key="access_key")
}
}
\seealso{
\code{\link[AzureStor:storage_endpoint]{AzureStor::storage_endpoint}}, \code{\link[AzureStor:storage_endpoint]{AzureStor::blob_endpoint}}, \code{\link{storage_queue}}
}
|
eeace51979f602567b5b5e6e7abd808bf015a62a | 09fb6336818f768df7b255b58937d51caab7bc7e | /man/getPkmid.Rd | 2c058690242430ed3b0f524f2531919f3afc4653 | [] | no_license | pb-jlee/R-pbh5 | 2cad361e8c87c8559704184266a260c62f71a901 | 6ff481a98bc58ca1b307988a57f4fbbe4bdb492e | refs/heads/master | 2020-12-25T11:15:02.247726 | 2011-10-13T01:53:40 | 2011-10-13T01:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 849 | rd | getPkmid.Rd | \name{getPkmid}
\alias{getPkmid}
\title{
Computes the Pkmid Values
}
\description{
  'getPkmid' computes the Pkmid values, a component of Signal to Noise
Ratio (SNR). A vector of Pkmid values is computed for each alignment
in the cmph5 file.
}
\usage{
getPkmid(cmpH5, idx)
}
\arguments{
\item{cmpH5}{
An object of class \code{PacBioCmpH5}.
}
\item{idx}{
The indices of the alignments to retrieve.
}
}
\details{
Pkmid values are calculated from Pulse Sigma and Baseline Sigma values.
}
\value{
'getPkmid' returns a list of vectors with numeric values.
}
\examples{
require(pbh5)
cmpH5 <- PacBioCmpH5(system.file("h5_files", "aligned_reads.cmp.h5",
package = "pbh5"))
pkmid <- getPkmid(cmpH5, idx = 1:10)
boxplot(pkmid,
main = "Distribution of Pkmid values for the first ten alignments")
}
\keyword{datasets}
|
d48a3043e46e3cf74a84d21833bbf61dda230662 | 69a62f8dab62e35a0fcb2f23bfd35bc4f401324f | /man/select_groups.Rd | b7fa651316da6eb410cc72b01ee93e69ba167ebe | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | kleinschmidt/daver | d86f880374094bcfe96ea7683a04e85c96ae30e1 | 501c8dfbf77af49ff512beae7d09e582dd8adc94 | refs/heads/master | 2021-01-19T04:37:35.600284 | 2018-03-28T19:32:21 | 2018-03-28T19:32:21 | 46,999,898 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 357 | rd | select_groups.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{select_groups}
\alias{select_groups}
\title{Select groups of grouped tbl}
\usage{
select_groups(data, groups)
}
\arguments{
\item{data}{Grouped data_frame.}
\item{groups}{Group numbers to select}
}
\description{
Useful for examining or testing one example group
}
|
8a9585b4bea1c07caf0f0c209ecb84d6b3008c0c | 95c1c71916337e940f4520f9824b18e5a9499fdd | /DAAG/data/mignonette.R | 253ec1cca8f55208c770c9cafcfb2a67390b527a | [] | no_license | VladSerhiienko/MachineLearningLabworks | 96c488efa2c1c3bc7725aa2f0056ffe44047b307 | 32ddedf45e9bb29c01156253d88f8e57e432182a | refs/heads/master | 2020-05-29T21:50:25.888848 | 2014-12-17T20:09:38 | 2014-12-17T20:09:38 | 28,153,442 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 572 | r | mignonette.R | "mignonette" <-
structure(list(cross = c(21, 14.25, 19.125, 7, 15.125, 20.5,
17.375, 23.875, 17.125, 20.75, 16.125, 17.75, 16.25, 10, 10,
22.125, 19, 18.875, 16.5, 19.25, 25.25, 22, 8.75, 14.25), self = c(12.875,
16, 11.875, 15.25, 19.125, 12.5, 16.25, 16.25, 13.375, 13.625,
14.5, 19.5, 20.875, 7.875, 17.75, 9, 11.5, 11, 16, 16.375, 14.75,
16, 14.375, 14.25)), .Names = c("cross", "self"), row.names = c("1",
"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24"
), class = "data.frame")
|
76c880bbe68917ebdbf4613b1f29fc4005512bc7 | b1467d59c6171a09fa4fe55e740c0383bf5ea904 | /man/dis.Rd | 9152520221d5c895cdd60fb0c4eae2e594874320 | [] | no_license | cran/qgen | 458812019cfa3afc8f46bf58676064c6f0908d90 | 2a6f207bd97a0458447b33752a1c35b84416e015 | refs/heads/master | 2021-01-18T15:08:55.644673 | 2007-01-24T00:00:00 | 2007-01-24T00:00:00 | 17,719,289 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 592 | rd | dis.Rd | \name{dis}
\alias{dis}
\title{
Bootstrap confidence intervals
}
\description{
Calculates different bootstrap confidence intervals.
}
\usage{
dis(path="~/qgen/", alpha=0.05)
}
\arguments{
\item{path}{path searched for \code{stat}X\code{.rda}-files.}
\item{alpha}{number indicating the two sided error probability}
}
\details{
Depending on the available levels of resampling, percentile, basic (for
simple resampling) and studentized (for nested resampling) confidence
intervals are calculated.
}
\value{
The confidence intervals are printed.
}
\examples{
### dis()
}
\keyword{file} |
6492b486741f1aa2ff30cf6791aa3dcdc928c83c | a66434dda737b754fc8597c4bc7c6bdeb0e0a962 | /Basics/Intro to package/moving-average.R | 4cb3938c34dd975f8438f16e82f37023b1e19225 | [] | no_license | lazy-mind/Time-Series-Analysis | 05e5f208c080517c7b5809213ebc4f7a274b5dab | ad0b14cd6020b9f023e6b7cf86670f796eb055ab | refs/heads/master | 2020-08-06T18:14:57.706534 | 2019-10-06T23:11:39 | 2019-10-06T23:11:39 | 213,103,552 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 586 | r | moving-average.R | # Announcement of the company may have impact that last 2 days
# moving average of order 2, MA(2): the value depends on shocks up to 2 days back
noise = rnorm(10000)
ma_2 = NULL
# construct the MA(2) process: ma_2[i] = noise[i] + 0.7*noise[i-1] + 0.2*noise[i-2]
for (i in 3:10000) {
ma_2[i] = noise[i]+0.7*noise[i-1]+0.2*noise[i-2]
}
# drop the first two entries, which were never filled in (NA)
moving_average_process = ma_2[3:10000]
moving_average_process = ts(moving_average_process)
par(mfrow=c(2,1))
plot(moving_average_process, main = "order 2", col = "blue")
acf(moving_average_process, main="moving_average_process acf")
# note: the sample ACF is essentially zero for all lags greater than 2, as expected,
# because an MA(2) process carries no dependence beyond 2 steps back
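# A quick theoretical cross-check (added sketch, not in the original script):
# the ACF of an MA(2) process is exactly zero beyond lag 2, so the sample ACF
# above should die out after lag 2; ARMAacf() gives the theoretical values.
ARMAacf(ma = c(0.7, 0.2), lag.max = 5)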
|
6879efa9a983f59815a53ad5370cc481e3a73508 | 0e1dd4e156415271dfe8f4dfd20d5b9665e8c977 | /man/plot.ccamforecast.Rd | af59c437ea3d2829c8e8aaec0d4158ecb558f007 | [] | no_license | elisvb/CCAM | b2a711641b955185851fecc6ca2fde5b24d1b468 | 1d1cc18416b242437495f9aa11cd1f84cb466783 | refs/heads/master | 2023-03-16T15:43:06.737809 | 2023-03-09T19:42:22 | 2023-03-09T19:42:22 | 133,826,645 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 332 | rd | plot.ccamforecast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{plot.ccamforecast}
\alias{plot.ccamforecast}
\title{Plot ccamforecast object}
\usage{
\method{plot}{ccamforecast}(x, ...)
}
\arguments{
\item{x}{...}
\item{...}{extra arguments}
}
\description{
Plot ccamforecast object
}
\details{
...
}
|
683aae559e01d0cb48c83e05eb54b0052b60dba6 | a693da8676743148657e3ddb7dbfdc47c50d53a1 | /man/geoconvert.2.Rd | b195e798a8ff12d5a8c3bf66582ec105a209c6c5 | [] | no_license | Hafro/geo | 4de0f08370973b75d8d46003fb8e9a9d536beaff | 6deda168a3c3b2b5ed1237c9d3b2f8b79a3e4059 | refs/heads/master | 2022-11-23T15:06:47.987614 | 2022-11-16T22:26:55 | 2022-11-16T22:26:55 | 38,042,352 | 3 | 6 | null | 2022-11-16T22:26:56 | 2015-06-25T10:09:50 | R | UTF-8 | R | false | true | 611 | rd | geoconvert.2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geoconvert.2.R
\name{geoconvert.2}
\alias{geoconvert.2}
\title{Convert from decimal degrees}
\usage{
geoconvert.2(lat)
}
\arguments{
\item{lat}{Vector of latitude or longitudes}
}
\value{
Returns a vector of six digit values with degrees, minutes and
fractions of minutes, with two decimal values, concatenated.
}
\description{
Convert from decimal degrees to degrees, minutes and fractional minutes
representation (DDMMmm) of lat or lon.
}
\seealso{
Called by \code{\link{geoconvert}}, when \code{inverse = TRUE}.
}
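\examples{
## Added illustrative sketch (not from the package authors): 66.5 decimal
## degrees should correspond to 663000, i.e. 66 degrees 30.00 minutes.
\dontrun{geoconvert.2(66.5)}
}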
\keyword{manip}
|
c64e32fb58b38d31723efb75cfd425e5955ca435 | fc409801ba3a5c1ba885820257302b2ee8d251fa | /R/compareModels.PopQuants.R | cdd906a5da6e457e0070a3fe727f5707839db427 | [
"MIT"
] | permissive | wStockhausen/rTCSAM02 | 092557d6cb29179a2637db94a62b9c0ce37cdbbb | 44039e8366db3e7fb35edfd4219b311b36fd2ae9 | refs/heads/master | 2023-07-09T07:17:24.410585 | 2023-06-22T21:29:08 | 2023-06-22T21:29:08 | 69,492,060 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,393 | r | compareModels.PopQuants.R | #'
#'@title Compare population quantities from TCSAM2015 and rsimTCSAM model runs.
#'
#'@description Function to compare population quantities from TCSAM2015 and rsimTCSAM model runs.
#'
#'@param tcsams - single TCSAM2015 model report object, or named list of such
#'@param rsims - single rsimTCSAM results object, or named list of such
#'@param showPlot - flag to show/print plots immediately
#'@param pdf - name of pdf file to record plot output to
#'@param width - pdf page width (in inches)
#'@param height - pdf page width (in inches)
#'@param verbose - flag (T/F) to print debug info
#'
#'@return list of ggplot2 objects
#'
#'@details none.
#'
#'@export
#'
compareModels.PopQuants<-function(tcsams=NULL,
rsims=NULL,
showPlot=TRUE,
pdf=NULL,
width=8,
height=6,
verbose=FALSE){
#set up pdf device, if requested
if (!is.null(pdf)){
pdf(file=pdf,width=width,height=height);
on.exit(grDevices::dev.off())
}
if (inherits(tcsams,'tcsam2015.rep')){
tcsams<-list(tcsam=tcsams);#wrap in list
}
if (inherits(rsims,'rsimTCSAM')){
rsims<-list(rsim=rsims);#wrap in list
}
plots<-list();
#abundance trends
if (verbose) cat("Plotting population abundance trends\n");
mdfr<-getMDFR('mr/P_list/N_yxmsz',tcsams,rsims);
dfr<-reshape2::dcast(mdfr,modeltype+model+y+x+m+s~.,fun.aggregate=sum,value.var='val');
p1<-wtsPlots::plotMDFR.XY(
dfr,x='y',value.var='.',faceting='x~m',
plotABline=TRUE,plotPoints=FALSE,
xlab='year',ylab='Abundance (millions)',units="",
linetype='s',guideTitleLineType='',
colour='model',guideTitleColour='');
if (showPlot||!is.null(pdf)) print(p1);
p2<-wtsPlots::plotMDFR.XY(
dfr[dfr$y>=1980,],x='y',value.var='.',faceting='x~m',
plotABline=TRUE,plotPoints=FALSE,
xlab='year',ylab='Abundance (millions)',units="",
linetype='s',guideTitleLineType='',
colour='model',guideTitleColour='');
if (showPlot||!is.null(pdf)) print(p2);
plots$N_yxms<-list(p1,p2);
#biomass trends
if (verbose) cat("Plotting population biomass trends\n");
mdfr<-getMDFR('mr/P_list/B_yxms',tcsams,rsims);
p1<-wtsPlots::plotMDFR.XY(
mdfr,x='y',value.var='val',faceting='x~m',
plotABline=TRUE,plotPoints=FALSE,
xlab='year',ylab='Biomass (1000s t)',units="",
linetype='s',guideTitleLineType='',
colour='model',guideTitleColour='');
if (showPlot||!is.null(pdf)) print(p1);
p2<-wtsPlots::plotMDFR.XY(
mdfr[mdfr$y>=1980,],x='y',value.var='val',faceting='x~m',
plotABline=TRUE,plotPoints=FALSE,
xlab='year',ylab='Biomass (1000s t)',units="",
linetype='s',guideTitleLineType='',
colour='model',guideTitleColour='');
if (showPlot||!is.null(pdf)) print(p2);
plots$B_yxms<-list(p1,p2);
#mature biomass at mating trends
if (verbose) cat("Plotting population mature biomass-at-mating trends\n");
mdfr<-getMDFR('mr/P_list/MB_yx',tcsams,rsims);
p1<-wtsPlots::plotMDFR.XY(
mdfr,x='y',value.var='val',faceting='x~.',
plotABline=TRUE,plotPoints=FALSE,
xlab='year',ylab='Mating Biomass (1000s t)',units="",
colour='model',guideTitleColour='');
if (showPlot||!is.null(pdf)) print(p1);
p2<-wtsPlots::plotMDFR.XY(
mdfr[mdfr$y>=1980,],x='y',value.var='val',faceting='x~.',
plotABline=TRUE,plotPoints=FALSE,
xlab='year',ylab='Mating Biomass (1000s t)',units="",
colour='model',guideTitleColour='');
if (showPlot||!is.null(pdf)) print(p2);
plots$MB_yx<-list(p1,p2);
#recruitment
if (verbose) cat("Plotting recruitment time series\n");
path<-'mp/R_list/R_y';
mdfr<-getMDFR(path,tcsams,rsims);
p<-wtsPlots::plotMDFR.XY(
mdfr,x='y',agg.formula=NULL,faceting=NULL,
xlab='year',ylab='Recruitment',units='millions',lnscale=FALSE,
colour='model',guideTitleColor='',
shape='model',guideTitleShape='');
if (showPlot||!is.null(pdf)) print(p);
plots$R_y<-p;
p<-wtsPlots::plotMDFR.XY(
mdfr,x='y',agg.formula=NULL,faceting=NULL,
xlab='year',ylab='Recruitment',units='millions',lnscale=TRUE,
colour='model',guideTitleColor='',
shape='model',guideTitleShape='');
if (showPlot||!is.null(pdf)) print(p);
plots$lnR_y<-p;
#Population abundance-at-size
if (verbose) cat("Plotting poulation abundance-at-size\n");
path<-'mr/P_list/N_yxmsz';
mdfr<-getMDFR(path,tcsams,rsims);
mdfr<-removeImmOS(mdfr);
p<-wtsPlots::plotMDFR.Bubbles(
mdfr,x='y',y='z',
agg.formula='model+y+x+z',faceting='model~x',
xlab='year',ylab='size (mm CW)',units="millions",
colour='.',guideTitleColour='',useColourGradient=TRUE,alpha=0.5);
if (showPlot||!is.null(pdf)) print(p);
plots$N_yxmsz<-p;
#Population abundance-at-size (bubble plots)
if (verbose) cat("Plotting poulation abundance-at-size (bubble plots)\n");
path<-'mr/P_list/N_yxmsz';
mdfr<-getMDFR(path,tcsams,rsims);
mdfr<-removeImmOS(mdfr);
p<-wtsPlots::plotMDFR.Bubbles(
mdfr,x='y',y='z',
agg.formula='model+y+x+z',faceting='model~x',
xlab='year',ylab='size (mm CW)',units="millions",
colour='.',guideTitleColour='',useColourGradient=TRUE,alpha=0.5);
if (showPlot||!is.null(pdf)) print(p);
plots$N_yxmsz<-p;
#Population abundance-at-size (line plots)
cat("TODO: In compareModels.PopQuants(...), implement plots for population abundance-at-size as line plots.\n");
#p<-compareModels.SizeComps(mdfr);
return(invisible(plots))
}
|
5325471cc40e892aa474363c542211b9bd53753a | 0bebbba10f446ec5be7a378937be76c5e7610ab1 | /missing.R | f9d0beb263ef67cab8fa5237a9c4e2d98991fb62 | [] | no_license | songxh0424/bankruptcy | c3920ca513fce346bb7a3cca35aa3a35fc0293e8 | 004aad6aefb8a3c706db2f2cb180e6e013587f52 | refs/heads/master | 2021-08-24T02:29:49.794192 | 2017-12-07T17:17:04 | 2017-12-07T17:17:04 | 113,479,011 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,712 | r | missing.R | setwd("/Users/Carl/Google Drive/2017 winter/503/project")
library(foreign) # read.arff
library(tidyr)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(MASS)
library(class)
library(randomForest)
library(mice) # multiple imputation
library(missForest) # impute with RF
library(adabag) # boosting
library(caret) # train models
library(doMC) # parallel
library(knitr)
library(sparsediscrim)
load("project.RData")
bank = read.arff("bank/5year.arff")
# remove Attr37, 43% NA
bank = bank[, -37]
summary(bank)
colSums(is.na(bank)) / 5910
# multiple imputation
bank_imputed = mice(bank[, -64], method = "pmm", seed = 1234)
# randomForest imputation
bank_imputed2 = missForest(bank[, -64])
# original data with NAs imputed
dat = bank_imputed2$ximp
dat$class = bank$class
dat = mutate(dat, class = factor(ifelse(class == 1, "Yes", "No"), levels = c("Yes", "No")))
set.seed(1234)
ratio = 0.7
index = sample(1:nrow(dat), floor(ratio * nrow(dat)))
train = dat[index, ]
test = dat[-index, ]
# standardized
dat.std = dat
dat.std[, -64] = scale(dat[, -64])
train.std = dat.std[index, ]
test.std = dat.std[-index, ]
# with number of na as a predictor
dat.na = dat
dat.na$numNA = rowSums(is.na(bank))
train.na = dat.na[index, ]
test.na = dat.na[-index, ]
# with the first 20 PCs
pcacor = princomp(train[, -64], cor = TRUE)
train.pca = as.data.frame(pcacor$scores[,1:20])
train.pca$class = train$class
names(train.pca)[1:20] = paste("Comp.", 1:20, sep = "")
# project test data onto the training PCs; princomp() was run with cor = TRUE,
# so centre and scale by the training values stored in the princomp object
test.pca = as.data.frame(scale(test[, -64], center = pcacor$center, scale = pcacor$scale) %*% as.matrix(pcacor$loadings[, 1:20]))
test.pca$class = test$class
names(test.pca)[1:20] = paste("Comp.", 1:20, sep = "")
summary(train$class)
summary(test$class)
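# Illustrative only (added sketch, not part of the original preparation script):
# one way the prepared sets could be used, a plain LDA on the first 20 PCs;
# lda() comes from MASS, which is loaded above.
fit.lda <- lda(class ~ ., data = train.pca)
pred.lda <- predict(fit.lda, newdata = test.pca)$class
table(predicted = pred.lda, observed = test.pca$class)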
|
13efbb1a30caa07a76a43098e6420f8b4be81fdf | e9b555e22cf3f49cd5a3f7f115e4afe10173985d | /code/metrics-paper.R | 0ab3368228a3326320975630497ebd4e36fba5d3 | [] | no_license | niladrir/metrics-paper | 28af7592ade1c991d07f57d416e8e9bf58edc7c0 | 3889ce9e0468c8d9dc2f8f9d069612d1e20c1a19 | refs/heads/master | 2021-01-23T13:29:15.772053 | 2017-04-24T03:47:12 | 2017-04-24T03:47:12 | 9,685,813 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70,767 | r | metrics-paper.R | library(nullabor)
library(ggplot2)
library(plyr)
library(reshape)
library(fpc)
library(tourr)
##====================================Distance Metrics===========================================
## Distance based on Boxplots with indexing
box_dist_indx <- function(i, j){
X <- lineup.dat[lineup.dat$.sample == i, ]
PX <- lineup.dat[lineup.dat$.sample == j, ]
X.sum <- ddply(X, .(group), summarize, sum.stat = quantile(val, c(0.25, 0.5, 0.75)))
PX.sum <- ddply(PX, .(group), summarize, sum.stat = quantile(val, c(0.25, 0.5, 0.75)))
abs.diff.X <- abs(X.sum$sum.stat[X.sum$group == levels(X.sum$group)[1]] - X.sum$sum.stat[X.sum$group == levels(X.sum$group)[2]])
abs.diff.PX <- abs(PX.sum$sum.stat[PX.sum$group == levels(PX.sum$group)[1]] - PX.sum$sum.stat[PX.sum$group == levels(PX.sum$group)[2]])
sqrt(sum((abs.diff.X - abs.diff.PX)^2))
}
## Distance based on Boxplots: No indexing
box_dist <- function(X, PX){
X.sum <- ddply(X, .(group), summarize, sum.stat = quantile(val, c(0.25, 0.5, 0.75)))
PX.sum <- ddply(PX, .(group), summarize, sum.stat = quantile(val, c(0.25, 0.5, 0.75)))
abs.diff.X <- abs(X.sum$sum.stat[X.sum$group == levels(X.sum$group)[1]] - X.sum$sum.stat[X.sum$group == levels(X.sum$group)[2]])
abs.diff.PX <- abs(PX.sum$sum.stat[PX.sum$group == levels(PX.sum$group)[1]] - PX.sum$sum.stat[PX.sum$group == levels(PX.sum$group)[2]])
sqrt(sum((abs.diff.X - abs.diff.PX)^2))
}
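## Usage sketch (toy data added for illustration, not from the Turk experiments):
## distance between the boxplot summaries of a two-group dataset with a location
## shift and one generated under the null. Columns must be named 'group' and 'val'.
toy.X <- data.frame(group = factor(rep(c("A", "B"), each = 15)),
                    val = c(rnorm(15, mean = 2), rnorm(15, mean = 0)))
toy.PX <- data.frame(group = factor(rep(c("A", "B"), each = 15)), val = rnorm(30))
box_dist(toy.X, toy.PX)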
### Regression based distance with indexing
reg_bin_indx <- function(i, j, nbins = 1){
X <- lineup.dat[lineup.dat$.sample == i, ]
PX <- lineup.dat[lineup.dat$.sample == j, ]
ss <- seq(min(X[,1]), max(X[,1]), length = nbins + 1)
beta.X <- NULL ; beta.PX <- NULL
for(k in 1:nbins){
X.sub <- subset(X, X[,1] >= ss[k] & X[,1] <= ss[k + 1])
PX.sub <- subset(PX, X[,1] >= ss[k] & X[,1] <= ss[k + 1])
b.X <- as.numeric(coef(lm(X.sub[,2] ~ X.sub[,1])))
b.PX <- as.numeric(coef(lm(PX.sub[,2] ~ PX.sub[,1])))
beta.X <- rbind(beta.X, b.X)
beta.PX <- rbind(beta.PX, b.PX)
}
beta.X <- subset(beta.X, !is.na(beta.X[,2]))
beta.PX <- subset(beta.PX, !is.na(beta.PX[,2]))
sum((beta.X[,1] - beta.PX[,1])^2 + (beta.X[,2] - beta.PX[,2])^2)
}
### Regression based distance: No indexing
reg_bin <- function(X, PX, nbins = 1){
# X <- lineup.dat[lineup.dat$.sample == i, ]
# PX <- lineup.dat[lineup.dat$.sample == j, ]
ss <- seq(min(X[,1]), max(X[,1]), length = nbins + 1)
beta.X <- NULL ; beta.PX <- NULL
for(k in 1:nbins){
X.sub <- subset(X, X[,1] >= ss[k] & X[,1] <= ss[k + 1])
PX.sub <- subset(PX, X[,1] >= ss[k] & X[,1] <= ss[k + 1])
b.X <- as.numeric(coef(lm(X.sub[,2] ~ X.sub[,1])))
b.PX <- as.numeric(coef(lm(PX.sub[,2] ~ PX.sub[,1])))
beta.X <- rbind(beta.X, b.X)
beta.PX <- rbind(beta.PX, b.PX)
}
beta.X <- subset(beta.X, !is.na(beta.X[,2]))
beta.PX <- subset(beta.PX, !is.na(beta.PX[,2]))
sum((beta.X[,1] - beta.PX[,1])^2 + (beta.X[,2] - beta.PX[,2])^2)
}
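## Usage sketch (toy data added for illustration, not from the Turk experiments):
## regression based distance between a dataset with a clear linear trend and a
## null version obtained by permuting the response; nbins = 1 fits a single line.
toy.x <- rnorm(100)
toy.reg <- data.frame(x = toy.x, y = 2 - 3 * toy.x + rnorm(100))
toy.null <- data.frame(x = toy.x, y = sample(toy.reg$y))
reg_bin(toy.reg, toy.null, nbins = 1)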
### Distance for Univariate Data
dist_uni_indx <- function(i, j){
xx <- lineup.dat[lineup.dat$.sample == i, 1]
yy <- lineup.dat[lineup.dat$.sample == j, 1]
stat.xx <- c(mean(xx), sd(xx), moments::skewness(xx), moments::kurtosis(xx))
stat.yy <- c(mean(yy), sd(yy), moments::skewness(yy), moments::kurtosis(yy))
sqrt(sum((stat.xx - stat.yy)^2))
}
####Modified Binned Distance with indexing
bdist_mod_indx <- function(i,j, nbin.X = 5, nbin.Y = 5) {
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
if(!is.numeric(X[,1])){
X[,1] <- as.numeric(X[,1])
nij <- as.numeric(table(cut(X[,1], breaks=seq(min(X[,1]), max(X[,1]),length.out = length(unique(X[,1])) + 1), include.lowest = TRUE),cut(X[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
}else
nij <- as.numeric(table(cut(X[,1], breaks=seq(min(lineup.dat[,1]), max(lineup.dat[,1]),length.out = nbin.X + 1), include.lowest = TRUE),cut(X[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
if(!is.numeric(PX[,1])){
PX[,1] <- as.numeric(PX[,1])
mij <- as.numeric(table(cut(PX[,1], breaks=seq(min(X[,1]), max(X[,1]),length.out = length(unique(X[,1])) + 1), include.lowest = TRUE),cut(PX[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
}else
mij <- as.numeric(table(cut(PX[,1], breaks=seq(min(lineup.dat[,1]), max(lineup.dat[,1]),length.out = nbin.X + 1), include.lowest = TRUE),cut(PX[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
sqrt(sum((nij-mij)^2))
}
####Modified Binned Distance: No indexing
# # commented out: misses the lineup.dat parameter - use bin_dist() from the nullabor package instead
# bin_dist <- function(X,PX, nbin.X = 5, nbin.Y = 5) {
# if(!is.numeric(X[,1])){
# X[,1] <- as.numeric(X[,1])
# nij <- as.numeric(table(cut(X[,1], breaks=seq(min(X[,1]), max(X[,1]),length.out = length(unique(X[,1])) + 1), include.lowest = TRUE),cut(X[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
# }else
# nij <- as.numeric(table(cut(X[,1], breaks=seq(min(lineup.dat[,1]), max(lineup.dat[,1]),length.out = nbin.X + 1), include.lowest = TRUE),cut(X[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
# if(!is.numeric(PX[,1])){
# PX[,1] <- as.numeric(PX[,1])
# mij <- as.numeric(table(cut(PX[,1], breaks=seq(min(X[,1]), max(X[,1]),length.out = length(unique(X[,1])) + 1), include.lowest = TRUE),cut(PX[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
# }else
# mij <- as.numeric(table(cut(PX[,1], breaks=seq(min(lineup.dat[,1]), max(lineup.dat[,1]),length.out = nbin.X + 1), include.lowest = TRUE),cut(PX[,2], breaks=seq(min(lineup.dat[,2]), max(lineup.dat[,2]),length.out = nbin.Y + 1), include.lowest = TRUE)))
# sqrt(sum((nij-mij)^2))
# }
##Weighted Bin Distance with indexing
wbdist_indx <- function(i,j, nbins=10) {
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
d1 <- MASS::kde2d(X[,1],X[,2],n=nbins,lims=c(range(X[,1]), range(X[,2])))
d2 <- MASS::kde2d(PX[,1],PX[,2],n=nbins,lims=c(range(PX[,1]), range(PX[,2])))
sqrt(sum((d1$z-d2$z)^2)/(sum(d1$z^2) * sum(d2$z^2)))
}
## Distances based on separation: No Indexing
min_sep_dist <- function(X, PX, clustering = FALSE, nclust = 3){
require(fpc)
dX <- dist(X[,1:2])
dPX <- dist(PX[,1:2])
if(clustering){
X$cl <- X[,3]
PX$cl <- PX[,3]
X.clus <- sort(cluster.stats(dX, clustering = X$cl)$separation)
PX.clus <- sort(cluster.stats(dPX, clustering = X$cl)$separation)
}
else{
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- sort(cluster.stats(dX, complete.X)$separation)
PX.clus <- sort(cluster.stats(dPX, complete.PX)$separation)
}
sqrt(sum((X.clus - PX.clus)^2))
}
## Distances based on separation with indexing
min_sep_dist_indx <- function(i, j, clustering = FALSE, nclust = 3){
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
require(fpc)
dX <- dist(X[,1:2])
dPX <- dist(PX[,1:2])
if(clustering){
X$cl <- X[,3]
PX$cl <- PX[,3]
X.clus <- sort(cluster.stats(dX, clustering = X$cl)$separation)
PX.clus <- sort(cluster.stats(dPX, clustering = X$cl)$separation)
}
else{
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- sort(cluster.stats(dX, complete.X)$separation)
PX.clus <- sort(cluster.stats(dPX, complete.PX)$separation)
}
sqrt(sum((X.clus - PX.clus)^2))
}
### Distances based on average separation
ave_sep_dist <- function(X, PX, clustering = FALSE, nclust = 3) {
dX <- dist(X[, 1:2])
dPX <- dist(PX[, 1:2])
if (clustering) {
X$cl <- X[, 3]
PX$cl <- PX[, 3]
X.clus <- sort(cluster.stats(dX, clustering = X$cl)$average.toother)
PX.clus <- sort(cluster.stats(dPX, clustering = PX$cl)$average.toother)
} else {
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- sort(cluster.stats(dX, complete.X)$average.toother)
PX.clus <- sort(cluster.stats(dPX, complete.PX)$average.toother)
}
sqrt(sum((X.clus - PX.clus)^2))
}
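## Usage sketch (toy data added for illustration, not from the Turk experiments):
## separation based distances between a dataset with three well separated clusters
## and one with no cluster structure; the third column holds the known labels.
toy.cl <- rep(1:3, each = 20)
toy.clust <- data.frame(x = rnorm(60, mean = 4 * toy.cl), y = rnorm(60), cl = toy.cl)
toy.nonclust <- data.frame(x = rnorm(60), y = rnorm(60), cl = toy.cl)
min_sep_dist(toy.clust, toy.nonclust, clustering = TRUE)
ave_sep_dist(toy.clust, toy.nonclust, clustering = TRUE)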
### Distances based on average separation with indexing
ave_sep_dist_indx <- function(i, j, clustering = FALSE, nclust = 3){
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
dX <- dist(X[,1:2])
dPX <- dist(PX[,1:2])
if(clustering){
X$cl <- X[,3]
PX$cl <- PX[,3]
X.clus <- sort(cluster.stats(dX, clustering = X$cl)$average.toother)
PX.clus <- sort(cluster.stats(dPX, clustering = PX$cl)$average.toother)
}
else{
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- sort(cluster.stats(dX, complete.X)$average.toother)
PX.clus <- sort(cluster.stats(dPX, complete.PX)$average.toother)
}
sqrt(sum((X.clus - PX.clus)^2))
}
### Hausdorff distance (placeholder; no implementation in this script)
##====================================Application to the Turk Experiment=====================================================
pos.1 <- 1:20
pos.2 <- 1:20
dat.pos <- expand.grid(pos.1 = pos.1, pos.2 = pos.2)
###====================================Turk 1 Experiment===============================================================
files <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/exp1")
results <- NULL
for(k in files){
### Reading the lineup data
dat <- read.table(paste("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/exp1/",k, sep = ""), header = T)
#dat <- read.table(paste("U:/Documents/Research/Permutation/exp1/",i, sep = ""), header = T)
### Melting the data
dat.m <- melt(dat, id = c("age", "grp", "weight"))
### Changing the categorical variable to a numerical variable
dat.m$x <- as.numeric(dat.m$grp)
### Breaking the variable name to get the position and type of plot (null or obs)
dat.m$plot <- substring(dat.m$variable, 1, 3)
dat.m$position <- substring(dat.m$variable, 4, 5)
### Finding the observed data
obs <- dat.m[dat.m$plot == "obs", c("x", "value") ]
### Storing the pic name
file.name <- k
split.file.name <- unlist(strsplit(file.name,"\\."))
split.2 <- unlist(strsplit(split.file.name[1],"\\_"))
pic_name <- paste("plot_", split.2[2],"_", split.2[3], "_", split.2[4], "_", split.2[5], "_", split.2[6], ".png", sep = "" )
  ### Calculating the distance metrics on the lineups. Only the boxplot based
  ### and binned distances are calculated here.
dat.m <- dat.m[, c("x", "value", "position")]
names(dat.m) <- c("group", "val", ".sample")
lineup.dat <- dat.m
lineup.dat$group <- as.factor(lineup.dat$group)
stat <- ddply(dat.pos,.(pos.1, pos.2),summarize,box.dist = box_dist_indx(pos.1, pos.2), bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 8, nbin.Y = 8) )
res <- data.frame(pic_name, stat)
results <- rbind(results, res)
}
lineup.dat$.sample <- as.numeric(lineup.dat$.sample)
#write.table(results, "turk1-metrics.txt", row.names = F)
#results <- read.table("turk1-metrics.txt", header = T)
qplot(factor(group), val, data = lineup.dat, geom = "boxplot", col = group, ylab = "", xlab = "group") + facet_wrap(~.sample)
#ggsave("turk1-diff-box-prop.pdf", height = 5, width = 5.5)
res.exp1 <- read.csv("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/raw_data_turk1.csv")
res.exp1 <- subset(res.exp1, select = c(pic_name, response, plot_location, time_taken))
res.dat <- ddply(res.exp1, .(pic_name), summarize, prop = sum(response)/length(response), pos = mean(plot_location), m.time = median(time_taken))
metrics.sub <- subset(results, pos.1 != pos.2)
dat.merge <- merge(metrics.sub, res.dat, by = "pic_name")
dat.merge <- subset(dat.merge, pos.2 != pos)
dd <- ddply(dat.merge, .(pic_name, pos.1), summarize, box.mean = mean(box.dist), bin.mean = mean(bin.dist), len = length(box.dist), prop = mean(prop), m.time = mean(m.time))
prop.dist <- ddply(dd, .(pic_name), summarize, diff.box = box.mean[len == 19] - max(box.mean[len == 18]), grtr.box = sum(box.mean[len == 18] > box.mean[len == 19]) , diff.bin = bin.mean[len == 19] - max(bin.mean[len == 18]), grtr.bin = sum(bin.mean[len == 18] > bin.mean[len == 19]) , prop = mean(prop), m.time = mean(m.time))
prop.dist$special <- ifelse(prop.dist$pic_name == "plot_turk1_100_8_12_2.png", 1, 0)
### Faceting
prop.diff <- subset(prop.dist, select = c(pic_name, diff.box, diff.bin, prop, m.time, special))
prop.diff.m <- melt(prop.diff, id = c("pic_name", "prop", "special", "m.time"))
levels(prop.diff.m$variable) <- c("Boxplot Based Distance", "Binned Distance")
qplot(value, prop, data = prop.diff.m, geom = "point", size = I(3), ylim = c(0, 1), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + geom_smooth(se = FALSE) + facet_wrap( ~ variable, scales = "free_x") + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk1-prop-box-bin.pdf", height = 4, width = 8.5)
qplot(value, m.time, data = prop.diff.m, geom = "point", size = I(3), xlab = "Difference", ylab = "Mean Time to Respond", shape = factor(special)) + facet_wrap( ~ variable, scales = "free_x") + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk1-mtime-box-bin.pdf", height = 4, width = 8.5)
grtr.diff <- subset(prop.dist, select = c(pic_name, grtr.box, grtr.bin, prop, special))
grtr.diff.m <- melt(grtr.diff, id = c("pic_name", "prop", "special"))
levels(grtr.diff.m$variable) <- c("Boxplot Based Distance", "Binned Distance")
qplot(value, prop, data = grtr.diff.m, geom = "point", size = I(3), ylim = c(0, 1), xlab = "Greater than Observed Plot", ylab = "Detection Rate", shape = factor(special)) + facet_wrap( ~ variable) + theme(legend.position = "none")
ggsave("turk1-grtr-box-bin.pdf", height = 4, width = 8.5)
### Individual Plots
qplot(diff.box, prop, data = prop.dist, ylim = c(0, 1), size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + geom_smooth( se = FALSE) + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk1-diff-box-prop.pdf", height = 5, width = 5.5)
qplot(factor(grtr.box), prop, data = prop.dist, ylim = c(0, 1), size = I(3), alpha = I(0.6), xlab = "Greater than observed plot", ylab = "Detection Rate", shape = factor(special)) + theme(legend.position = "none")
ggsave("turk1-grtr-box-prop.pdf", height = 5, width = 5.5)
qplot(diff.bin, prop, data = prop.dist, ylim = c(0, 1), size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + theme(legend.position = "none") + geom_smooth( se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("turk1-diff-bin-prop-8.pdf", height = 5, width = 5.5)
qplot(factor(grtr.bin), prop, data = prop.dist, ylim = c(0, 1), size = I(3), alpha = I(0.6), xlab = "greater than observed plot", ylab = "Detection Rate", shape = factor(special)) + theme(legend.position = "none")
ggsave("turk1-grtr-bin-prop-8.pdf", height = 5, width = 5.5)
qplot(diff.box, m.time, data = prop.dist, size = I(3), xlab = "difference", ylab = "median time to respond") + geom_smooth(method = "lm", se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("turk1-diff-box-mtime.pdf", height = 5, width = 5.5)
qplot(diff.bin, m.time, data = prop.dist, size = I(3), xlab = "difference", ylab = "median time to respond") + geom_smooth(method = "lm", se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("turk1-diff-bin-mtime.pdf", height = 5, width = 5.5)
###============================================================================
###Turk 2 Experiment
###============================================================================
files.png <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/exp2","*.png")
files.txt <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/exp2","*.txt")
metrics <- NULL
for(i in 1:length(files.txt)){
dat <- read.table(paste("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/exp2/",files.txt[i], sep = ""), header = T)
dat.m <- melt(dat, id = "X")
dat.m$.sample <- substring(dat.m$variable, 2)
lineup.dat <- data.frame(x = dat.m$X, z = dat.m$value, .sample = dat.m$.sample)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, reg.bin = reg_bin_indx(pos.1, pos.2), bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 2, nbin.Y = 2), reg.no.int = reg_no_int_indx(pos.1, pos.2))
metrics.dat <- data.frame(metrics.dat, pic_name = files.png[i])
metrics <- rbind(metrics, metrics.dat)
}
#write.table(metrics, "turk2-metrics.txt", row.names = F)
#lineup.dat$.sample <- as.numeric(lineup.dat$.sample)
#qplot(x,z, data = lineup.dat, geom = "point", alpha = I(0.5), xlab = "X", ylab = "Y") + geom_smooth(method = "lm", se = FALSE) + facet_wrap(~.sample)
res.exp2 <- read.csv("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/raw_data_turk2.csv")
res.exp2 <- subset(res.exp2, select = c(pic_name, response, plot_location, time_taken))
res.dat <- ddply(res.exp2, .(pic_name), summarize, prop = sum(response)/length(response), pos = mean(plot_location), m.time = median(time_taken))
metrics.sub <- subset(metrics, pos.1 != pos.2)
dat.merge <- merge(metrics.sub, res.dat, by = "pic_name")
dat.merge <- subset(dat.merge, pos.2 != pos)
dd <- ddply(dat.merge, .(pic_name, pos.1), summarize, reg.mean = mean(reg.bin), bin.mean = mean(bin.dist), reg.no.int.mean = mean(reg.no.int), len = length(reg.bin), prop = mean(prop), m.time = mean(m.time))
prop.dist <- ddply(dd, .(pic_name), summarize, diff.reg = reg.mean[len == 19] - max(reg.mean[len == 18]), grtr.reg = sum(reg.mean[len == 18] > reg.mean[len == 19]), diff.reg.no.int = reg.no.int.mean[len == 19] - max(reg.no.int.mean[len == 18]), grtr.reg.no.int = sum(reg.no.int.mean[len == 18] > reg.no.int.mean[len == 19]), diff.bin = bin.mean[len == 19] - max(bin.mean[len == 18]), grtr.bin = sum(bin.mean[len == 18] > bin.mean[len == 19]), prop = mean(prop), m.time = mean(m.time))
prop.dist$special <- ifelse(prop.dist$pic_name == "plot_turk2_100_350_12_3.png", 1, 0)
### Facetted Plots
prop.diff <- subset(prop.dist, select = c(pic_name, diff.reg, diff.bin, prop, m.time, special))
prop.diff.m <- melt(prop.diff, id = c("pic_name", "prop", "special", "m.time"))
levels(prop.diff.m$variable) <- c("Regression Based Distance", "Binned Distance")
qplot(value, prop, data = prop.diff.m, geom = "point", size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + geom_smooth(se = FALSE) + facet_wrap( ~ variable, scales = "free_x") + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk2-prop-reg-bin.pdf", height = 4, width = 8.5)
qplot(value, m.time, data = prop.diff.m, geom = "point", size = I(3), xlab = "Difference", ylab = "Mean Time to Respond", shape = factor(special)) + facet_wrap( ~ variable, scales = "free_x") + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk2-mtime-reg-bin.pdf", height = 4, width = 8.5)
grtr.diff <- subset(prop.dist, select = c(pic_name, grtr.reg, grtr.bin, prop, special))
grtr.diff.m <- melt(grtr.diff, id = c("pic_name", "prop", "special"))
levels(grtr.diff.m$variable) <- c("Regression Based Distance", "Binned Distance")
qplot(value, prop, data = grtr.diff.m, geom = "point", size = I(3), ylim = c(0, 1), xlab = "Greater than Observed Plot", ylab = "Detection Rate", shape = factor(special)) + facet_wrap( ~ variable) + theme(legend.position = "none")
ggsave("turk2-grtr-reg-bin.pdf", height = 4, width = 8.5)
### Individual Plots
qplot(diff.reg, prop, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + geom_smooth(se = FALSE) + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk2-diff-reg-prop.pdf", height = 5, width = 5.5)
qplot(factor(grtr.reg), prop, data = prop.dist, size = I(3), xlab = "Greater than observed plot", ylab = "Detection Rate", shape = factor(special)) + theme(legend.position = "none")
ggsave("turk2-grtr-reg-prop.pdf", height = 5, width = 5.5)
qplot(diff.bin, prop, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + geom_smooth(se = FALSE) + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("turk2-diff-bin-prop-2.pdf", height = 5, width = 5.5)
qplot(factor(grtr.bin), prop, data = prop.dist, size = I(3), xlab = "Greater than observed plot", ylab = "Detection Rate", shape = factor(special)) + theme(legend.position = "none")
ggsave("turk2-grtr-bin-prop-2.pdf", height = 5, width = 5.5)
qplot(diff.reg, m.time , data = prop.dist, size = I(3), xlab = "difference", ylab = "median time to respond") + geom_smooth(method = "lm", se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("turk2-diff-reg-mtime.pdf", height = 5, width = 5.5)
qplot(diff.bin, m.time , data = prop.dist, size = I(3), xlab = "difference", ylab = "median time to respond") + geom_smooth(method = "lm", se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("turk2-diff-bin-mtime.pdf", height = 5, width = 5.5)
###===================================================================================================
###Large p, small n
###===================================================================================================
files.png <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/large-p-exp","*.png")
files.txt <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/large-p-exp","*.txt")
metrics1 <- NULL
metrics2 <- NULL
for(i in 1:length(files.txt)){
dat <- read.table(paste("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/large-p-exp/",files.txt[i], sep = ""), header = T)
if(dim(dat)[2] == 4){
lineup.dat <- data.frame(x = dat$x, z = dat$cl, cl = dat$cl, .sample = dat$.sample)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, b.mod = bdist_mod_indx(pos.1, pos.2, nbin.X = 10, nbin.Y = 10), sep.dist = min_sep_dist_indx(pos.1, pos.2, clustering = TRUE, nclust = 2), ave.dist = ave_sep_dist_indx(pos.1, pos.2, clustering = TRUE, nclust = 2))
metrics.dat1 <- data.frame(metrics.dat, pic_name = files.png[i])
metrics1 <- rbind(metrics1, metrics.dat1)
}
if(dim(dat)[2] == 6){
lineup.dat <- data.frame(x = dat$X1, z = dat$X2, cl = dat$cl, .sample = dat$.sample)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, b.mod = bdist_mod_indx(pos.1, pos.2, nbin.X = 5, nbin.Y = 5), sep.dist = min_sep_dist_indx(pos.1, pos.2, clustering = TRUE, nclust = 3), ave.dist = ave_sep_dist_indx(pos.1, pos.2, clustering = TRUE, nclust = 2))
metrics.dat2 <- data.frame(metrics.dat, pic_name = files.png[i])
metrics2 <- rbind(metrics2, metrics.dat2)
}
metrics <- rbind(metrics1, metrics2)
}
#write.table(metrics, "largep-metrics.txt", row.names = F)
#qplot(X1, X2, data = dat, geom = "point", alpha = I(0.7), color = factor(cl)) + facet_wrap(~.sample) + scale_colour_discrete(name = "Group")
res.exp.lp <- read.csv("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/raw_data_turk7.csv")
res.exp.lp <- subset(res.exp.lp, pic_name %in% files.png, select = c(pic_name, response, plot_location, time_taken))
res.dat <- ddply(res.exp.lp, .(pic_name), summarize, prop = sum(response)/length(response), pos = mean(plot_location), m.time = mean(time_taken))
metrics.sub <- subset(metrics, pos.1 != pos.2)
dat.merge <- merge(metrics.sub, res.dat, by = "pic_name")
dat.merge <- subset(dat.merge, pos.2 != pos)
dd <- ddply(dat.merge, .(pic_name, pos.1), summarize, bin.mean = mean(b.mod), sep.mean = mean(sep.dist), ave.mean = mean(ave.dist), len = length(b.mod), prop = mean(prop), m.time = mean(m.time))
prop.dist <- ddply(dd, .(pic_name), summarize, diff.bin = bin.mean[len == 19] - max(bin.mean[len == 18]), diff.sep = sep.mean[len == 19] - max(sep.mean[len == 18]), diff.ave = ave.mean[len == 19] - max(ave.mean[len == 18]), prop = mean(prop), m.time = mean(m.time))
#write.csv(prop.dist, "prop.dist.csv", row.names = FALSE) ## This includes min separation,
## average separation, wb.ratio and average between matrix.
#prop.dist <- read.csv("prop.dist.csv")
#ggpairs(prop.dist[, 2:7])
library(GGally)
ggpairs(prop.dist[, -1])
nomatch.sep.ave <- subset(prop.dist, (diff.sep > 0 & diff.ave < 0) | (diff.sep < 0 & diff.ave > 0))
prop.dist <- ddply(dd, .(pic_name), summarize, diff.bin = bin.mean[len == 19] - max(bin.mean[len == 18]), grtr.bin = sum(bin.mean[len == 18] > bin.mean[len == 19]), diff.sep = sep.mean[len == 19] - max(sep.mean[len == 18]), grtr.sep = sum(sep.mean[len == 18] > sep.mean[len == 19]), diff.ave = ave.mean[len == 19] - max(ave.mean[len == 18]), grtr.ave = sum(ave.mean[len == 18] > ave.mean[len == 19]), prop = mean(prop), m.time = mean(m.time))
prop.dist$shape <- ifelse(prop.dist$pic_name == "plot_large_p_small_n_30_100_0_2_3.png", 1, 0)
### Facetted Plots
prop.diff <- subset(prop.dist, select = c(pic_name, diff.sep, diff.ave, diff.bin, prop, m.time, shape))
prop.diff.m <- melt(prop.diff, id = c("pic_name", "prop", "m.time", "shape"))
levels(prop.diff.m$variable) <- c("Minimum Separation", "Average Separation","Binned Distance")
qplot(value, prop, data = prop.diff.m, geom = "point", size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(shape)) + geom_smooth(se = FALSE) + facet_wrap( ~ variable, scales = "free_x") + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("largep-prop-sep-bin.pdf", height = 4, width = 8.5)
qplot(value, m.time, data = prop.diff.m, geom = "point", size = I(3), xlab = "Difference", ylab = "Mean Time to Respond", shape = factor(shape)) + facet_wrap( ~ variable, scales = "free_x") + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("largep-mtime-sep-bin.pdf", height = 4, width = 8.5)
grtr.diff <- subset(prop.dist, select = c(pic_name, grtr.sep, grtr.ave, grtr.bin, prop, shape))
grtr.diff.m <- melt(grtr.diff, id = c("pic_name", "prop", "shape"))
levels(grtr.diff.m$variable) <- c("Minimum Separation", "Average Separation","Binned Distance")
qplot(value, prop, data = grtr.diff.m, geom = "point", size = I(3), ylim = c(0, 1), xlab = "Greater than Observed Plot", ylab = "Detection Rate") + facet_wrap( ~ variable) + theme(legend.position = "none")
ggsave("largep-grtr-sep-bin.pdf", height = 4, width = 8.5)
### Individual Plots
qplot(diff.bin, prop, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Detection Rate", shape = factor(special)) + geom_smooth(se = FALSE) + geom_vline(xintercept = 0, col = "red") + theme(legend.position = "none")
ggsave("largep-diff-bin-prop-10-5.pdf", height = 5, width = 5.5)
qplot(diff.bin, m.time, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Median time to respond") + geom_smooth(method = "lm",se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("largep-diff-bin-mtime.pdf", height = 5, width = 5.5)
qplot(diff.sep, prop, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Detection Rate")+ geom_smooth(se = FALSE) + geom_vline(xintercept = 0, col = "red")+ theme(legend.position = "none")
ggsave("largep-diff-clus-prop.pdf", height = 5, width = 5.5)
qplot(diff.sep, m.time, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Median time to respond") + geom_smooth(method = "lm",se = FALSE) + geom_vline(xintercept = 0, col = "red")
ggsave("largep-diff-clus-mtime.pdf", height = 5, width = 5.5)
qplot(factor(grtr.bin), prop, data = prop.dist, size = I(3), xlab = "Greater than observed plot", ylab = "Detection Rate", alpha = I(0.6), shape = factor(special)) + theme(legend.position = "none")
ggsave("largep-grtr-bin-prop-10-5.pdf", height = 5, width = 5.5)
qplot(factor(grtr.sep), prop, data = prop.dist, size = I(3), xlab = "greater than observed plot", ylab = "Detection Rate", alpha = I(0.6), shape = factor(special)) + theme(legend.position = "none")
ggsave("largep-grtr-clus-prop.pdf", height = 5, width = 5.5)
qplot(factor(grtr.sep), m.time, data = prop.dist, size = I(3), xlab = "greater than observed plot", ylab = "prop correct", alpha = I(0.6))
time.dist <- merge(prop.dist, res.exp.lp, by = "pic_name")
qplot(diff.bin, time_taken, data = subset(time.dist, time_taken < 250), size = I(3), alpha = I(0.3)) + geom_smooth(method = "lm", se = FALSE)
qplot(diff.sep, time_taken, data = subset(time.dist, time_taken < 250), size = I(3), alpha = I(0.3)) + geom_smooth(method = "lm", se = FALSE)
####============================================================================
## Lendie's Data - Turk 10 Experiment
####============================================================================
files.csv <- dir("/Users/Niladri/Documents/Research/Permutation/Adam's Data/Adam's and Lendie's data/turk10/lineups/data/", "*.csv")
pic_details <- read.csv("/Users/Niladri/Documents/Research/Permutation/Adam's Data/Adam's and Lendie's data/turk10/lineups/picture-details.csv")
files.svg.all <- dir("/Users/Niladri/Documents/Research/Permutation/Adam's Data/Adam's and Lendie's data/turk10/lineups/images/", "*.svg")
files.svg <- matrix(unlist(strsplit(as.character(pic_details$pic_name), "/")), ncol = 2, byrow = TRUE)[,2]
res.lendie <- read.csv("/Users/Niladri/Documents/Research/Permutation/Adam's Data/Adam's and Lendie's data/turk10/raw_data_turk10.csv")
res.lendie <- subset(res.lendie, pic_name %in% files.svg, select = c(pic_name, response, response_no, plot_location, time_taken))
res.dat <- ddply(res.lendie, .(pic_name), summarize, prop = sum(response)/length(response), pos = mean(plot_location), m.time = median(time_taken))
metrics <- NULL
for(i in 1:length(files.csv)){
dat <- read.table(paste("/Users/Niladri/Documents/Research/Permutation/Adam's Data/Adam's and Lendie's data/turk10/lineups/data/",files.csv[i], sep = ""), header = T, sep = ",")
lineup.dat <- data.frame(x = dat$naive1.qq.x, y = dat$naive1.qq.y, .sample = dat$.n)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, reg.bin = reg_bin_indx(pos.1, pos.2), bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 5, nbin.Y = 5))
metrics.dat <- data.frame(metrics.dat, data_name = files.csv[i])
metrics <- rbind(metrics, metrics.dat)
}
pic_details <- subset(pic_details, select = c("pic_name","data_name"))
pic_details$pic_name <- files.svg
metrics.sub <- subset(metrics, pos.1 != pos.2)
metrics.m <- merge(metrics.sub, pic_details, by = "data_name")
res.mer <- merge(metrics.m, res.dat, by = "pic_name")
dat.merge <- subset(res.mer, pos.2 != pos)
dd <- ddply(dat.merge, .(pic_name, data_name, pos.1), summarize, reg.mean = mean(reg.bin), bin.mean = mean(bin.dist), len = length(bin.dist), prop = mean(prop), m.time = mean(m.time))
prop.dist <- ddply(dd, .(pic_name, data_name), summarize, diff.bin = bin.mean[len == 19] - max(bin.mean[len == 18]), grtr.bin = sum(bin.mean[len == 18] > bin.mean[len == 19]), diff.reg = reg.mean[len == 19] - max(reg.mean[len == 18]), grtr.reg = sum(reg.mean[len == 18] > reg.mean[len == 19]), prop = mean(prop), m.time = mean(m.time))
qplot(diff.reg, prop, data = prop.dist)
qplot(diff.bin, prop, data = prop.dist)
### Metrics Example
set.seed(1500)
X1 <- rnorm(50, 10, 2)
X2 <- NULL
for(i in 1:50){
X2[i] <- rnorm(1, mean = 3 + 0.7*(X1[i] - 10), sd = sqrt(4*(1 - 0.7^2)))
}
true.dat <- data.frame(X1 = X1, X2 = X2)
cor(X1, X2)
qplot(X1, X2, geom = "point", size = I(3))
ggsave("dat-example-1.pdf", height = 3.5, width = 3.5)
binplot <- function(x, y, nbins= 8, plot=TRUE) {
xd <- cut(x, breaks=nbins, labels=as.character(1:nbins))
yd <- cut(y, breaks=nbins, labels=as.character(1:nbins))
ndf <- as.data.frame(xtabs(~yd+xd))
X <- data.frame(x=x, y=y)
X$xnew <- (x-min(x))/(max(x)-min(x))*nbins + 0.5
X$ynew <- (y-min(y))/(max(y)-min(y))*nbins + 0.5
if (plot) {
print(ggplot() + geom_tile(aes(xd,yd,fill=Freq), colour="grey50", data=ndf) + scale_fill_gradient2(name = "Count") + xlab("p") + ylab("q") )
}
invisible(ndf)
}
nij <- binplot(X1,X2)
ggsave("bin-example-1.pdf", height = 3.5, width = 4.2)
freqplot <- function(x, y, nbins= 8, plot=TRUE) {
xd <- cut(x, breaks=nbins, labels=as.character(1:nbins))
yd <- cut(y, breaks=nbins, labels=as.character(1:nbins))
ndf <- as.data.frame(xtabs(~yd+xd))
X <- data.frame(x=x, y=y)
if (plot) {
print(ggplot() + geom_tile(aes(xd,yd,fill=0.5), colour="grey50", data=ndf) +
scale_fill_gradient2(name = "Count") + xlab("p") + ylab("q") +
geom_text(aes(xd, yd, label = Freq), data = ndf) +
theme(legend.position = "none") )
}
invisible(ndf)
}
mij <- freqplot(X1,X2)
m1 <- mij
ggsave("freq-example-1.pdf", height = 3.5, width = 3.5)
X1 <- sample(X1)
samp.dat <- data.frame(X1 = X1, X2 = X2)
qplot(X1, X2, geom = "point", size = I(3), xlab = "Permuted X1")
ggsave("dat-example-2.pdf", height = 3.5, width = 3.5)
nij <- binplot(X1,X2)
ggsave("bin-example-2.pdf", height = 3.5, width = 4.2)
mij <- freqplot(X1,X2)
m2 <- mij
ggsave("freq-example-2.pdf", height = 3.5, width = 3.5)
m12 <- merge(m1, m2, by=c("xd", "yd"))
sqrt(with(m12, sum( (Freq.x-Freq.y)^2)))
###================================================================
#### Exp1 : data generation
###================================================================
set.seed(1000)
b0 <- 5
b1 <- 15
b2 <- 8
sigma <- 12
x1 <- rpois(100, 30)
x2 <- rep(c(1,2), c(51, 49))
eps <- rnorm(100, 0, sigma)
y = b0 + b1*x1 + b2*x2 + eps
mod1 <- lm(y ~ x1)
summary(mod1)$sigma ##11.77
qplot(factor(x2), mod1$resid, geom = "boxplot")
qplot(group, val, data = obs.dat, geom = "boxplot")
###==================================================================
### Turk 1 Example
###==================================================================
dat <- read.table("../data/dat_turk1_100_16_12_3.txt", header = T) ## plot_turk1_100_16_12_3
dat.m <- melt(dat, id = c("age", "grp", "weight"))
dat.m$x <- as.numeric(dat.m$grp)
dat.m$plot <- substring(dat.m$variable, 1, 3)
dat.m$position <- substring(dat.m$variable, 4, 5)
dat.m <- dat.m[, c("x", "value", "position")]
names(dat.m) <- c("group", "val", ".sample")
lineup.dat <- dat.m
lineup.dat$group <- as.factor(lineup.dat$group)
lineup.dat$.sample <- as.numeric(as.character(lineup.dat$.sample))
levels(lineup.dat$group) <- c("A", "B")
qplot(group, val, data = lineup.dat, geom = "boxplot", col = group, ylab = "", xlab = "Group") + facet_wrap(~ .sample) + scale_color_discrete(name = "Group")
ggsave("turk1-example.pdf", height = 5, width = 5.5)
###=========================================================================
### Exp1 -- generation of distribution of distance metric
###=========================================================================
dat <- read.table("../data/dat_turk1_100_8_12_2.txt", header = T) ## plot_turk1_100_8_12_2
### The detection rate for the above lineup is 0.28, even though the differences for both the binned distance and the regression-based distance are large and negative.
### Melting the data
dat.m <- melt(dat, id = c("age", "grp", "weight"))
### Changing the categorical variable to a numerical variable
dat.m$x <- as.numeric(dat.m$grp)
### Breaking the variable name to get the position and type of plot (null or obs)
dat.m$plot <- substring(dat.m$variable, 1, 3)
dat.m$position <- substring(dat.m$variable, 4, 5)
### Finding the observed data
obs <- dat.m[dat.m$plot == "obs", c("x", "value") ]
dat.m <- dat.m[, c("x", "value", "position")]
names(dat.m) <- c("group", "val", ".sample")
lineup.dat <- dat.m
lineup.dat$group <- as.factor(lineup.dat$group)
lineup.dat$.sample <- as.numeric(as.character(lineup.dat$.sample))
obs.dat <- subset(lineup.dat, .sample == 20)
levels(lineup.dat$group) <- c("A", "B")
qplot(group, val, data = lineup.dat, geom = "boxplot", col = group,
ylab = "", xlab = "Group") + facet_wrap(~ .sample) +
scale_color_discrete(name = "Group") + theme(legend.position = "none")
ggsave("lineup-high-prop-neg-diff.pdf", height = 4, width = 4.5)
dat.bin <- dat.bin.28 <- dat.box <- NULL
for (i in 1:1000){
samp.dat <- data.frame(group = obs.dat$group, val = rnorm(dim(obs.dat)[1], 0, 11.77))
dat1 <- sapply(1:18, function(k){
null.dat <- data.frame(group = obs.dat$group, val = rnorm(dim(obs.dat)[1], 0, 11.77))
b1 = bin_dist(X=samp.dat, PX=null.dat, lineup.dat = lineup.dat, X.bin = 2, Y.bin = 2)
b2 = bin_dist(samp.dat, null.dat, lineup.dat = lineup.dat, X.bin = 2, Y.bin = 8)
s = box_dist(samp.dat, null.dat)
return(list(b1 = b1, b2 = b2, s = s))
})
dat1 <- matrix(unlist(dat1), nrow = 3 )
dat.bin <- c(dat.bin, mean(dat1[1,]))
dat.bin.28 <- c(dat.bin.28, mean(dat1[2,]))
dat.box <- c(dat.box, mean(dat1[3,]))
}
df <- as.data.frame(t(dat1))
names(df) <- c("Binned-2-2", "Binned-2-8", "Boxplot Dist")
write.csv(df, "distr-turk1.csv", row.names = FALSE)
pos.1 <- 1:20
pos.2 <- 1:20
dat.pos <- expand.grid(pos.1 = pos.1, pos.2 = pos.2)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 2, nbin.Y = 2))
pos <- 20
metrics.dat <- subset(metrics.dat, pos.1 != pos.2 & pos.2 != pos)
dd1 <- ddply(metrics.dat, .(pos.1), summarize, bin.mean = mean(bin.dist), len = length(bin.dist))
dat.bin <- as.data.frame(dat.bin)
ggplot() + geom_density(data = dat.bin, aes(x = dat.bin),
fill = "grey80", col = "grey80" ) +
geom_segment(data = subset(dd1, len == 19),
aes(x= bin.mean, xend = bin.mean,
y=0.02*max(density(dat.bin$dat.bin)$y),
yend = 0.2*max(density(dat.bin$dat.bin)$y)),
colour="darkorange", size=1) +
geom_segment(data = subset(dd1, len != 19),
aes(x = bin.mean, xend = bin.mean,
y = rep(0.02*max(density(dat.bin$dat.bin)$y),19),
yend = rep(0.1*max(density(dat.bin$dat.bin)$y),19)),
size=1, alpha = I(0.7)) + xlab("Binned Distance (p = q = 2)") +
ylab("") + geom_text(data = dd1, y = - 0.03*max(density(dat.bin$dat.bin)$y),
size = 2.5, aes(x = bin.mean, label = pos.1)) +
ylim(c(- 0.04*max(density(dat.bin$dat.bin)$y),
max(density(dat.bin$dat.bin)$y) + 0.1*max(density(dat.bin$dat.bin)$y)))
ggsave("distribution-bin-dist-2-2-exp1.pdf", height = 4, width = 4.5)
### bin_dist with x.bin = 2, y.bin = 8
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 2, nbin.Y = 8))
pos <- 20
metrics.dat <- subset(metrics.dat, pos.1 != pos.2 & pos.2 != pos)
dd1 <- ddply(metrics.dat, .(pos.1), summarize, bin.mean = mean(bin.dist), len = length(bin.dist))
dat.bin <- as.data.frame(dat.bin.28)
ggplot() + geom_density(data = dat.bin, aes(x = dat.bin.28), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd1, len == 19), aes(x= bin.mean, xend = bin.mean, y=0.02*max(density(dat.bin$dat.bin.28)$y), yend = 0.2*max(density(dat.bin$dat.bin.28)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd1, len != 19), aes(x = bin.mean, xend = bin.mean, y = rep(0.02*max(density(dat.bin$dat.bin.28)$y),19), yend = rep(0.1*max(density(dat.bin$dat.bin.28)$y),19)), size=1, alpha = I(0.7)) + xlab("Binned Distance (p = 2, q = 8)") + ylab("") + geom_text(data = dd1, y = - 0.03*max(density(dat.bin$dat.bin.28)$y), size = 2.5, aes(x = bin.mean, label = pos.1)) + ylim(c(- 0.04*max(density(dat.bin$dat.bin.28)$y), max(density(dat.bin$dat.bin.28)$y) + 0.1*max(density(dat.bin$dat.bin.28)$y)))
ggsave("distribution-bin-dist-2-8-exp1.pdf", height = 4, width = 4.5)
### box_dist
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, box.dist = box_dist_indx(pos.1, pos.2))
pos <- 20
metrics.dat <- subset(metrics.dat, pos.1 != pos.2 & pos.2 != pos)
dd3 <- ddply(metrics.dat, .(pos.1), summarize, box.mean = mean(box.dist), len = length(box.dist))
dat.box <- as.data.frame(dat.box)
ggplot() + geom_density(data = dat.box, aes(x = dat.box), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= box.mean, xend = box.mean, y=0.02*max(density(dat.box$dat.box)$y), yend = 0.2*max(density(dat.box$dat.box)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = box.mean, xend = box.mean, y = rep(0.02*max(density(dat.box$dat.box)$y),19), yend = rep(0.1*max(density(dat.box$dat.box)$y),19)), size=1, alpha = I(0.7)) + xlab("Boxplot Based Distance") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.box$dat.box)$y), size = 2.5, aes(x = box.mean, label = pos.1)) + ylim(c(- 0.04*max(density(dat.box$dat.box)$y), max(density(dat.box$dat.box)$y) + 0.1*max(density(dat.box$dat.box)$y)))
ggsave("distribution-box-dist-exp1.pdf", height = 4, width = 4.5)
###==================================================================
### Turk 2 Example
###==================================================================
dat <- read.table(file.choose(), header = TRUE) #plot_turk2_100_600_12_2
dat.m <- melt(dat, id = "X")
dat.m$.sample <- substring(dat.m$variable, 2)
lineup.dat <- data.frame(x = dat.m$X, z = dat.m$value, .sample = dat.m$.sample)
lineup.dat$.sample <- as.numeric(as.character(lineup.dat$.sample))
qplot(x, z, data = lineup.dat, alpha = I(0.1), xlab = "X1", ylab = "X2") + geom_smooth(method = "lm", se = FALSE, size = 1) + facet_wrap(~ .sample)
ggsave("turk2-example.pdf", height = 5, width = 5.5)
###=================================================================
### Exp 2: data generation
###=================================================================
b0 <- 6
b1 <- -3.5
sigma <- 12
n <- 100
x1 <- rnorm(n, 0, 1)
y <- b0 + b1*x1 + rnorm(100, 0, sigma)
qplot(x1, y, geom = "point") + geom_smooth(method = "lm", se = FALSE)
### Using Lineup
dat <- read.table(file.choose(), header = TRUE) #plot_turk2_100_350_12_3
dat.m <- melt(dat, id = "X")
dat.m$.sample <- substring(dat.m$variable, 2)
lineup.dat <- data.frame(x = dat.m$X, z = dat.m$value, .sample = dat.m$.sample)
lineup.dat$.sample <- as.numeric(as.character(lineup.dat$.sample))
qplot(x, z, data = lineup.dat, alpha = I(0.1), xlab = "X1", ylab = "X2") + geom_smooth(method = "lm", se = FALSE, size = 1) + facet_wrap(~ .sample)
ggsave("lineup-exp2-neg-diff-large-prop.pdf", height = 4, width = 4.5)
## From the lineup data
obs.dat <- lineup.dat[lineup.dat$.sample == 10, ]
#qplot(x, z, data = obs.dat, geom = "point") + geom_smooth(method = "lm", se = FALSE)
mod2 <- lm(z ~ 1, data = obs.dat)
mean.null <- predict(mod2)
sd.null <- summary(mod2)$sigma
###=============================================================================
### generation of distribution of distance metric
###=============================================================================
### using bin_dist with x.bin = 2, y.bin = 2
dat.bin <- dat.bin.82 <- dat.reg <- dat.reg.no.int <- NULL
for (i in 1:1000){
zz <- NULL
for(i in 1:dim(obs.dat)[1]){
zz[i] <- rnorm(1, mean.null[i], sd.null)
}
samp.dat <- data.frame(group = obs.dat$x, z = zz )
dat1 <- sapply(1:18, function(k){
yy <- NULL
for(i in 1:dim(obs.dat)[1]){
yy[i] <- rnorm(1, mean.null[i], sd.null)
}
null.dat <- data.frame(group = obs.dat$x, z = yy)
b1 = bin_dist(samp.dat, null.dat, lineup.dat = lineup.dat, X.bin = 2, Y.bin = 2)
b2 = bin_dist(samp.dat, null.dat, lineup.dat = lineup.dat, X.bin = 8, Y.bin = 2)
r.int = reg_dist(samp.dat, null.dat)
r.no.int <- reg_no_int_dist(samp.dat, null.dat)
return(list(b1 = b1, b2 = b2, r.int = r.int, r.no.int = r.no.int))
})
dat1 <- matrix(unlist(dat1), nrow = 4 )
dat.bin <- c(dat.bin, mean(dat1[1,]))
dat.bin.82 <- c(dat.bin.82, mean(dat1[2,]))
dat.reg <- c(dat.reg, mean(dat1[3,]))
dat.reg.no.int <- c(dat.reg.no.int, mean(dat1[4,]))
}
df <- as.data.frame(t(dat1))
names(df) <- c("Binned-2-2", "Binned-8-2", "Regression Dist", "Regression No Intercept")
write.csv(df, "distr-turk2.csv", row.names = FALSE)
#opt_diff(lineup.dat, var = c('x', 'z'), 2, 10, 2, 10, 19, plot = TRUE)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 2, nbin.Y = 2), bin.dist.82 = bdist_mod_indx(pos.1, pos.2, nbin.X = 8, nbin.Y = 2), reg.dist = reg_bin_indx(pos.1, pos.2), reg.dist.no.int = reg_no_int_indx(pos.1, pos.2))
pos <- 10
metrics.dat <- subset(metrics.dat, pos.1 != pos.2 & pos.2 != pos)
dd3 <- ddply(metrics.dat, .(pos.1), summarize, bin.mean = mean(bin.dist), bin.mean.82 = mean(bin.dist.82), reg.mean = mean(reg.dist), reg.mean.no.int = mean(reg.dist.no.int), len = length(bin.dist))
dat.bin <- as.data.frame(dat.bin)
ggplot() + geom_density(data = dat.bin, aes(x = dat.bin), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= bin.mean, xend = bin.mean, y=0.02*max(density(dat.bin$dat.bin)$y), yend = 0.2*max(density(dat.bin$dat.bin)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = bin.mean, xend = bin.mean, y = rep(0.02*max(density(dat.bin$dat.bin)$y),19), yend = rep(0.1*max(density(dat.bin$dat.bin)$y),19)), size=1, alpha = I(0.7)) + xlab("Binned Distance (p = q = 2)") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.bin$dat.bin)$y), size = 2.5, aes(x = bin.mean, label = pos.1)) + ylim(c(- 0.04*max(density(dat.bin$dat.bin)$y), max(density(dat.bin$dat.bin)$y) + 0.1*max(density(dat.bin$dat.bin)$y)))
ggsave("distribution-bin-dist-2-2-exp2.pdf", height = 4, width = 4.5)
dat.bin.82 <- as.data.frame(dat.bin.82)
ggplot() + geom_density(data = dat.bin.82, aes(x = dat.bin.82), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= bin.mean.82, xend = bin.mean.82, y=0.02*max(density(dat.bin.82$dat.bin.82)$y), yend = 0.2*max(density(dat.bin.82$dat.bin.82)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = bin.mean.82, xend = bin.mean.82, y = rep(0.02*max(density(dat.bin.82$dat.bin.82)$y),19), yend = rep(0.1*max(density(dat.bin.82$dat.bin.82)$y),19)), size=1, alpha = I(0.7)) + xlab("Binned Distance (p = 8, q = 2)") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.bin.82$dat.bin.82)$y), size = 2.5, aes(x = bin.mean.82, label = pos.1)) + ylim(c(- 0.04*max(density(dat.bin.82$dat.bin.82)$y), max(density(dat.bin.82$dat.bin.82)$y) + 0.1*max(density(dat.bin.82$dat.bin.82)$y)))
ggsave("distribution-bin-dist-8-2-exp2.pdf", height = 4, width = 4.5)
dat.reg <- as.data.frame(dat.reg)
ggplot() + geom_density(data = dat.reg, aes(x = dat.reg), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= reg.mean, xend = reg.mean, y=0.02*max(density(dat.reg$dat.reg)$y), yend = 0.2*max(density(dat.reg$dat.reg)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = reg.mean, xend = reg.mean, y = rep(0.02*max(density(dat.reg$dat.reg)$y),19), yend = rep(0.1*max(density(dat.reg$dat.reg)$y),19)), size=1, alpha = I(0.7)) + xlab("Regression Based Distance") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.reg$dat.reg)$y), size = 2.5, aes(x = reg.mean, label = pos.1)) + ylim(c(- 0.04*max(density(dat.reg$dat.reg)$y), max(density(dat.reg$dat.reg)$y) + 0.1*max(density(dat.reg$dat.reg)$y)))
ggsave("distribution-reg-dist-exp2.pdf", height = 4, width = 4.5)
dat.reg.no.int <- as.data.frame(dat.reg.no.int)
ggplot() + geom_density(data = dat.reg.no.int, aes(x = dat.reg.no.int), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= reg.mean.no.int, xend = reg.mean.no.int, y=0.02*max(density(dat.reg.no.int$dat.reg.no.int)$y), yend = 0.2*max(density(dat.reg.no.int$dat.reg.no.int)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = reg.mean.no.int, xend = reg.mean.no.int, y = rep(0.02*max(density(dat.reg.no.int$dat.reg.no.int)$y),19), yend = rep(0.1*max(density(dat.reg.no.int$dat.reg.no.int)$y),19)), size=1, alpha = I(0.7)) + xlab("Regression Based Distance \n (only slope)") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.reg.no.int$dat.reg.no.int)$y), size = 2.5, aes(x = reg.mean.no.int, label = pos.1)) + ylim(c(- 0.04*max(density(dat.reg.no.int$dat.reg.no.int)$y), max(density(dat.reg.no.int$dat.reg.no.int)$y) + 0.1*max(density(dat.reg.no.int$dat.reg.no.int)$y)))
ggsave("distribution-reg-no-int-dist-exp2.pdf", height = 4, width = 4.5)
### P-value distance
pval_dist <- function(X) {
as.numeric(summary(lm(X[,2] ~ X[,1], data = X))$coefficients[,4][2])
}
dat <- NULL
for (i in 1:1000){
zz <- NULL
for(k in 1:dim(obs.dat)[1]){
zz[k] <- rnorm(1, mean = mean.null[k], sd = sd.null)
}
samp.dat <- data.frame(group = obs.dat$x, z = zz )
qplot(group, z, data = samp.dat) + geom_smooth(method = "lm", se = FALSE)
dat <- c(dat, pval_dist(samp.dat))
}
pval <- ddply(lineup.dat, .(.sample), summarize, p = pval_dist(data.frame(lineup.dat[lineup.dat$.sample == .sample,1], lineup.dat[lineup.dat$.sample == .sample,2])))
pos <- 10
m <- 20
qplot(dat, geom = "density", fill = I("grey80"), colour = I("grey80"),
xlab = "p-value", ylab = "") + geom_segment(aes(x = pval$p[pval$.sample !=
10], xend = pval$p[pval$.sample != pos], y = rep(0.01 * min(density(dat)$y),
(m - 1)), yend = rep(0.1 * max(density(dat)$y), (m - 1))), size = 1, alpha = I(0.7)) +
geom_segment(aes(x = pval$p[pval$.sample ==
10], xend = pval$p[pval$.sample == 10], y = 0.01 * min(density(dat)$y), yend = 0.2 * max(density(dat)$y)),
colour = "darkorange", size = 1) + geom_text(data = pval, y = -0.03 * max(density(dat)$y),
size = 2.5, aes(x = p, label = .sample)) + ylim(c(-0.04 * max(density(dat)$y),
max(density(dat)$y) + 0.1))
ggsave("distribution-pval-exp2.pdf", height = 4, width = 4.5)
####========================================================================================================
### using bin_dist with x.bin = 8, y.bin = 2
opt_diff(lineup.dat, var = c('x', 'z'), 2, 10, 2, 10, 10, plot = TRUE)
dat <- NULL
for (i in 1:1000){
zz <- NULL
  for(j in 1:dim(obs.dat)[1]){
    zz[j] <- rnorm(1, mean.null[j], sd.null)
  }
samp.dat <- data.frame(group = obs.dat$x, z = zz )
dat1 <- replicate(18, {
yy <- NULL
    for(j in 1:dim(obs.dat)[1]){
      yy[j] <- rnorm(1, mean.null[j], sd.null)
    }
null.dat <- data.frame(group = obs.dat$x, z = yy)
bin_dist(samp.dat, null.dat, lineup.dat = lineup.dat, X.bin = 8, Y.bin = 2)
})
dat <- c(dat, mean(dat1))
}
ddd <- distmet(lineup.dat, var = c("x", "z"), 'bin_dist', null_permute("x"), pos = 10, dist.arg = list(X.bin = 8, Y.bin = 2))
pos <- 10
m <- 20
### Using reg_dist
dat <- NULL
for (i in 1:1000){
zz <- NULL
  for(j in 1:dim(obs.dat)[1]){
    zz[j] <- rnorm(1, mean.null[j], sd.null)
  }
samp.dat <- data.frame(group = obs.dat$x, z = zz )
dat1 <- replicate(18, {
yy <- NULL
    for(j in 1:dim(obs.dat)[1]){
      yy[j] <- rnorm(1, mean.null[j], sd.null)
    }
null.dat <- data.frame(group = obs.dat$x, z = yy)
reg_dist(samp.dat, null.dat)
})
dat <- c(dat, mean(dat1))
}
ddd <- distmet(lineup.dat, var = c("x", "z"), 'reg_dist', null_permute("x"), pos = 10)
pos <- 10
m <- 20
dat.reg <- as.data.frame(dat.reg)
ggplot() + geom_density(data = dat.reg, aes(x = dat.reg), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= reg.mean, xend = reg.mean, y=0.02*max(density(dat.reg$dat.reg)$y), yend = 0.2*max(density(dat.reg$dat.reg)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = reg.mean, xend = reg.mean, y = rep(0.02*max(density(dat.reg$dat.reg)$y),19), yend = rep(0.1*max(density(dat.reg$dat.reg)$y),19)), size=1, alpha = I(0.7)) + xlab("Distance") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.reg$dat.reg)$y), size = 2.5, aes(x = reg.mean, label = pos.1)) + ylim(c(- 0.04*max(density(dat.reg$dat.reg)$y), max(density(dat.reg$dat.reg)$y) + 0.1*max(density(dat.reg$dat.reg)$y)))
ggsave("distribution-reg-dist-exp2.pdf", height = 4, width = 4.5)
### Reg distance: no intercept
reg_no_int_dist <- function(X, PX, nbins = 1) {
ss <- seq(min(X[, 1]), max(X[, 1]), length = nbins + 1)
beta.X <- NULL
beta.PX <- NULL
for (k in 1:nbins) {
X.sub <- subset(X, X[, 1] >= ss[k] & X[, 1] <= ss[k + 1])
PX.sub <- subset(PX, X[, 1] >= ss[k] & X[, 1] <= ss[k + 1])
b.X <- as.numeric(coef(lm(X.sub[, 2] ~ X.sub[, 1])))
b.PX <- as.numeric(coef(lm(PX.sub[, 2] ~ PX.sub[, 1])))
beta.X <- rbind(beta.X, b.X)
beta.PX <- rbind(beta.PX, b.PX)
}
beta.X <- subset(beta.X, !is.na(beta.X[, 2]))
beta.PX <- subset(beta.PX, !is.na(beta.PX[, 2]))
sum((beta.X[, 2] - beta.PX[, 2])^2)
}
### Reg distance: no intercept
reg_no_int_indx <- function(i, j, nbins = 1){
X <- lineup.dat[lineup.dat$.sample == i, ]
PX <- lineup.dat[lineup.dat$.sample == j, ]
ss <- seq(min(X[,1]), max(X[,1]), length = nbins + 1)
beta.X <- NULL ; beta.PX <- NULL
for(k in 1:nbins){
X.sub <- subset(X, X[,1] >= ss[k] & X[,1] <= ss[k + 1])
PX.sub <- subset(PX, X[,1] >= ss[k] & X[,1] <= ss[k + 1])
b.X <- as.numeric(coef(lm(X.sub[,2] ~ X.sub[,1])))
b.PX <- as.numeric(coef(lm(PX.sub[,2] ~ PX.sub[,1])))
beta.X <- rbind(beta.X, b.X)
beta.PX <- rbind(beta.PX, b.PX)
}
beta.X <- subset(beta.X, !is.na(beta.X[,2]))
beta.PX <- subset(beta.PX, !is.na(beta.PX[,2]))
sum((beta.X[,2] - beta.PX[,2])^2)
}
dat <- NULL
for (i in 1:1000){
zz <- NULL
  for(j in 1:dim(obs.dat)[1]){
    zz[j] <- rnorm(1, mean.null[j], sd.null)
  }
samp.dat <- data.frame(group = obs.dat$x, z = zz )
dat1 <- replicate(18, {
yy <- NULL
    for(j in 1:dim(obs.dat)[1]){
      yy[j] <- rnorm(1, mean.null[j], sd.null)
    }
null.dat <- data.frame(group = obs.dat$x, z = yy)
reg_no_int_dist(samp.dat, null.dat)
})
dat <- c(dat, mean(dat1))
}
ddd <- distmet(lineup.dat, var = c("x", "z"), 'reg_no_int_dist', null_permute("x"), pos = 10)
dat.reg.no.int <- as.data.frame(dat.reg.no.int)
ggplot() + geom_density(data = dat.reg.no.int, aes(x = dat.reg.no.int), fill = "grey80", col = "grey80" ) + geom_segment(data = subset(dd3, len == 19), aes(x= reg.mean.no.int, xend = reg.mean.no.int, y=0.02*max(density(dat.reg.no.int$dat.reg.no.int)$y), yend = 0.2*max(density(dat.reg.no.int$dat.reg.no.int)$y)), colour="darkorange", size=1) + geom_segment(data = subset(dd3, len != 19), aes(x = reg.mean.no.int, xend = reg.mean.no.int, y = rep(0.02*max(density(dat.reg.no.int$dat.reg.no.int)$y),19), yend = rep(0.1*max(density(dat.reg.no.int$dat.reg.no.int)$y),19)), size=1, alpha = I(0.7)) + xlab("Distance") + ylab("") + geom_text(data = dd3, y = - 0.03*max(density(dat.reg.no.int$dat.reg.no.int)$y), size = 2.5, aes(x = reg.mean.no.int, label = pos.1)) + ylim(c(- 0.04*max(density(dat.reg.no.int$dat.reg.no.int)$y), max(density(dat.reg.no.int$dat.reg.no.int)$y) + 0.1*max(density(dat.reg.no.int$dat.reg.no.int)$y)))
ggsave("distribution-reg-no-int-dist-exp2.pdf", height = 4, width = 4.5)
###=======================================================================================================================
res.dat <- read.csv(file.choose())
is.character(res.dat$pic_name)
res.lineup <- subset(res.dat, pic_name == "plot_turk2_100_350_12_3.png")
pval <- function(t){
summary(lm(z ~ x, data = subset(lineup.dat, lineup.dat$.sample == t)))$coefficients[,4][2]
}
dat <- read.table("dat_turk2_100_350_12_3.txt", header = T)
dat.m <- melt(dat, id = "X")
dat.m$.sample <- substring(dat.m$variable, 2)
lineup.dat <- data.frame(x = dat.m$X, z = dat.m$value, .sample = dat.m$.sample)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, reg.bin = reg_bin_indx(pos.1, pos.2), bin.dist = bdist_mod_indx(pos.1, pos.2, nbin.X = 8, nbin.Y = 2))
metrics.sub <- subset(metrics.dat, pos.1 != pos.2 & pos.2 != 10)
dd <- ddply(metrics.sub, .(pos.1), summarize, reg.mean = mean(reg.bin), bin.mean = mean(bin.dist))
dd$pic_name = "plot_turk2_100_350_12_3.png"
dat.merge <- merge(dd, res.lineup, by = "pic_name")
dat.merge <- subset(dat.merge, select = c(reg.mean, bin.mean, response_no, pos.1))
rel <- ddply(dat.merge, .(pos.1), summarize, rel.freq = sum(response_no == pos.1)/dim(res.lineup)[1], reg.mean = mean(reg.mean), bin.mean = mean(bin.mean))
pvalue.dat <- ddply(lineup.dat, .(.sample), summarize, p = pval(.sample))
pval.merge <- merge(rel, pvalue.dat, by.x = "pos.1", by.y = ".sample")
pval.merge$plot_loc <- ifelse(pval.merge$pos.1 == 10, 1, 0)
qplot(log(p), rel.freq, data = pval.merge, geom = "point", col = plot_loc) + geom_linerange(aes(x = log(p), ymin = 0, ymax = rel.freq )) + theme(legend.position="none") + scale_colour_continuous(high = "red", low = "black") + scale_x_continuous("p-value" ) + scale_y_continuous("Relative Frequency", limits = c(0,1), breaks = c(0, 0.5, 1))
ggsave("rel-pvalue.pdf", height = 2.5, width = 4)
rel.m <- melt(rel, id = c("pos.1", "rel.freq"))
rel.m$plot_loc <- ifelse(rel.m$pos.1 == 10, 1, 0)
levels(rel.m$variable) <- c("Regression Based", "Binned")
qplot(value, rel.freq, data = rel.m, geom = "point", col = plot_loc) + geom_linerange(aes(x = value, ymin = 0, ymax = rel.freq )) + theme(legend.position="none") + scale_colour_continuous(high = "red", low = "black") + facet_grid(variable ~ ., scales = "free_x") + scale_x_continuous("Mean Distances" ) + scale_y_continuous("Relative Frequency", limits = c(0,1), breaks = c(0, 0.5, 1))
# + theme(legend.position="none", axis.ticks = element_blank(), axis.text.x = element_blank())
### Turk Experiment : Large p, Small n
lineup.dat <- read.table(file.choose(), header = TRUE) # plot_large_p_small_n_30_80_0_2_3
qplot(X1, X2, data = lineup.dat, geom = "point", alpha = I(0.6), size = I(3), col = factor(cl)) + facet_wrap(~.sample) + scale_color_discrete(name = "Group")
ggsave("lineup-large-p-small-n.pdf", height = 5, width = 5.5)
#### New Lineup
generate_plot_2d<-function(n=30,p, noise=1, m=20){
x<-matrix(rnorm(p*n),ncol=p)
if(noise==0){
x[1:10,(p-1)]<-x[1:10,(p-1)]+3
x[11:20,(p-1)]<-x[11:20,(p-1)]-3
x[21:30,p]<-x[21:30,p]+sqrt(27)
}
colnames(x)<-paste("X",1:(p),sep="")
x<-scale(x)
x<-data.frame(x, cl=factor(c(rep(1,n/3),rep(2,n/3),rep(3,n/3))))
d=2
optima <- save_history(x[,-(p+1)], tour_path=guided_tour(index_f=pda_pp(cl=x[,(p+1)], lambda=0.2), max.tries=1000), max_bases=100, rescale=F)
nbases<-dim(optima)[3]
optima.global<-unclass(optima)[,,nbases]
projdata.true<-data.frame(as.matrix(x[,-(p+1)])%*%optima.global, cl=x[,(p+1)], nbases=rep(nbases,n))
projdata.samples<-NULL
flag <- 0
while(flag < 19) {
x[,(p+1)]<-sample(x[,(p+1)])
optima <- save_history(x[,-(p+1)], guided_tour(index_f=pda_pp(cl=x[,(p+1)], lambda=0.2),max.tries=100), max_bases=100, rescale=F)
nbases<-dim(optima)[3]
optima.global<-unclass(optima)[,,nbases]
projdata<-data.frame(as.matrix(x[,-(p+1)])%*%optima.global, cl=x[,(p+1)], nbases=rep(nbases,30))
lamb<-summary(manova(cbind(X1, X2)~cl, data=projdata), test="Wilks")[[4]][3]
if(lamb < 0.001){
projdata.samples<-rbind(projdata.samples, projdata)
flag <- flag + 1
}else
{
projdata <- NULL
projdata.samples <- rbind(projdata.samples, projdata)
flag = flag
}
cat(flag, lamb, "\n")
}
projdata.samples$.n <- rep(1:19, each = 30)
#pos<-sample(m,1)
lineup.data<-lineup(true=projdata.true, samples=projdata.samples,pos=1)
return(lineup.data)
}
dat <- generate_plot_2d(p = 80, noise = 0)
qplot(X1, X2, data=dat, colour=cl) + facet_wrap(~ .sample) + scale_colour_discrete(name="Group") + scale_y_continuous("X2") + scale_x_continuous("X1")
ggsave("largep-lineup-new-1.pdf", height = 5, width = 5.5)
n <- 30; p <- 80
dat.b <- dat.s <- NULL
for (i in 1:15){
flag <- 0
while(flag < 1){
x<-matrix(rnorm(p*n),ncol=p)
x[1:10,(p-1)]<-x[1:10,(p-1)]+3
x[11:20,(p-1)]<-x[11:20,(p-1)]-3
x[21:30,p]<-x[21:30,p]+sqrt(27)
colnames(x)<-paste("X",1:(p),sep="")
x<-scale(x)
x<-data.frame(x, cl=factor(c(rep(1,n/3),rep(2,n/3),rep(3,n/3))))
x[,(p+1)]<-sample(x[,(p+1)])
optima <- save_history(x[,-(p+1)], tour_path=guided_tour(index_f=pda_pp(cl=x[,(p+1)], lambda=0.2), max.tries=100), max_bases=100, rescale=F)
nbases<-dim(optima)[3]
optima.global<-unclass(optima)[,,nbases]
projdata.null<-data.frame(as.matrix(x[,-(p+1)])%*%optima.global, cl=x[,(p+1)], nbases=rep(nbases,n))
lamb<-summary(manova(cbind(X1, X2)~cl, data=projdata.null), test="Wilks")[[4]][3]
if(lamb < 0.001){
flag <- flag + 1
}
cat(flag, lamb, "\n")
}
dat1 <- sapply(1:5, function(k){
flag1 <- 0
while(flag1 < 1){
x[,(p+1)]<-sample(x[,(p+1)])
optima <- save_history(x[,-(p+1)], guided_tour(index_f=pda_pp(cl=x[,(p+1)], lambda=0.2),max.tries=100), max_bases=100, rescale=F)
nbases<-dim(optima)[3]
optima.global<-unclass(optima)[,,nbases]
projdata<-data.frame(as.matrix(x[,-(p+1)])%*%optima.global, cl=x[,(p+1)], nbases=rep(nbases,30))
lamb1 <- summary(manova(cbind(X1, X2)~cl, data=projdata), test="Wilks")[[4]][3]
if(lamb1 < 0.001){
flag1 <- flag1 + 1
}
cat("flag1 =", flag1, "i = ", i, lamb1, "\n")
}
projdata.null$cl <- as.numeric(projdata.null$cl)
projdata$cl <- as.numeric(projdata$cl)
b = bin_dist(projdata.null, projdata, lineup.dat = dat, X.bin = 6, Y.bin = 4)
s = sep_dist(projdata.null, projdata, clustering = TRUE, nclust = 3)
return(list(b = b, s = s))
})
dat1 <- matrix(unlist(dat1), nrow = 2 )
dat.b <- c(dat.b, mean(dat1[1,]))
dat.s <- c(dat.s, mean(dat1[2,]))
}
#opt_diff(lineup.dat, var = c('X1', 'X2'), 2, 10, 2, 10, 20, plot = TRUE)
#detach(package:plyr)
library(dplyr)
ddd <- distmet(dat, var = c("X1", "X2"), 'bin_dist', null_permute("cl"), pos = 1, dist.arg = list(X.bin = 6, Y.bin = 4))
pos <- 1
m <- 20
qplot(dat.b, geom = "density", fill = I("grey80"), colour = I("grey80"),
xlab = "Permutation distribution", ylab = "") + geom_segment(aes(x = ddd$lineup$mean.dist[ddd$lineup$plotno !=
pos], xend = ddd$lineup$mean.dist[ddd$lineup$plotno != pos], y = rep(0.01 * min(density(dat.b)$y),
(m - 1)), yend = rep(0.05 * max(density(dat.b)$y), (m - 1))), size = 1, alpha = I(0.7)) +
geom_segment(aes(x = ddd$lineup$mean.dist[ddd$lineup$plotno == pos], xend = ddd$lineup$mean.dist[ddd$lineup$plotno ==
pos], y = 0.01 * min(density(dat.b)$y), yend = 0.1 * max(density(dat.b)$y)),
colour = "darkorange", size = 1) + geom_text(data = ddd$lineup, y = -0.03 * max(density(dat.b)$y),
size = 2.5, aes(x = mean.dist, label = plotno)) + ylim(c(-0.04 * max(density(dat.b)$y),
max(density(dat.b)$y) + 0.1))
ggsave("bin-dist-largep-6-4-new-1.pdf", height = 5, width = 5.5)
### Sep dist
dat$cl <- as.numeric(dat$cl)
ddd <- distmet(dat, var = c("X1", "X2", "cl"), 'sep_dist', null_permute("cl"), pos = 1, dist.arg = list(clustering = TRUE, nclust = 3))
pos <- 1; m = 20
qplot(dat.s, geom = "density", fill = I("grey80"), colour = I("grey80"),
xlab = "Permutation distribution", ylab = "") + geom_segment(aes(x = ddd$lineup$mean.dist[ddd$lineup$plotno !=
pos], xend = ddd$lineup$mean.dist[ddd$lineup$plotno != pos], y = rep(0.01 * min(density(dat.s)$y),
(m - 1)), yend = rep(0.05 * max(density(dat.s)$y), (m - 1))), size = 1, alpha = I(0.7)) +
geom_segment(aes(x = ddd$lineup$mean.dist[ddd$lineup$plotno == pos], xend = ddd$lineup$mean.dist[ddd$lineup$plotno ==
pos], y = 0.01 * min(density(dat.s)$y), yend = 0.1 * max(density(dat.s)$y)),
colour = "darkorange", size = 1) + geom_text(data = ddd$lineup, y = -0.03 * max(density(dat.s)$y),
size = 2.5, aes(x = mean.dist, label = plotno)) + ylim(c(-0.04 * max(density(dat.s)$y),
max(density(dat.s)$y) + 0.1))
ggsave("sep-dist-largep-new-1.pdf", height = 5, width = 5.5)
### Case Study
## Separation Distance
X <- lineup.dat[lineup.dat$.sample == 20, ]
PX <- lineup.dat[lineup.dat$.sample == 7, ]
M2sep_dist_indx <- function(i, j, clustering = FALSE, nclust = 3){
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
require(fpc)
dX <- dist(X[,1:2])
dPX <- dist(PX[,1:2])
if(clustering){
X$cl <- X[,3]
PX$cl <- PX[,3]
X.clus <- as.vector(cluster.stats(dX, clustering = X$cl)$ave.between.matrix)
PX.clus <- as.vector(cluster.stats(dPX, clustering = PX$cl)$ave.between.matrix)
}else{
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- as.vector(cluster.stats(dX, complete.X)$ave.between.matrix)
PX.clus <- as.vector(cluster.stats(dPX, complete.PX)$ave.between.matrix)
}
sqrt(sum((X.clus - PX.clus)^2))
}
M3sep_dist_indx <- function(i, j, clustering = FALSE, nclust = 3){
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
require(fpc)
dX <- dist(X[,1:2])
dPX <- dist(PX[,1:2])
if(clustering){
X$cl <- X[,3]
PX$cl <- PX[,3]
X.clus <- cluster.stats(dX, clustering = X$cl)$wb.ratio
PX.clus <- cluster.stats(dPX, clustering = PX$cl)$wb.ratio
}else{
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- cluster.stats(dX, complete.X)$wb.ratio
PX.clus <- cluster.stats(dPX, complete.PX)$wb.ratio
}
sqrt(sum((X.clus - PX.clus)^2))
}
lineup.dat <- read.table(file.choose(), header = TRUE) # plot_large_p_small_n_30_80_0_2_3
qplot(X1, X2, data = lineup.dat, geom = "point", alpha = I(0.6), size = I(2), col = factor(cl)) + facet_wrap(~.sample) + scale_color_discrete(name = "Group")
part.largep <- distmet(lineup.dat, var = c("X1", "X2", "cl"), 'M3sep_dist', null_permute("cl"), pos = 20, repl = 1, dist.arg = list(clustering = TRUE, nclust = 3))
part.largep$lineup$plotno[order(part.largep$lineup$mean.dist, decreasing = TRUE)]
files.png <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/large-p-exp","*.png")
files.txt <- dir("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/large-p-exp","*.txt")
metrics1 <- NULL
metrics2 <- NULL
for(i in 1:length(files.txt)){
dat <- read.table(paste("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/large-p-exp/",files.txt[i], sep = ""), header = T)
if(dim(dat)[2] == 4){
lineup.dat <- data.frame(x = dat$x, z = dat$cl, cl = dat$cl, .sample = dat$.sample)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, sep.dist = M3sep_dist_indx(pos.1, pos.2, clustering = TRUE, nclust = 2))
metrics.dat1 <- data.frame(metrics.dat, pic_name = files.png[i])
metrics1 <- rbind(metrics1, metrics.dat1)
}
if(dim(dat)[2] == 6){
lineup.dat <- data.frame(x = dat$X1, z = dat$X2, cl = dat$cl, .sample = dat$.sample)
metrics.dat <- ddply(dat.pos, .(pos.1, pos.2), summarize, sep.dist = M3sep_dist_indx(pos.1, pos.2, clustering = TRUE, nclust = 3))
metrics.dat2 <- data.frame(metrics.dat, pic_name = files.png[i])
metrics2 <- rbind(metrics2, metrics.dat2)
}
metrics <- rbind(metrics1, metrics2)
}
#write.table(metrics, "largep-metrics.txt", row.names = F)
#qplot(X1, X2, data = dat, geom = "point", alpha = I(0.7), color = factor(cl)) + facet_wrap(~.sample) + scale_colour_discrete(name = "Group")
res.exp.lp <- read.csv("/Users/Niladri/Documents/Research/Permutation/paper-metrics-data-code/Mahbub's data/raw_data_turk7.csv")
res.exp.lp <- subset(res.exp.lp, pic_name %in% files.png, select = c(pic_name, response, response_no, plot_location, time_taken))
res.dat <- ddply(res.exp.lp, .(pic_name), summarize, prop = sum(response)/length(response), pos = mean(plot_location), m.time = median(time_taken))
metrics.sub <- subset(metrics, pos.1 != pos.2)
dat.merge <- merge(metrics.sub, res.dat, by = "pic_name")
dat.merge <- subset(dat.merge, pos.2 != pos)
dd <- ddply(dat.merge, .(pic_name, pos.1), summarize, sep.mean = mean(sep.dist), len = length(sep.dist), prop = mean(prop), m.time = mean(m.time))
prop.dist <- ddply(dd, .(pic_name), summarize, diff.sep = sep.mean[len == 19] - max(sep.mean[len == 18]), grtr.sep = sum(sep.mean[len == 18] > sep.mean[len == 19]), prop = mean(prop), m.time = mean(m.time))
qplot(diff.sep, prop, data = prop.dist, size = I(3), xlab = "Difference", ylab = "Detection Rate")+ geom_smooth(se = FALSE) + geom_vline(xintercept = 0, col = "red")+ theme(legend.position = "none")
######===================================================================================
wilks <- function(x){
summary(manova(cbind(X1, X2)~cl, data = subset(lineup.dat, lineup.dat$.sample == x)), test="Wilks")[[4]][3]
}
dddd <- ddply(lineup.dat, .(.sample), summarize, w = wilks(.sample))
sum(dddd$w - dddd$w[dddd$.sample == 19])/19
wilks_dist <- function(X, PX, clustering = FALSE, nclust = 3) {
dX <- dist(X[, 1:2])
dPX <- dist(PX[, 1:2])
if (clustering) {
X$cl <- X[, 3]
PX$cl <- PX[, 3]
X.clus <- summary(manova(cbind(X[,1], X[,2])~cl, data = X), test="Wilks")[[4]][3]
PX.clus <- summary(manova(cbind(PX[,1], PX[,2])~cl, data = PX), test="Wilks")[[4]][3]
} else {
complete.X <- cutree(hclust(dX), nclust)
complete.PX <- cutree(hclust(dPX), nclust)
X.clus <- summary(manova(cbind(X[,1], X[,2])~cl, data = X), test="Wilks")[[4]][3]
PX.clus <- summary(manova(cbind(PX[,1], PX[,2])~cl, data = PX), test="Wilks")[[4]][3]
}
sn.cl <- sign(X.clus - PX.clus)
ifelse(sn.cl == -1, sqrt(sum((X.clus - PX.clus)^2)),0)
}
eden <- ddply(dat.pos, .(pos.1, pos.2), summarize, d = wilks_dist(lineup.dat[lineup.dat$.sample == pos.1, ], lineup.dat[lineup.dat$.sample == pos.2, ], clustering = TRUE))
eden <- subset(eden, pos.1 != pos.2 & pos.2 != 20)
ddply(eden, .(pos.1), summarize, mean(d))
wilks_dist_indx <- function(i, j, clustering = FALSE, nclust = 3){
X <- lineup.dat[lineup.dat$.sample == i,]
PX <- lineup.dat[lineup.dat$.sample == j,]
require(fpc)
dX <- dist(X[,1:2])
dPX <- dist(PX[,1:2])
X$cl <- X[, 3]
PX$cl <- PX[, 3]
X.clus <- summary(manova(cbind(X[,1], X[,2])~cl, data = X), test="Wilks")[[4]][3]
PX.clus <- summary(manova(cbind(PX[,1], PX[,2])~cl, data = PX), test="Wilks")[[4]][3]
sn.cl <- sign(X.clus - PX.clus)
ifelse(sn.cl == -1, sqrt(sum((X.clus - PX.clus)^2)),0)
}
|
67573da26a88ab14f59421614ae8ff2ae3c15079 | de0400bfb372ffbcbfa7db1bea39ef3578c3257b | /man/is_empty_rtable.Rd | 92baf816a9ca60d6d0f4bcc3b27e0241801f98d5 | [
"MIT",
"Apache-2.0"
] | permissive | bbaranow/rtables | caef2758ed69ee8d73516c8bad43fc65d6c32a9c | efdfd495553dd682c5f1d388b778dac635a29cca | refs/heads/master | 2022-11-18T23:46:01.900811 | 2020-06-09T12:38:56 | 2020-06-09T12:38:56 | 277,768,817 | 0 | 1 | NOASSERTION | 2020-07-15T11:42:29 | 2020-07-07T09:05:42 | null | UTF-8 | R | false | true | 285 | rd | is_empty_rtable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtable.R
\name{is_empty_rtable}
\alias{is_empty_rtable}
\title{If rtable is empty}
\usage{
is_empty_rtable(x)
}
\arguments{
\item{x}{object}
}
\value{
if rtable is empty
}
\description{
If rtable is empty
}
|
9f6c4b6cf1865d254575925a339f4ceb903798bf | a595d015cbd1ca1e3ad435cf90251b39c3d230d2 | /R_Scripts/DESEQ2_TKO.R | 6ed427e4168475c4be73b57cf97d6fc4e6ab8d0b | [] | no_license | sbodapati/CRISPR_Benchmarking_Algorithms | 00256deab82f65658dc874cdea5fd665c8ce59e8 | 30da389cca81bca673712944856df447b1d752c7 | refs/heads/master | 2021-08-01T08:23:38.081571 | 2020-04-16T16:17:45 | 2020-04-16T16:17:45 | 203,469,469 | 4 | 3 | null | 2021-07-25T19:52:26 | 2019-08-20T23:26:49 | HTML | UTF-8 | R | false | false | 1,167 | r | DESEQ2_TKO.R | ---
title: "Testing Sunil’s simulations"
author: "Timothy Daley"
date: "2/11/2019"
output: html_document
---
testCounts = read.table(file = "/Users/sbodapati/Desktop/TimFile_2.txt", header = TRUE)
head(testCounts)
counts = testCounts[ ,1:3]
colData = data.frame(condition = factor(c(0, 1, 1))) # 1 is condition, 0 is baseline
rownames(colData) = colnames(counts)
# install DESeq2
#install.packages("BiocManager")
#BiocManager::install("DESeq2")
# set up DESeq
testCountsDESeq2 = DESeq2::DESeqDataSetFromMatrix(countData = counts,
colData = colData,
design = ~ condition)
# compute
testCountsDESeq2 = DESeq2::DESeq(testCountsDESeq2)
# get results
testCountsDESeq2 = DESeq2::results(testCountsDESeq2)
log2fc = testCountsDESeq2$log2FoldChange
library(ggplot2)
ggplot(data.frame(log2fc = log2fc, essential = factor(testCounts$Essentiality)), aes(x = log2fc, col = essential, fill = essential)) + geom_density(alpha = 0.6) + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
``` |
13eae24066e0469812aa0da4ea93e966ca31b9f2 | 12e3d5f8618bbc113e6f039b7346fc5d723015c9 | /Stats_I/Class20/Bootstrap_ClassExamples_with_ForClass.R | 3d780a3dbdcb8476ff8323356fa1e3b461853991 | [] | no_license | raschroeder/R-Coursework | 4af2ded6e9af2c0c64697dcc796a12e508f38ae4 | 1e9800b00f84cb4092c956d9910a710729b9aff3 | refs/heads/master | 2020-04-05T12:44:28.824912 | 2019-02-06T15:59:07 | 2019-02-06T15:59:07 | 156,878,511 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,345 | r | Bootstrap_ClassExamples_with_ForClass.R | ##############################################################
#####################Bootstrapping basics
##############################################################
#################################
#lets build a normal distribution
################################
?rnorm #lets us build a normal distribution with whatever mean and SD we want!
n=1e6 #Population Size
MeanofPop = 0
SDofPop = 1
## Build the population
population<-rnorm(n, MeanofPop, SDofPop)
hist(population)
#Build a Probability density function (just like all those in your books)
plot(density(population))
#################################
#lets sample from our population
################################
SizeofSample = 10 #we are going to take small samples from our population
?sample # This function is going to let us sample from the population.
#We can do it with or without replacement
#sample 1 - draws SizeofSample values from the population, sampling with replacement (each draw is put back)
sample.1<-sample(population, SizeofSample, replace = TRUE)
hist(sample.1)
mean(sample.1) #mean should be around 0 (MeanofPop)
SE.1<-sd(sample.1)/sqrt(SizeofSample) # should be around SDofPop/sqrt(SizeofSample)
SE.1
Estimated.SEM<-SDofPop/sqrt(SizeofSample)
#Sample 2
sample.2<-sample(population, SizeofSample, replace = TRUE)
hist(sample.2)
mean(sample.2)
SE.2<-sd(sample.2)/sqrt(SizeofSample) # should be around SDofPop/sqrt(SizeofSample)
SE.2
#why are the two simulated SE so different from the estimated SE?
# Because we only took 1 sample each time! To bootstrap we need to sample with replacement 1000s of times!
# We need to build a distribution of sample means!
# We could code it ourselves using a nested series of functions
# lets write on the board what we want to happen and then we will examine how to code it below.
samples.means<-replicate(1000,mean(sample(population, SizeofSample, replace = TRUE))) #takes 1000 samples and calcs the means
plot(density(samples.means))
sd(samples.means) # this is actually smaller than the estimated SEM. This is actually more accurate than estimating it.
##############################################################
#####################Bootstrapping Practice 1
##############################################################
#Lets build a beta distribution
BetaDistro<-rbeta(n, shape1=2, shape2=25, ncp = 1)
plot(density(BetaDistro))
True.Median<-median(BetaDistro)
True.Median
#Lets bootstrap the median of this skewed distribution.
Sample.Size.Median = 30
samples.medians<-replicate(1000,median(sample(BetaDistro, Sample.Size.Median, replace = TRUE)))
plot(density(samples.medians))
Boot.Median<-mean(samples.medians)
Boot.Median
Percent.Off<-(abs(True.Median-Boot.Median)/True.Median)*100
Percent.Off
#How can I reduce the error?
##Increase the sample size or the number of estimates
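# (Added illustration, not part of the original handout) Re-running the same bootstrap
# with a larger sample size should shrink the error; the ".300" object names below are my own.
samples.medians.300 <- replicate(1000, median(sample(BetaDistro, 300, replace = TRUE)))
Boot.Median.300 <- mean(samples.medians.300)
Percent.Off.300 <- (abs(True.Median - Boot.Median.300)/True.Median)*100
Percent.Off.300 # typically much smaller than Percent.Off from the n = 30 run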
##############################################################
#####################Bootstrapping a Correlation
##############################################################
# lets pretend you had 20 people on two scales and you wanted to correlate their data:
set.seed(666)
n=20
Corr.data <- data.frame(y = runif(n), x = runif(n))
Corr.data
# Original correlation
with(Corr.data, cor(x, y))
# So basically a weak relationship; lets examine CIs around the correlation
# Our statistic function, which will be fed into boot()
boot_corr <- function(data, resample_vector) {
cor(data$x[resample_vector], data$y[resample_vector])
}
library(boot)
cor_results <- boot(Corr.data, boot_corr, sim = "ordinary",R = 1000)
boot.ci(cor_results)
##############################
### Some magic stuff below ###
##############################
# Lets run a difference test on the SD difference between the groups using the boot function
# We will look to see if the CI for the difference includes zero
# lets start with some data: Groups 1 and 2 both have a mean of 50, but group 1 has an
# SD = 25, and group 2 an SD = 12.5
N=30
set.seed(42)
DatSet<-data.frame(Group1=rnorm(N,50,25),Group2=rnorm(N,50,12.5))
summary(DatSet)
sd(DatSet$Group1)
sd(DatSet$Group2)
# We need to define our test.
boot.variance <- function(data, i) {
SD1=sd(data$Group1[i])
SD2=sd(data$Group2[i])
d=(SD1-SD2)
}
Var_results <- boot(DatSet, boot.variance, sim = "ordinary",R = 1000)
boot.ci(Var_results)
# We can run a Monte-Carlo simulation as well
MonteCarlo.variance <- function(N) {
SD1=sd(rnorm(N,50,25))
SD2=sd(rnorm(N,50,12.5))
d=(SD1-SD2)
return(d)
}
sample.size=30
M.Var<-replicate(10000,MonteCarlo.variance(sample.size))
Percentle.CI.Var<-quantile(M.Var, c(.05,.5,.95))
Percentle.CI.Var
##############################################################
#####################Small Group Challange
##############################################################
# We will form groups of 3.
# The goal is to create your own bootstrapped between subjects t-test!
# You will 1) build populations, 2) sample from each distrobution, 3) create difference scores, 3) use the difference score to calculate the boot t.test (all by code!)
# Remember SD of the bootstrapped distrobution of scores = SEM of that distrobution
# SO, boot.t test = mean(differences scores)/SD(differences scores) [Note this is quick and dirty version]
# I have build you one function which will help at the end
# to get the pvalues for your t-test use tis function.
# You need to enter the t and the n as the sample size of your t-test
# Run the code to load into memory
Boot.t.pvalue<-function(boot.t, sample.size) {
pvalue<-2*pt(-abs(boot.t),df=(sample.size*2)-2)
return(pvalue)
}
# When you find you t-value, you just need to do this pvalue<- Boot.t.pvalue(tvalue, sample.size)
###################################
#######Task
###################################
#Step 1: Make chart of the steps you need to follow to solve the task
# (my headers below will help you organize your chart).
# here are the paramters you need to run this simulation study
#Make two Normal populations!
#population 1: mean = 60 , SD = 10
#population 2: mean = 90 , SD = 20
n=1e6 #Population Size
### Build the populations
#graph each (just to check your code)
#Sample with 10 people from each population and do each 10000 timess
# Get a vector of the differences between each sample mean
#plot the vector (this is the distrobution of mean differences of 1000 studies on this experiment)
# Run a boot.t test and get the pvalue
|
87862519f568de16a947dd685bfa61bf7b287406 | 49a6940dee400fe0953f5a6b7dc899f0297926f9 | /cleaning_data/no-cgm-or-a1c/Protocol_I.R | 4e23eeff72e712f5048979d8ba9c3bed30e2772a | [] | no_license | joshuagrossman/a1c | 947f50de2d760f6929531859110ca16db31a0545 | 0825dba7fe588f8915595bb947c15a3d0a636a69 | refs/heads/master | 2023-03-04T11:54:09.442051 | 2021-02-05T08:13:26 | 2021-02-05T08:13:26 | 197,865,786 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,790 | r | Protocol_I.R | ################################## LIBRARIES ###################################
source("lib/source.R")
################################## GLOBALS #####################################
data_path <- "data/Protocol_I/Data Tables"
cleaned_data_path <- "data/Protocol_I/clean"
data_name <- "Protocol_I"
################################# READ IN DATA #################################
dfs <- make_dfs(data_path)
#View(dfs)
cleaned_dfs <- list()
################################# CGM DATA #####################################
cleaned_dfs$cgm <-
dfs$IDataCGM %>%
filter(!is.na(Glucose)) %>%
mutate(datetime = convert_to_datetime(DeviceDtDaysFromEnroll, DeviceTm)) %>%
select(PtID, datetime, Glucose)
################################# HBA1C DATA ###################################
cleaned_dfs$a1c <-
dfs$IScreening %>%
mutate(date = convert_to_date(TestDtDaysAfterEnroll)) %>%
select(PtID, date, HbA1c)
################################# INSULIN DATA #################################
# TODO(Josh): Fix this if insulin data is useful
# cleaned_dfs$insulin <-
# dfs$HDataPump %>%
# mutate(datetime = convert_to_datetime(DeviceDtDaysFromEnroll, DeviceTm)) %>%
# select(PtID, datetime, ImmediateVol, ExtendedVol)
################################ STATIC MEASUREMENTS ###########################
IPtRoster_cleaned <-
dfs$IPtRoster %>%
select(PtID, AgeAsOfEnrollDt)
IScreening_cleaned <-
dfs$IScreening %>%
select(PtID, Gender, Ethnicity, Race, Weight, Height)
cleaned_dfs$measurements <-
full_join(IPtRoster_cleaned, IScreening_cleaned, by = "PtID")
################################ WRITE CSVS TO FILE ############################
write_main_csvs(cleaned_dfs, cleaned_data_path)
split_into_csvs(str_c(cleaned_data_path, "/cgm.csv"), data_name)
|
72f0d3b60ef64d865016352112de87761074e460 | e910b929a3b5d2e4c1628e22138254a1808d95c4 | /man/qseckw.Rd | 42e18ead971ed6367adf19ac2f2a401cdb3cfe26 | [] | no_license | cran/SecKW | 2e3d6d4acd400fffe7f4a16331a5054f957643dd | 80f09e2d00a51be56d3112efcffb93bd637c23fe | refs/heads/master | 2020-12-22T18:27:44.488677 | 2016-07-18T13:02:45 | 2016-07-18T13:02:45 | 236,889,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 916 | rd | qseckw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qseckw.R
\name{qseckw}
\alias{qseckw}
\title{The quantile function of the Secant Kumaraswamy Weibull probability distribution.}
\usage{
qseckw(p, a, b, c, lambda, lower = TRUE, log.p = FALSE)
}
\arguments{
\item{p}{Vector of probabilities.}
\item{a}{A parameter.}
\item{b}{B parameter.}
\item{c}{C parameter.}
\item{lambda}{Lambda parameter.}
\item{lower}{logical; if TRUE (default), probabilities are \eqn{P[X \leq x]} otherwise, \eqn{P[X > x]}.}
\item{log.p}{logical; if TRUE, probabilities p are given as log(p).}
}
\value{
A vector of quantiles of the Secant Kumaraswamy Weibull distribution corresponding to the probabilities in p.
}
\description{
The quantile function of the Secant Kumaraswamy Weibull probability distribution.
}
\examples{
qseckw(0.5, 1, 1, 1, 1, TRUE, FALSE)
qseckw(0.5, 3, 0.5, 2, 2, TRUE, FALSE)
}
|
9baa49e9061450422dd9f14c4cb0fe45b7e4cab5 | 1b925a76538fc8e59ea4e481d2871628d929d530 | /R/makeridgeplot.R | 7e1f87a23d55b4ab1b9992dff9ec1efed3b017d8 | [] | no_license | CASPResearch/ProvPack | 7217be03f0e531fa4ca1f68058a0a6ba5afefcc6 | 630b519f6939077dad69e5bfccf3b0eef054bcce | refs/heads/master | 2021-04-27T08:26:14.701309 | 2019-06-27T13:03:54 | 2019-06-27T13:03:54 | 122,489,213 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,164 | r | makeridgeplot.R | #' Make ridge plot of distributional data
#'
#' This function creates overlapping KDEs and plots them as an overlapping 'ridge plot'
#' inspired by the iconic "Unknown Pleasures" album cover by Joy Division. Whilst being very visually
#' attractive it also allows large amounts of KDEs to be plotted in one go, allowing for succint representation.
#' This also allows the different KDE's to be sorted by a clustering algorithm, which groups similar spectra
#' together according to their Kolmogorov-Smirnov statistic.
#'
#' For large datasets this can take a while but be patient!
#' @param distributional An object of class distributional (see "load_distributional"), e.g. Zircon ages, rutile ages etc...
#' @param scale This is the degree of overlap for the ridges. A value of 1 means that the largest overlap in the
#' plot touches the base of the ridge above. Default value is 10, adjust to see what works best for individual cases.
#' @param clustered Boolean, defaults to TRUE indicates whether the samples are to be sorted vertically according
#' to the similarity of their spectra. If FALSE, they are grouped according to their "typing"
#' @param from,to Minimum and maximum x value. Optional
#' @param samebandwidth Boolean, default FALSE. If TRUE, all KDEs are generated with the same bandwidth (not recommended)
#' @param normalise Boolean, default TRUE. All KDEs given same area.
#' @param adaptive Boolean, default TRUE. Geological spectra are often multi-modal, and so the KDE algorithms
#' should have an "adaptive" bandwidth which varies across the x-axis according to point density. This is broad
#' when data is sparse but narrow when data is dense. Turn off only if data is strongly unimodal.
#' @return Returns a ggplot object. Automatically plots it, but can be saved to variable as any normal ggplot object.
#' @keywords distributional, ridges, KDEs
#' @examples
#' \dontrun{ridgeplot <- makeridges(zircs)}
#' \dontrun{ridgeplot <- makeridges(zircs,samebandwidth = FALSE, normalise = FALSE, adaptive = FALSE)}
#' \dontrun{ridgeplot <- makeridges(zircs,clustered = FALSE, scale = 20)}
#' \dontrun{ridgeplot <- makeridges(zircs, from = -500, to = 2500)}
#' \dontrun{ridgeplot <- makeridges(zircs, clustered = FALSE)}
#' @export
makeridges <- function(distributional,
scale = 10,
clustered = T,
from = NA,
to = NA,
samebandwidth = F,
normalise = T,
adaptive = T) {
if(class(distributional)!= "distributional"){
print("Can only make ridgeplots for distributional objects!")
return(NULL)
}
kdesdf = provenance::KDEs(
distributional,
from = from,
to = to,
normalise = normalise,
samebandwidth = samebandwidth,
adaptive = adaptive
)
#Extract into a list of objects KDE
list_kdes = kdesdf$kdes
#generate a list and subsequently a dataframe of all the y coordinates
listys = lapply(list_kdes, function(x)
x$y)
df = as.data.frame(listys)
#extract the common x coordinates
commonx = list_kdes[[1]]$x
#add common x coordinates to the dataframe
df$x = commonx
#This basically reshapes the data so that x is common, and y's are different. It allows plotting of multiple lines simultaneously
df_melted <- reshape2::melt(df, id = "x")
#Adds a typing column by checking which type list each sample name belongs to. Complex code, don't be afraid.
df_melted$typing = as.character(lapply(df_melted$variable, function(x)
names(distributional$typing)[indx = which(sapply(distributional$typing, function(y)
is.element(x, y)))]))
#Orders the dataframe based on type so they plot in groups next to each other. Manually adjust for visual effect, e.g. by manually setting levels of "sandtype"
df_melted$typing <-
factor(df_melted$typing, levels = distributional$TrueOrder)
if (clustered) {
ks_dist <- as.dist(provenance::diss(distributional,method='KS'))
clust <- hclust(ks_dist, method= "ward.D")
names <- colnames(df)
clustorder <- names[rev(clust$order)]
clst <- factor(clustorder, levels = clustorder)
df_melted$variable <- factor(df_melted$variable, levels = clst)
sorted <- df_melted
} else {
sorted <- df_melted[order(df_melted$typing),]
sorted$variable <-
factor(sorted$variable, levels = unique(sorted$variable))
}
colnames(sorted) <- c("Variable", "Samples", "Density", "Type")
plot <- ggplot(data = sorted,
aes(
x = Variable,
y = Samples,
height = Density,
fill = Type,
group = Samples
)) +
ggridges::geom_density_ridges(stat = "identity",
scale = scale,
size = 0.3) +
scale_fill_manual(values = as.character(distributional$palette)) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_discrete(expand = c(0.00, 0)) +
theme_minimal()
return(plot)
}
|
29f1af810fb2925e1ded54820454f48ec4a25152 | fa79fb5281eb3d132993970cc11ca7f1e1cb5d33 | /metaanalysis_archive/analyze_expression_by_tissue.R | 39f8494401f33f309cd0993c25e7159a85021604 | [] | no_license | metabdel/motrpac_public_data_analysis | e9a72b42359e49a6a99e95aa8f557cfd95f84c3c | 3a9d81a0ea071d892bc9023474f4881c2b32b95e | refs/heads/master | 2023-03-02T12:03:51.024399 | 2021-02-10T00:20:59 | 2021-02-10T00:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,652 | r | analyze_expression_by_tissue.R | setwd('/Users/David/Desktop/MoTrPAC/PA_database')
library(metafor)
source('repos/motrpac/helper_functions.R')
# Get the datasets and their metadata
load("PADB_univariate_results_and_preprocessed_data_acute.RData")
acute_datasets = cohort_data
acute_metadata = cohort_metadata
load("PADB_univariate_results_and_preprocessed_data_longterm.RData")
longterm_datasets = cohort_data
longterm_metadata = cohort_metadata
# extract the data objects for the meta-analysis
acute_genes = rownames(acute_datasets[[1]]$gene_data)
for(i in 2:length(acute_datasets)){
if(length(acute_datasets[[i]])<3){next}
acute_genes = intersect(acute_genes,rownames(acute_datasets[[i]]$gene_data))
}
longterm_genes = rownames(longterm_datasets[[1]]$gene_data)
for(i in 2:length(longterm_datasets)){
if(length(longterm_datasets[[i]])<3){next}
longterm_genes = intersect(longterm_genes,rownames(longterm_datasets[[i]]$gene_data))
}
# get genes that are expressed
q_thr = 0.5
expressed_genes1 = list()
sum_counts1 = list()
for(i in 1:length(acute_datasets)){
if (length(acute_datasets[[i]])<3){next}
tissue = acute_metadata[[i]]$tissue
m = acute_datasets[[i]]$gene_data[acute_genes,]
m = apply(m,2,rank)
m = m/nrow(m)
curr_expressed = rowSums(m>q_thr)/ncol(m)
if(is.null(expressed_genes1[[tissue]])){
expressed_genes1[[tissue]] = curr_expressed
sum_counts1[[tissue]]=1
}
else{
expressed_genes1[[tissue]] = expressed_genes1[[tissue]] + curr_expressed
sum_counts1[[tissue]] = sum_counts1[[tissue]]+1
}
}
expressed_genes2 = list()
sum_counts2 = list()
for(i in 1:length(longterm_datasets)){
if (length(longterm_datasets[[i]])<3){next}
tissue = longterm_metadata[[i]]$tissue
m = longterm_datasets[[i]]$gene_data[longterm_genes,]
m = apply(m,2,rank)
m = m/nrow(m)
curr_expressed = rowSums(m>q_thr)/ncol(m)
if(is.null(expressed_genes2[[tissue]])){
expressed_genes2[[tissue]] = curr_expressed
sum_counts2[[tissue]] = 1
}
else{
expressed_genes2[[tissue]] = expressed_genes2[[tissue]] + curr_expressed
sum_counts2[[tissue]] = sum_counts2[[tissue]]+1
}
}
for(nn in names(expressed_genes1)){
expressed_genes1[[nn]] = expressed_genes1[[nn]] / sum_counts1[[nn]]
}
for(nn in names(expressed_genes2)){
expressed_genes2[[nn]] = expressed_genes2[[nn]] / sum_counts2[[nn]]
}
gs = intersect(acute_genes,longterm_genes)
plot(expressed_genes1$muscle[gs],expressed_genes2$muscle[gs])
plot(expressed_genes1$blood[gs],expressed_genes2$blood[gs])
tissue_expression_scores = list(longterm = expressed_genes2,acute=expressed_genes1,shared_genes=gs)
save(tissue_expression_scores,file="tissue_expression_scores.RData")
|
d4d8cf20eb110a7d765122dec8d58b8c27885a60 | c0e766a6a57e3c5c32f8b0afe130b8df66e6dbf9 | /rsellPoshmark/man/PM_Sales_Upload_Submit.Rd | 92f7718b209aa195b71e4bb39a46158e3b0b3e63 | [] | no_license | t2tech-corp/Rsell-Packages | b450fec180754aa9cf0cf3ab6b369c74c57b7e70 | 047a2348650e5a2ee0bc52500824a34f16167056 | refs/heads/main | 2023-03-03T00:22:15.006720 | 2021-02-14T22:58:12 | 2021-02-14T22:58:12 | 329,474,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 523 | rd | PM_Sales_Upload_Submit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PM_Sales_Upload_Submit.R
\name{PM_Sales_Upload_Submit}
\alias{PM_Sales_Upload_Submit}
\title{Submit Poshmark Sales Activity to Database}
\usage{
PM_Sales_Upload_Submit(sales_activity)
}
\arguments{
\item{sales_activity}{Poshmark Sales Activity Report}
}
\value{
sales_activity Poshmark Sales Activity Report with added Duplicate column
}
\description{
This function Submits the Poshmark Sales Activity to Database.
}
\details{
Requires: dplyr
}
|
291b84c4a41b54898a6cb3db979c11740d317a4e | 5af60ba162be455e6f1df3f68f72047aef59eccb | /man/align_lc.Rd | a363c43deb8ba60f463a8bf555c970bc25d061bc | [
"MIT"
] | permissive | bcjaeger/tblHelpers | b1e91e8a3a9d4382d9790c27ecf4551586168512 | 9fcfe4fc936f06406935c005c0428171bbe2a920 | refs/heads/master | 2021-01-03T02:47:58.961819 | 2020-02-20T02:14:02 | 2020-02-20T02:14:02 | 239,887,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 678 | rd | align_lc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ft_style.R
\name{align_lc}
\alias{align_lc}
\title{Left-center align}
\usage{
align_lc(object)
}
\arguments{
\item{object}{a \code{flextable} object.}
}
\value{
a \code{flextable} object with left-centered columns
}
\description{
This function takes care of some tedious stylistic
changes that users may want to automate. Specifically,
left aligning the far left column and center aligning
every other column.
}
\examples{
library(flextable)
library(dplyr)
iris \%>\%
group_by(Species) \%>\%
slice(1:3) \%>\%
ft_grouped_by() \%>\%
theme_vanilla() \%>\%
align_lc() \%>\%
pad_groups()
}
|
053c6c179b0500f9f46344f2310df2c3a16d098d | 175034b927dfde0bd0100c4be76a901324291470 | /exercise-2/exercise.R | 4cbae5ce6e895c97eac5c2505c9be0aeee6aad86 | [
"MIT"
] | permissive | AlexEarll/module9-dataframes | 358346285e7bb8cde28ef07e27abb212a88a92f0 | 65f26ada762c2e76a8d8553df1551bda2017d33e | refs/heads/master | 2021-01-11T18:57:09.673696 | 2017-01-19T23:20:02 | 2017-01-19T23:20:02 | 79,280,629 | 0 | 0 | null | 2017-01-17T22:50:17 | 2017-01-17T22:50:17 | null | UTF-8 | R | false | false | 2,553 | r | exercise.R | # Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100)
# Hint: use the `paste()` function to produce the list
employees <- c(paste("Employee", 1:100))
print(employees)
# Create a vector of 100 random salaries for the year 2014
# Use the `runif()` function to pick a random number between 40000 and 50000
salaries.2014 <- runif(100, 40000, 50000)
# Create a vector of 100 salaries in 2015 that have increased from 2014 by some amount
# Hint: use `runif()` to add a random number to 2014's salaries. Starting from a
# _negative_ number so that salaries may decrease!
salaries.2015 <- salaries.2014 + runif(100,-1000,5000)
# Create a data.frame 'salaries' by combining the 3 vectors you just made
# Remember to set `stringsAsFactors=FALSE`!
table <- data.frame(employees, salaries.2014, salaries.2015, stringsAsFactors=FALSE)
# Create a column 'raise' that stores the size of the raise between 2014 and 2015
table["raise"] <- salaries.2015 - salaries.2014
# Create a column 'got.raise' that is TRUE if the person got a raise
table["got.raise"] <- salaries.2015 - salaries.2014 > 0
### Retrieve values from your data frame to answer the following questions
### Note that you should get the value as specific as possible (e.g., a single
### cell rather than the whole row!)
# What was the 2015 salary of employee 57
print((table$salaries.2015[57]))
# How many employees got a raise?
num.got.raise <- table$got.raise[table$got.raise == TRUE]
print(length(num.got.raise))
# What was the value of the highest raise?
max.raise <- table$raise[table$raise == max(table$raise)]
print(max.raise)
# What was the "name" of the employee who received the highest raise?
max.raise.name <- table$employees[table$raise == max(table$raise)]
print(max.raise.name)
# What was the largest decrease in salaries between the two years?
max.decrease <- table$raise[table$raise == min(table$raise)]
print(max.decrease)
# What was the name of the employee who recieved largest decrease in salary?
max.decrease.name <- table$employees[table$raise == min(table$raise)]
print(max.decrease.name)
# What was the average salary increase?
average.salary <- mean(table$raise)
print(average.salary)
### Bonus ###
# Write a .csv file of your salaries to your working directory
write.csv(table, file = "salaries.csv")
# For people who did not get a raise, how much money did they lose?
num.no.raise <- table$raise[table$got.raise == FALSE]
print(num.no.raise)
# Is that what you expected them to lose based on how you generated their salaries?
#wat
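# (Added note) Since the 2015 salaries were generated by adding runif(100, -1000, 5000),
# any decrease is bounded below by -1000, so the losses printed above should all be under $1000.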
|
5a0c3ec2af6c0ad1cfcdaf88b8a942d7b412b5bd | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.inspector/man/describe_assessment_targets.Rd | d4f58ad1c12ec6b4b13dbcefaebb6d049e75c5f2 | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 950 | rd | describe_assessment_targets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.inspector_operations.R
\name{describe_assessment_targets}
\alias{describe_assessment_targets}
\title{Describes the assessment targets that are specified by the ARNs of the assessment targets}
\usage{
describe_assessment_targets(assessmentTargetArns)
}
\arguments{
\item{assessmentTargetArns}{[required] The ARNs that specifies the assessment targets that you want to describe.}
}
\description{
Describes the assessment targets that are specified by the ARNs of the assessment targets.
}
\section{Accepted Parameters}{
\preformatted{describe_assessment_targets(
assessmentTargetArns = list(
"string"
)
)
}
}
\examples{
# Describes the assessment targets that are specified by the ARNs of the
# assessment targets.
\donttest{describe_assessment_targets(
assessmentTargetArns = list(
"arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq"
)
)}
}
|
41abf2f0214333560d4babd3a4d1084c1233e41e | b1228c161b4b527503eab4ff22ba2d90f5b39ed4 | /cachematrix.R | e33b9bbbd4fbe35588775816cc7b53d980117a7e | [] | no_license | matheux/ProgrammingAssignment2 | 6fd7ad8ae93fe7c35903b6b371345c402b9e4aff | ac283e26976df2f4ea9ce02fab9d04baf3407691 | refs/heads/master | 2021-01-20T16:44:40.414160 | 2014-11-22T18:36:00 | 2014-11-22T18:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 956 | r | cachematrix.R | ## Below functions are meant to be used to calculate the inverse of a matrix
## Because it might be a time consuming operation (if the matrix is big), one might
## not want to do this twice for the same matrix. These functions allow this.
## Creates a special "matrix"-like object that caches its inverse, when its calculated
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
        inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## This function calculates the inverse of a matrix. If it has already been calculated, then
## the function would retrieve the inverse from cache
cacheSolve <- function(x, ...) {
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinv(inv)
inv
}
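## Example usage (added for illustration; any invertible matrix works):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm) # computes the inverse and stores it in the cache
## cacheSolve(cm) # prints "getting cached data" and returns the cached inverse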
|
8652b1a3b4f9525b28b3aee6848850fc574f6ef4 | d0ceb8abe592f2bc3c14ee230c1fab9ee1052e9a | /sandbox/R/normalIncrementalVaR.R | 18ec8ed9eb76c20ccc14f95d46fe794576fda5c6 | [] | no_license | Jicheng-Yan/FactorAnalytics | 7c8afce66a7ea743883aaedd9382a8d7703a5606 | e9a79f7a759a43f21744817ca550cfab9d3c3d5b | refs/heads/master | 2020-12-25T05:02:35.257591 | 2014-06-12T23:40:32 | 2014-06-12T23:40:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,138 | r | normalIncrementalVaR.R | #' compute normal incremental VaR given portfolio weights, mean vector and
#' covariance matrix.
#'
#' compute normal incremental VaR given portfolio weights, mean vector and
#' covariance matrix. Incremental VaR is defined as the change in portfolio VaR
#' that occurs when an asset is removed from the portfolio.
#'
#'
#' @param mu n x 1 vector of expected returns.
#' @param Sigma n x n return covariance matrix.
#' @param w n x 1 vector of portfolio weights.
#' @param tail.prob scalar tail probability.
#' @return n x 1 vector of incremental VaR values
#' @author Eric Zivot and Yi-An Chen
#' @references Jorian (2007) pg. 168
#' @examples
#'
#' normalIncrementalVaR(mu=c(1,2),Sigma=matrix(c(1,0.5,0.5,2),2,2),w=c(0.5,0.5),tail.prob = 0.01)
#'
normalIncrementalVaR <-
function(mu, Sigma, w, tail.prob = 0.01) {
## compute normal incremental VaR given portfolio weights, mean vector and
## covariance matrix
## Incremental VaR is defined as the change in portfolio VaR that occurs
## when an asset is removed from the portfolio
## inputs:
## mu n x 1 vector of expected returns
## Sigma n x n return covariance matrix
## w n x 1 vector of portfolio weights
## tail.prob scalar tail probability
## output:
## iVaR n x 1 vector of incremental VaR values
## References:
## Jorian (2007) pg. 168
mu = as.matrix(mu)
Sigma = as.matrix(Sigma)
w = as.matrix(w)
if ( nrow(mu) != nrow(Sigma) )
stop("mu and Sigma must have same number of rows")
if ( nrow(mu) != nrow(w) )
stop("mu and w must have same number of elements")
if ( tail.prob < 0 || tail.prob > 1)
stop("tail.prob must be between 0 and 1")
n.w = nrow(mu)
## portfolio VaR with all assets
pVaR = crossprod(w, mu) + sqrt( t(w) %*% Sigma %*% w ) * qnorm(tail.prob)
temp.w = w
iVaR = matrix(0, n.w, 1)
for (i in 1:n.w) {
## set weight for asset i to zero and renormalize
temp.w[i,1] = 0
temp.w = temp.w/sum(temp.w)
pVaR.new = crossprod(temp.w, mu) + sqrt( t(temp.w) %*% Sigma %*% temp.w ) * qnorm(tail.prob)
iVaR[i,1] = pVaR.new - pVaR
## reset weight
temp.w = w
}
return(-iVaR)
}
|
23cf953c00ecc883484c60938ec97b4ed91170fa | ae35a82f670cc677c06ab201a014127f1c821fd9 | /sand/inst/code/chapter4.R | b0e518e70d1520bb3457cfcc04b8e6ea4c7a088f | [] | no_license | masanao-yajima/sand | 676d5bb99b9b0185da1bf669dc39d158f9354a70 | 16a0227562614a0fd7381f08d98ef1b8145566a9 | refs/heads/master | 2021-01-23T03:28:02.674213 | 2017-03-24T16:04:22 | 2017-03-24T16:04:22 | 86,083,383 | 1 | 1 | null | 2017-03-24T15:32:43 | 2017-03-24T15:32:43 | null | UTF-8 | R | false | false | 8,164 | r | chapter4.R | # SAND with R, chapter4.tex
# CHUNK 1
library(sand)
data(karate)
hist(degree(karate), col="lightblue", xlim=c(0,50),
xlab="Vertex Degree", ylab="Frequency", main="")
# CHUNK 2
hist(graph.strength(karate), col="pink",
xlab="Vertex Strength", ylab="Frequency", main="")
# CHUNK 3
library(igraphdata)
data(yeast)
# CHUNK 4
ecount(yeast)
# ---
## [1] 11855
# ---
# CHUNK 5
vcount(yeast)
# ---
## [1] 2617
# ---
# CHUNK 6
d.yeast <- degree(yeast)
hist(d.yeast,col="blue",
xlab="Degree", ylab="Frequency",
main="Degree Distribution")
# CHUNK 7
dd.yeast <- degree.distribution(yeast)
d <- 1:max(d.yeast)-1
ind <- (dd.yeast != 0)
plot(d[ind], dd.yeast[ind], log="xy", col="blue",
xlab=c("Log-Degree"), ylab=c("Log-Intensity"),
main="Log-Log Degree Distribution")
# CHUNK 8
a.nn.deg.yeast <- graph.knn(yeast,V(yeast))$knn
plot(d.yeast, a.nn.deg.yeast, log="xy",
col="goldenrod", xlab=c("Log Vertex Degree"),
ylab=c("Log Average Neighbor Degree"))
# CHUNK 9
A <- get.adjacency(karate, sparse=FALSE)
library(network)
g <- network::as.network.matrix(A)
library(sna)
sna::gplot.target(g, degree(g), main="Degree",
circ.lab = FALSE, circ.col="skyblue",
usearrows = FALSE,
vertex.col=c("blue", rep("red", 32), "yellow"),
edge.col="darkgray")
# CHUNK 10
l <- layout.kamada.kawai(aidsblog)
plot(aidsblog, layout=l, main="Hubs", vertex.label="",
vertex.size=10 * sqrt(hub.score(aidsblog)$vector))
plot(aidsblog, layout=l, main="Authorities",
vertex.label="", vertex.size=10 *
sqrt(authority.score(aidsblog)$vector))
# CHUNK 11
eb <- edge.betweenness(karate)
E(karate)[order(eb, decreasing=T)[1:3]]
# ---
## Edge sequence:
##
## [53] John A -- Actor 20
## [14] Actor 20 -- Mr Hi
## [16] Actor 32 -- Mr Hi
# ---
# CHUNK 12
table(sapply(cliques(karate), length))
# ---
##
## 1 2 3 4 5
## 34 78 45 11 2
# ---
# CHUNK 13
cliques(karate)[sapply(cliques(karate), length) == 5]
# ---
## [[1]]
## [1] 1 2 3 4 8
##
## [[2]]
## [1] 1 2 3 4 14
# ---
# CHUNK 14
table(sapply(maximal.cliques(karate), length))
# ---
##
## 2 3 4 5
## 11 21 2 2
# ---
# CHUNK 15
clique.number(yeast)
# ---
## [1] 23
# ---
# CHUNK 16
cores <- graph.coreness(karate)
sna::gplot.target(g, cores, circ.lab = FALSE,
circ.col="skyblue", usearrows = FALSE,
vertex.col=cores, edge.col="darkgray")
detach("package:network")
detach("package:sna")
# CHUNK 17
aidsblog <- simplify(aidsblog)
dyad.census(aidsblog)
# ---
## $mut
## [1] 3
##
## $asym
## [1] 177
##
## $null
## [1] 10405
# ---
# CHUNK 18
ego.instr <- induced.subgraph(karate,
neighborhood(karate, 1, 1)[[1]])
ego.admin <- induced.subgraph(karate,
neighborhood(karate, 1, 34)[[1]])
graph.density(karate)
# ---
## [1] 0.1390374
# ---
graph.density(ego.instr)
# ---
## [1] 0.25
# ---
graph.density(ego.admin)
# ---
## [1] 0.2091503
# ---
# CHUNK 19
transitivity(karate)
# ---
## [1] 0.2556818
# ---
# CHUNK 20
transitivity(karate, "local", vids=c(1,34))
# ---
## [1] 0.1500000 0.1102941
# ---
# CHUNK 21
reciprocity(aidsblog, mode="default")
# ---
## [1] 0.03278689
# ---
reciprocity(aidsblog, mode="ratio")
# ---
## [1] 0.01666667
# ---
# CHUNK 22
is.connected(yeast)
# ---
## [1] FALSE
# ---
# CHUNK 23
comps <- decompose.graph(yeast)
table(sapply(comps, vcount))
# ---
##
## 2 3 4 5 6 7 2375
## 63 13 5 6 1 3 1
# ---
# CHUNK 24
yeast.gc <- decompose.graph(yeast)[[1]]
# CHUNK 25
average.path.length(yeast.gc)
# ---
## [1] 5.09597
# ---
# CHUNK 26
diameter(yeast.gc)
# ---
## [1] 15
# ---
# CHUNK 27
transitivity(yeast.gc)
# ---
## [1] 0.4686663
# ---
# CHUNK 28
vertex.connectivity(yeast.gc)
# ---
## [1] 1
# ---
edge.connectivity(yeast.gc)
# ---
## [1] 1
# ---
# CHUNK 29
yeast.cut.vertices <- articulation.points(yeast.gc)
length(yeast.cut.vertices)
# ---
## [1] 350
# ---
# CHUNK 30
is.connected(aidsblog, mode=c("weak"))
# ---
## [1] TRUE
# ---
# CHUNK 31
is.connected(aidsblog, mode=c("strong"))
# ---
## [1] FALSE
# ---
# CHUNK 32
aidsblog.scc <- clusters(aidsblog, mode=c("strong"))
table(aidsblog.scc$csize)
# ---
##
## 1 4
## 142 1
# ---
# CHUNK 33
kc <- fastgreedy.community(karate)
# CHUNK 34
length(kc)
# ---
## [1] 3
# ---
sizes(kc)
# ---
## Community sizes
## 1 2 3
## 18 11 5
# ---
# CHUNK 35
membership(kc)
# ---
## Mr Hi Actor 2 Actor 3 Actor 4 Actor 5 Actor 6
## 2 2 2 2 3 3
## Actor 7 Actor 8 Actor 9 Actor 10 Actor 11 Actor 12
## 3 2 1 1 3 2
## Actor 13 Actor 14 Actor 15 Actor 16 Actor 17 Actor 18
## 2 2 1 1 3 2
## Actor 19 Actor 20 Actor 21 Actor 22 Actor 23 Actor 24
## 1 2 1 2 1 1
## Actor 25 Actor 26 Actor 27 Actor 28 Actor 29 Actor 30
## 1 1 1 1 1 1
## Actor 31 Actor 32 Actor 33 John A
## 1 1 1 1
# ---
# CHUNK 36
plot(kc,karate)
# CHUNK 37
library(ape)
dendPlot(kc, mode="phylo")
# CHUNK 38
k.lap <- graph.laplacian(karate)
eig.anal <- eigen(k.lap)
# CHUNK 39
plot(eig.anal$values, col="blue",
ylab="Eigenvalues of Graph Laplacian")
# CHUNK 40
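# eigen() returns eigenvalues in decreasing order, so for the 34-vertex karate
# network column 34 is the constant eigenvector of eigenvalue 0 and column 33
# corresponds to the second-smallest Laplacian eigenvalue -- the Fiedler vector
# used here for spectral bisection.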
f.vec <- eig.anal$vectors[, 33]
# CHUNK 41
faction <- get.vertex.attribute(karate, "Faction")
f.colors <- as.character(length(faction))
f.colors[faction == 1] <- "red"
f.colors[faction == 2] <- "cyan"
plot(f.vec, pch=16, xlab="Actor Number",
ylab="Fiedler Vector Entry", col=f.colors)
abline(0, 0, lwd=2, col="lightgray")
# CHUNK 42
func.class <- get.vertex.attribute(yeast.gc, "Class")
table(func.class)
# ---
## func.class
## A B C D E F G M O P R T U
## 51 98 122 238 95 171 96 278 171 248 45 240 483
# ---
# CHUNK 43
yc <- fastgreedy.community(yeast.gc)
c.m <- membership(yc)
# CHUNK 44
table(c.m, func.class, useNA=c("no"))
# ---
## func.class
## c.m A B C D E F G M O P R T U
## 1 0 0 0 1 3 7 0 6 3 110 2 35 14
## 2 0 2 2 7 1 1 1 4 39 5 0 4 27
## 3 1 9 7 18 4 8 4 20 10 23 8 74 64
## 4 25 11 10 22 72 84 81 168 14 75 16 27 121
## 5 1 7 5 14 0 4 0 2 3 6 1 34 68
## 6 1 24 1 4 1 4 0 7 0 1 0 19 16
## 7 6 18 6 76 7 9 3 7 8 5 1 7 33
## 8 8 12 67 59 1 34 0 19 60 10 7 6 73
## 9 4 1 7 7 2 10 5 3 2 0 3 0 11
## 10 0 0 0 6 0 0 0 2 0 5 0 11 1
## 11 0 9 0 10 1 3 0 0 0 0 0 2 4
## 12 0 1 3 0 0 0 0 6 10 0 0 0 2
## 13 0 1 1 2 0 1 0 0 2 0 0 16 10
## 14 1 0 4 1 0 1 0 0 4 0 1 0 11
## 15 0 1 0 0 0 2 0 2 0 0 1 0 8
## 16 0 1 2 0 0 1 0 0 10 0 0 0 0
## 17 0 0 1 3 0 0 0 2 0 0 0 2 3
## 18 0 0 0 0 3 1 0 9 0 0 1 0 1
## 19 0 1 1 1 0 0 0 0 0 0 0 0 3
## 20 0 0 0 6 0 0 0 1 0 0 0 1 2
## 21 1 0 0 0 0 0 0 0 6 0 0 1 0
## 22 0 0 0 0 0 0 0 1 0 0 0 0 8
## 23 0 0 0 0 0 0 0 4 0 0 0 0 0
## 24 0 0 0 0 0 0 2 2 0 0 0 1 0
## 25 0 0 0 0 0 0 0 5 0 0 0 0 0
## 26 0 0 1 0 0 0 0 4 0 0 1 0 1
## 27 3 0 4 0 0 1 0 0 0 0 0 0 0
## 28 0 0 0 0 0 0 0 0 0 6 0 0 0
## 29 0 0 0 1 0 0 0 1 0 0 3 0 0
## 30 0 0 0 0 0 0 0 0 0 2 0 0 2
## 31 0 0 0 0 0 0 0 3 0 0 0 0 0
# ---
# CHUNK 45
assortativity.nominal(yeast, (V(yeast)$Class=="P")+1,
directed=FALSE)
# ---
## [1] 0.4965229
# ---
# CHUNK 46
assortativity.degree(yeast)
# ---
## [1] 0.4610798
# ---
|
2af0751c83e7d9b071579a1d49fe8b44609badda | 65c8daa4013d472247dcd245785903945d1cadfc | /man/projsplx.Rd | 4c1ab2c35da27595f9a9f4a01d1b75ab61900d89 | [
"MIT"
] | permissive | HansonMenghan/alstructure | 7c6907adad6b17ba66a3f41ff3511d549595c245 | e3554111e746a2359675d6615459af8579b69a14 | refs/heads/master | 2023-03-20T08:11:25.527193 | 2018-05-26T18:30:10 | 2018-05-26T18:30:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 873 | rd | projsplx.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factor.R
\name{projsplx}
\alias{projsplx}
\title{Project a vector onto the simplex}
\usage{
projsplx(y)
}
\arguments{
\item{y}{a \eqn{n}{n} dimensional vector}
}
\value{
a \eqn{n}{n} dimensional vector which is the projection of \eqn{y}{y} onto
the \eqn{n}{n} dimensional simplex
}
\description{
Algorithm from (Y. Chen and Ye 2011) to project an \eqn{n}{n} dimensional vector onto the \eqn{n}{n} dimensional
simplex (i.e. the set of points in \eqn{x \in \mathcal{R}^n}{x in R^n} such that
\eqn{\sum_i x_i = 1}{x_1 + x_2 + ... + x_n= 1} and \eqn{x_i > 0}{x_i > 0} for all \eqn{i}{i} ). This is used to enforce the admixture constraints in
the \code{uALS} algorithm.
}
\references{
Chen, Y., and X. Ye. 2011. “Projection Onto A Simplex.” ArXiv E-Prints, January.
}
\keyword{internal}
|
748b7f4c878fe57897684cc733ce0a2279ff0604 | c4ded5120db258ebdd2cf143fbf3bee2fa0065b9 | /src/song_lyrics/50years_billboard_hot_100/tidy/data.R | 138d8f4b26faf5856793180cd3c2dbca2e59e507 | [] | no_license | rlads2021/project-spacegray3128 | c0be0b041d9567a0e805854b8de1ebf8a08c14da | 203dd49db14367bc6bbadafd93094e4aa4d2e988 | refs/heads/master | 2023-06-07T08:34:31.696514 | 2021-06-23T14:23:12 | 2021-06-23T14:23:12 | 379,555,786 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 454 | r | data.R | library(httr)
library(rvest)
library(stringr)
library(dplyr)   # added: needed for mutate() and %>%
library(tidyr)   # added: needed for gather()
# reshape from wide (one column per year) to long format with gather()
df_artists %>%
mutate(Rank = 1:100) %>%
gather("Year", "Artist", 1:50) -> df_Artist
df_titles %>%
mutate(Rank = 1:100) %>%
gather("Year", "Title", 1:50) -> df_Title
df_lyrics %>%
mutate(Rank = 1:100) %>%
gather("Year", "Lyrics", 1:50) -> df_Lyrics
# final data frame
df_All <- df_Title %>%
mutate(Artist = df_Artist$Artist) #,
#Lyrics = df_Lyrics$Lyrics)
|
6d3ed89fc64a0e10260a6aa09d026fcbadb319b8 | 2f95a5177b0c47e8af4163196c79539bbb6cc499 | /R-old/PD52.R | 888af130e7eb1eb9fcad97e6f4ac1ab6fddb30b9 | [] | no_license | pipetcpt/study-pkpd | 1bd270a05cbaaa82f77ddfdfb3f84f9eb1e417fd | a702cb5b6f56fe2c5236582b34213dc49a5c82ff | refs/heads/master | 2020-06-19T01:04:41.186453 | 2019-11-07T10:26:10 | 2019-11-07T10:26:10 | 196,513,459 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,093 | r | PD52.R | # PD 52
require(wnl)
setwd("D:/Rt/PD")
dPD52 = read.csv("PD52.csv")
colnames(dPD52) = c("TIME", "DV", "ID", "DOSE")
IDs = unique(dPD52[,"ID"]) ; IDs
nID = length(IDs) ; nID
AMTs = unique(dPD52[,"DOSE"]) ; AMTs
require(deSolve)
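# Each model below follows the same pattern: fPD52*de() defines the ODE
# right-hand side for one subject's response, the matching fPD52*() wrapper
# integrates it with deSolve::lsoda() at the observed time points for every ID
# (using the global index i set inside the loop), and wnl::nlr() then fits the
# parameters against the pooled observations in dPD52.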
# First-order biophase model
fPD52ade = function(t, y, p)
{
Cp = AMTs[i]*p["Kp"]*t*exp(-p["Kp"]*t)
Stim = p["Smax"]*Cp^p["n"]/(p["SD50"]^p["n"] + Cp^p["n"])
Kut = p["Kout"]/(p["Km"] + y[1])
dy1dt = Stim - Kut*y[1]
return(list(c(dy1dt)))
}
fPD52a = function(THETA)
{
Kp = THETA[1]
n = THETA[2]
Km = THETA[3]/1e3
Kout = THETA[4]
Smax = THETA[5]
SD50 = THETA[6]
y = vector()
for (i in 1:nID) {
i <<- i
cID = IDs[i]
cTIME = unique(c(0, dPD52[dPD52$ID == cID, "TIME"]))
iTIME = cTIME %in% dPD52[dPD52$ID == cID, "TIME"]
cy = lsoda(c(0), cTIME, fPD52ade, c(Kp=Kp, n=n, Km=Km, Kout=Kout, Smax=Smax, SD50=SD50))
y = c(y, cy[iTIME, "1"])
}
return(y)
}
y0a = fPD52a(c(5.93, 1.68, 1, 30.5, 244, 0.9842)) ; y0a
length(y0a)
r1 = nlr(fPD52a, dPD52, c("Kp", "n", "Km", "Kout", "Smax", "SD50"), c(10, 2, 0.001, 30, 240, 5)) ; r1
# Bolus approximation model
fPD52bde = function(t, y, p)
{
Cp = AMTs[i]*exp(-p["Kp"]*t)
Stim = p["Smax"]*Cp^p["n"]/(p["SD50"]^p["n"] + Cp^p["n"])
Kut = p["Kout"]/(p["Km"] + y[1])
dy1dt = Stim - Kut*y[1]
return(list(c(dy1dt)))
}
fPD52b = function(THETA)
{
Kp = THETA[1]
n = THETA[2]
Km = THETA[3]/1e3
Kout = THETA[4]
Smax = THETA[5]
SD50 = THETA[6]
y = vector()
for (i in 1:nID) {
i <<- i
cID = IDs[i]
cTIME = unique(c(0, dPD52[dPD52$ID == cID, "TIME"]))
iTIME = cTIME %in% dPD52[dPD52$ID == cID, "TIME"]
cy = lsoda(c(0), cTIME, fPD52bde, c(Kp=Kp, n=n, Km=Km, Kout=Kout, Smax=Smax, SD50=SD50))
y = c(y, cy[iTIME, "1"])
}
return(y)
}
y0b = fPD52b(c(2.49348, 2.02, 1, 32.52, 195, 1.556)) ; y0b
length(y0b)
r2 = nlr(fPD52b, dPD52, c("Kp", "n", "Km", "Kout", "Smax", "SD50"), c(6, 1.7, 0.001, 30, 240, 1)) ; r2
# Figure 52.1
plot(0, 0, type="n", xlim=c(0, 3.5), ylim=c(0, 80), xlab="Time (h)", ylab="Locomotor activity")
abline(h=1:8*10, lty=3)
for (i in 1:nID) {
cID = IDs[i]
points(dPD52[dPD52$ID == cID, "TIME"], dPD52[dPD52$ID == cID, "DV"], pch=17-i, col=c("red", "blue")[i])
lines(dPD52[dPD52$ID == cID, "TIME"], y0a[dPD52$ID == cID])
lines(dPD52[dPD52$ID == cID, "TIME"], y0b[dPD52$ID == cID], lty=2)
}
# Refined model
fPD52cde = function(t, y, p)
{
Cp = AMTs[i]*exp(-p["Kp"]*t)
Stim = p["Smax"]*Cp^p["n"]/(p["SD50"]^p["n"] + Cp^p["n"])
Kut = p["Kout"]/(1e-8 + y[1])
dy1dt = Stim - Kut*y[1]
return(list(c(dy1dt)))
}
fPD52c = function(THETA)
{
Kp = THETA[1]
n = THETA[2]
Kout = THETA[3]
Smax = THETA[4]
SD50 = THETA[5]
y = vector()
for (i in 1:nID) {
i <<- i
cID = IDs[i]
cTIME = unique(c(0, dPD52[dPD52$ID == cID, "TIME"]))
iTIME = cTIME %in% dPD52[dPD52$ID == cID, "TIME"]
cy = lsoda(c(1e-8), cTIME, fPD52cde, c(Kp=Kp, n=n, Kout=Kout, Smax=Smax, SD50=SD50))
y = c(y, cy[iTIME, "1"])
}
return(y)
}
y0c = fPD52c(c(2.49348, 2.02, 32.52, 195, 1.556)) ; y0c
length(y0c)
r3 = nlr(fPD52c, dPD52, c("Kp", "n", "Kout", "Smax", "SD50"), c(6, 1.7, 30, 240, 1)) ; r3
dx(r3)
# Alternative model 2
fPD52dde = function(t, y, p)
{
Cp = AMTs[i]*exp(-p["Kp"]*t)
Stim = 1 + p["Smax"]*Cp^p["n"]/(p["SD50"]^p["n"] + Cp^p["n"])
dy1dt = p["Kin"]*Stim - p["Kout"]*y[1]
return(list(c(dy1dt)))
}
fPD52d = function(THETA)
{
Kp = THETA[1]
n = THETA[2]
Kin = THETA[3]
Kout = THETA[4]
Smax = THETA[5]
SD50 = THETA[6]
y = vector()
for (i in 1:nID) {
i <<- i
cID = IDs[i]
cTIME = unique(c(0, dPD52[dPD52$ID == cID, "TIME"]))
iTIME = cTIME %in% dPD52[dPD52$ID == cID, "TIME"]
cy = lsoda(c(Kin/Kout), cTIME, fPD52dde, c(Kp=Kp, n=n, Kin=Kin, Kout=Kout, Smax=Smax, SD50=SD50))
y = c(y, cy[iTIME, "1"])
}
return(y)
}
y0d = fPD52d(c(2.49348, 2.02, 0.1, 32.52, 195, 1.556)) ; y0d
length(y0d)
r4 = nlr(fPD52d, dPD52, c("Kp", "n", "Kin", "Kout", "Smax", "SD50"), c(6, 1.7, 0.1, 30, 240, 1)) ; r4
dx(r4)
|
a9e8791013a2334942f1de55847aeefbb234b18c | 9c22b7117aefe645d9f6d21c9962bcc043547a8e | /man/cNames.Rd | edb55524415560edab51dcf94d7a0fa31351cd1a | [] | no_license | Farabell/MarIOGraphing | ff28322fda76aa2a3b0a29c28fc52da8251d976a | 1fafba71cb07b96cea8131e812f388d1194d5a65 | refs/heads/master | 2016-09-14T00:47:50.466392 | 2016-04-27T03:18:08 | 2016-04-27T03:18:08 | 57,167,067 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 408 | rd | cNames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cNames.R
\name{cNames}
\alias{cNames}
\title{DF Naming Function}
\usage{
cNames(x)
}
\arguments{
\item{x}{A df}
}
\value{
renamed columns
}
\description{
Renames the columns of a five-column data frame to a set of
predetermined names.
Required by the maxfit() function.
}
\examples{
cNames(YI1)
cNames(CM)
}
\author{
Kristen Rodriguez
}
|
6d0a10781586581994b4b928c391819efd0c3129 | 29585dff702209dd446c0ab52ceea046c58e384e | /simba/R/makead.R | d2220d3cfaef1c2a460bc3ff1dab85015ae73de4 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,724 | r | makead.R | "makead" <-
function(nspec, nplots, avSR=NULL, anc=NULL, grad.v=NULL, cf=0.2, puq=0.01) {
# grad.v is a vector describing the gradient setting, if it is set a gradient is applied.
# puq gives the proportion of ubiquituous species which are allowed to grow everywhere
# if ancestor is given, nspec, nplots and avSR is generated from this matrix, but avSR can still be changed
if (!is.null(anc)){
nspec <- ncol(anc)
nplots <- nrow(anc)
if (is.null(avSR)){
anc <- ifelse(anc > 0, 1, 0)
avSR <- mean(rowSums(anc))
}
}
probv <- 1/c(2:(nspec+1))^cf
probv <- probv - min(probv) + 1/nplots
probv <- probv/max(probv)
mat <- matrix(NA, nplots, nspec)
for(i in 1:nspec) {
mat[,i] <- as.vector(rbind(matrix(1, round(nplots*probv[i],0), 1), matrix(0, (nplots-round(nplots*probv[i],0)), 1)))
}
mat <- apply(mat, 2, "sample")
if (!is.null(grad.v)){
if (!is.null(puq)){
nuq <- round(nspec*puq, 0)
vuq <- rep(1, nuq)
vuq <- sample(c(vuq,vuq-1))
vuq2 <- rep(0, ncol(mat)-length(vuq))
vuq <- c(vuq, vuq2)
vuqs <- vuq == 1
vrest <- vuq == 0
muq <- mat[,vuqs]
muq <- muq + matrix(rnorm(ncol(muq)*nrow(muq), mean=0.5, sd=0.2), nrow(muq), ncol(muq))
mgrad <- mat[,vrest]
}
else {
mgrad <- mat
muq <- NULL
}
divid <- runif(ncol(mgrad))
divid1 <- ifelse(divid <= 0.5, TRUE, FALSE)
divid2 <- ifelse(divid <= 0.5, FALSE, TRUE)
grad.v <- as.numeric(grad.v)
vgrad1 <- grad.v/max(grad.v)
vgrad2 <- 1-vgrad1
mgrad1 <- mgrad[,divid1]*vgrad1
mgrad2 <- mgrad[,divid2]*vgrad2
mrand <- matrix(rnorm(ncol(mgrad)*nrow(mgrad), mean=0.5, sd=0.2), nrow(mgrad), ncol(mgrad))
mrand <- ifelse(mrand<0, 0, mrand)
mgrad1 <- mgrad1 + mrand[,divid1]
mgrad2 <- mgrad2 + mrand[,divid2]
mat <- cbind(muq, mgrad1, mgrad2)
contr <- nspec*nplots
x.f <- 1
for (i in 1:100){
mat.tmp <- ifelse(mat>=(i/100), 1, 0)
x.f <- ifelse(abs(sum(mat.tmp)-avSR*nplots)<=contr,i/100, x.f) # keep optimum value
contr <- ifelse(abs(sum(mat.tmp)-avSR*nplots)<=contr, abs(sum(mat.tmp)-avSR*nplots), contr)
}
mat <- ifelse(mat>=x.f, 1, 0)
##save rare species
##mat[1,colSums(mat)==0] <- 1
nspec <- c(ncol(muq), ncol(mgrad1), ncol(mgrad2))
}
mat <- mat[,c(sample(c(1:ncol(mat))))]
mat <- data.frame(mat, row.names=as.character(c(1:nrow(mat))))
names(mat) <- as.character(c(1:ncol(mat)))
attr(mat, "lengths") <- nspec
return(mat)
} |
b34ee6ef05c86e50dde4ad8bb2424193ab9ddece | ee056e185f00f9d3918a1338261b4b8633cb48bc | /man/bnormnlr-package.Rd | 1efa6f8098420b341c6599b32b685e16bbec1467 | [] | no_license | cran/bnormnlr | 773c7dbdea8aff88829b0a36fcd74df7f8e067aa | 5a07c0e7a633db4f825a90dcd22f845c56a1ac88 | refs/heads/master | 2020-04-04T22:11:22.316786 | 2014-12-08T00:00:00 | 2014-12-08T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,933 | rd | bnormnlr-package.Rd | \name{bnormnlr-package}
\alias{bnormnlr-package}
\alias{bnormnlr}
\docType{package}
\title{
Bayesian Estimation for Normal Heteroscedastic Nonlinear Regression Models
}
\description{
Implementation of Bayesian estimation in normal heteroscedastic nonlinear regression Models following Cepeda-Cuervo, (2001).
}
\details{
\tabular{ll}{
Package: \tab bnormnlr\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-12-09\cr
License: \tab GPL-2\cr
}
The package provides three functions: bnlr to perform Bayesian estimation for heteroscedastic normal nonlinear regression models; chainsum to summarize the MCMC chains obtained from bnlr and infocrit to extract information criteria measures from the model fit.
}
\author{
Nicolas Molano-Gonzalez, Marta Corrales Bossio, Maria Fernanda Zarate, Edilberto Cepeda-Cuervo.
Maintainer: Nicolas Molano-Gonzalez <[email protected]>
}
\references{
Cepeda-Cuervo, E. (2001). Modelagem da variabilidade em modelos lineares generalizados. Unpublished Ph.D. thesis. Instituto de Matematicas. Universidade Federal do Rio de Janeiro.
Cepeda-Cuervo, E. and Gamerman, D. (2001). Bayesian modeling of variance heterogeneity in normal regression models. Brazilian Journal of Probability and Statistics 14.1: 207-221.
Cepeda-Cuervo, E. and Achcar, J.A. (2010). Heteroscedastic nonlinear regression models. Communications in Statistics-Simulation and Computation 39.2 : 405-419.
}
\keyword{ package }
\examples{
utils::data(muscle, package = "MASS")
###mean and variance functions
fmu<-function(param,cov){ param[1] + param[2]*exp(-cov/exp(param[3]))}
fsgma<-function(param,cov){drop(exp(cov\%*\%param))}
##Note: use more MCMC chains (i.e NC=10000) for more accurate results.
m1b<-bnlr(y=muscle$Length,f1=fmu,f2=fsgma,x=muscle$Conc,
z=cbind(1,muscle$Conc),bta0=c(20,-30,0),gma0=c(2,0),Nc=1200)
chainsum(m1b$chains,burn=1:200)
infocrit(m1b,1:8000)
}
|
52c66d8f1ee974c7e4b4fef46554863566bca218 | 71e2d1ae54ecc891532f1f08480571a66f6185a8 | /Script/Thesis_data processing_env.R | 4b840bf75482a0e3fb55bc30fdd0896db43cdd52 | [] | no_license | Anhbt95/IMBRSea_Thesis | 739d117feafc67419cc0de4f10ae44d7a0a5b34c | ff1d399ef3ff8dbeca8d7c1f5824d72f9a3b7a28 | refs/heads/master | 2022-10-23T10:31:03.387136 | 2020-06-14T22:07:46 | 2020-06-14T22:07:46 | 272,233,430 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,885 | r | Thesis_data processing_env.R | # IMBRSea Thesis
# Tuan Anh Bui
# 18.04.2020
# Belgian beam trawl Discard spatial analysis
# This is the script for data processing - environmental variables
# Env vars: bathy, slope, chl, sst, substrate (mud, gravel, sand)
########################################
# Load support files and packages -----------------------------------------
source("HighstatLibV12.R")
# install.packages(c("cowplot", "googleway", "ggplot2", "ggrepel",
# "ggspatial", "libwgeom", "sf", "rnaturalearth", "rnaturalearthdata"))
library(data.table)
#library(COSTeda)
library(lattice); #library(nnet); library(tcltk); library(tcltk2)
library(sf)
library(rnaturalearth)
library(rnaturalearthdata)
library(INLA)
library(sp)
library(raster)
library(tidyverse)
library(lubridate)
library(RColorBrewer)
library(rgdal)
#install.packages("ggpubr")
library(ggpubr)
#install.packages("ggridges")
library(ggridges)
#library(dismo)
#library(splancs)
#library(reshape)
#library(gstat)
#library(rgeos)
#library(sdmpredictors) # to get Bathymetry
#library(fields)
# library(devtools)
#install_github('timcdlucas/INLAutils', force = T)
#devtools::install_github("timcdlucas/INLAutils")
# library(INLAutils)
#install.packages("inlabru")
#library(inlabru)
#install_github("gfalbery/ggregplot")
#library(ggregplot)
########################################
# Load data ---------------------------------------------------------------
# Indicate local directory
# dir_data = "C:/Users/tbui/Thesis_R/Data" # Change directory here if the local dir for data is different
# Dir in Tuan-Anh laptop
dir_data = "D:/IMBRSea/1.Study/_IMBRSea_Thesis/Data_analysis/Thesis_Discard-spatial-analysis/Data"
# obs_final_quota_20062019_TBB_DEF_70-99 with quota and other management variables
obs_final <- readRDS(file=paste0(dir_data,"/","obs_final_quota_20062019_TBB_DEF_70-99.rds"))
obs <- obs_final
########################################
# Data check --------------------------------------------------------------
# Since the rules are not available to all fishing activity
# (certain IcesDivision or period, or in 2019 as rules are available to 2018 only)
# The availability of data should be check (non NA data)
obs_NA <- obs %>% filter(is.na(haul_limit) == T)
obs_NA_non <- obs %>% filter(is.na(haul_limit) == F)
unique(obs_NA_non$NameEnglish)
# data is available for 9 out of 12 taxa of interest
obs_NA_sub <- obs_NA %>% filter(NameEnglish %in% unique(obs_NA_non$NameEnglish)) %>%
select(Year, NameScientific, NameEnglish, HaulTime, IcesDivision, haul_limit)
a1 <- obs_NA_non %>% group_by(NameScientific, NameEnglish) %>% summarize(n_ava = n())
a2 <- obs_NA_sub %>% group_by(NameScientific, NameEnglish) %>% summarize(n_NA = n())
a3 <- obs_NA_sub %>% filter(Year == 2019) %>% group_by(NameScientific, NameEnglish) %>% summarize(n_2019 = n())
obs_check <- left_join(left_join(a1,a2),a3); rm(a1,a2,a3)
# 6/9 taxa has non NA samples more than NA ones.
# The other 3 species are: Common dab, Lemon sole, Raja spp
# Boundary Area of Interest -----------------------------------------------
# Set area of interest (aoi)
world <- ne_countries(scale = "medium", returnclass = "sf")
aoi_lim <- c("Belgium", "Denmark",
"France", "Germany",
"Luxembourg", "Netherlands",
"United Kingdom", "Spain",
"Switzerland", "Austria", "Italy","Ireland")# "Isle of Man"
aoi <- world %>% filter(admin %in% aoi_lim)
ggplot(data = aoi) + geom_sf() + coord_sf(xlim = c(-9, 10), ylim = c(43.5, 56.5))
st_write(aoi, paste0(dir_data,"/Data_shp/","aoi.shp"))
aoi2 <- as(aoi, "Spatial") # Convert aoi from sf to sp to be processed in INLA
plot(aoi2)
# Sampling area
ggplot() +
geom_sf(data = aoi) +
geom_point(data = obs %>%
group_by(HaulLatitude, HaulLongitude) %>%
summarize(n = n()),
mapping = aes(x = HaulLongitude, y = HaulLatitude)) +
coord_sf(xlim = c(-9,10), ylim = c(43.5, 56.5)) +
theme_bw()
# Spatial range of data
range(obs$HaulLatitude) # 44.18333 - 55.98333
range(obs$HaulLongitude) # -8.250000 6.883333
# Define the boundary area around data
# The boundary is selected so that the mesh boundary will cover the
# sea in the area of interest only
# if x or y are set larger, other sea such as Mediterranean can be added
# thus making the mesh building complicated
# Extent of area of interest
xym2<- as.matrix(data.frame(x = c(min(obs$HaulLongitude)-0.5,
max(obs$HaulLongitude)+0.5,
max(obs$HaulLongitude)+0.5,
min(obs$HaulLongitude)-0.5),
y = c(min(obs$HaulLatitude)-0.3,
min(obs$HaulLatitude)-0.3,
max(obs$HaulLatitude)+0.3,
max(obs$HaulLatitude)+0.3)))
# Look at the box outside the dots
p <- Polygon(xym2)
ps <- Polygons(list(p),1)
sps <- SpatialPolygons(list(ps))
proj4string(sps) <- proj4string(aoi2) #add projection to sps
plot(sps)
# Since the substrate data does not cover the whole area of interest (sps)
# The spatial polygon for study area must be adjusted
# We first extract shapefile of boundaries (sps) and the land (aoi_rec)
# Then overlay with substrate data in QGIS to mask the boundary polygon
# Save sps to shapefile
#sps1 <- st_as_sf(sps)
#st_write(sps1, paste0(dir_data, "/", "sps1.shp"))
#sps1
# Add new sps created in QGIS
sps <- readOGR(paste0(dir_data, "/Data_shp/", "sps_final.shp"))
plot(sps)
# Crop with the land (aoi)
aoi_rec <- crop(aoi2, sps)
proj4string(sps) <- proj4string(aoi_rec) #Add projection to sps
plot(sps) # Plot the box
plot(aoi_rec,add=T) # aoi_rec is the land
# plot(aoi_rec)
#Select the polygon which contains the data => being the sea
coast <- rgeos::gDifference(sps, aoi_rec) #coast = the sea
plot(coast, col="red") # in red aoi_rec=the land
# coast will be used to identify the mesh in analysis
# save coast
coast_sf <- st_as_sf(coast)
st_write(coast_sf, paste0(dir_data, "/", "coast.shp"))
readOGR(paste0(dir_data, "/", "coast.shp")) #test reopen
########################################
# Environmental variables -------------------------------------------------
# Original files are located in a local directory on Tuan-Anh's laptop
# The working version, masked from the original files and smaller in size,
# will be uploaded to GitHub
# local directory for original files
dir_data_env <- "D:/IMBRSea/1.Study/_IMBRSea_Thesis/Data_analysis/Thesis_R/Data"
dir_data_env2 <- "C:/Users/User/Desktop/wget" #for monthly CHL and SST
#Get the file names
bathy <- raster(paste0(dir_data_env,"/","bathy_30s.tif"))
slope <- raster(paste0(dir_data_env,"/","biogeo06_30s.tif"))
gravel <- raster(paste0(dir_data_env,"/","GravelPercent.asc"))
mud <- raster(paste0(dir_data_env,"/","MudPercent.asc"))
sand <- raster(paste0(dir_data_env,"/","SandPercent.asc"))
# chl and sst pred are culmulative mean values (2002 - 2019) and will be used for prediction
chl_pred <- raster(paste0(dir_data_env,"/","A20021852019334.L3m_CU_CHL_chlor_a_4km.nc"))
sst_pred <- raster(paste0(dir_data_env,"/","AQUA_MODIS.20020704_20191130.L3m.CU.SST.sst.4km.nc"))
# Unify projection of data (WGS84 EPSG:4326)
proj4string(gravel) <- proj4string(mud) <- proj4string(sand) <- proj4string(bathy)
proj4string(chl_pred) <- proj4string(sst_pred) <- proj4string(bathy)
# Processing --------------------------------------------------------------
# Mask raster data to the area of interest
# First the data should be "cropped" to adjust the extent of the data
# as well as reduce computational burden
# Then the data will be "masked" to the area of interest
# Since the raster data are different in resolution
# The data should be
# 1. Crop to an extent that is larger than the area of interest
# 2. Resample to unify resolution
# 3. Crop and mask again to area of interest
# 1. Crop to an extent that is larger than the area of interest (should use lapply)
# extent
ext <- c(xmin = min(obs$HaulLongitude)-1, xmax = max(obs$HaulLongitude)+1,
ymin = min(obs$HaulLatitude)-1, ymax = max(obs$HaulLatitude)+1)
bathy <- crop(bathy, ext)
slope <- crop(slope, ext)
chl_pred <- crop(chl_pred, ext)
sst_pred <- crop(sst_pred, ext)
gravel <- crop(gravel, ext)
mud <- crop(mud, ext)
sand <- crop(sand, ext)
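# (added sketch, following the "should use lapply" note above) the repeated
# crop() calls could be collapsed into a single step over a named list:
# rast_list <- list(bathy = bathy, slope = slope, chl_pred = chl_pred,
#                   sst_pred = sst_pred, gravel = gravel, mud = mud, sand = sand)
# rast_list <- lapply(rast_list, raster::crop, y = ext)
# list2env(rast_list, envir = .GlobalEnv)  # write the cropped rasters back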
# 2. Resample to unify resolution using nearest neighbor method
# chl, gravel, mud, sand are resampled to the resolution of bathy
# (bathy has the highest resolution among the raster data)
chl_pred <- raster::resample(chl_pred, bathy, method = 'ngb')
sst_pred <- raster::resample(sst_pred, bathy, method = 'ngb')
gravel <- raster::resample(gravel, bathy, method = 'ngb')
mud <- raster::resample(mud, bathy, method = 'ngb')
sand <- raster::resample(sand, bathy, method = 'ngb')
# 3. Crop and mask again to area of interest (sps)
# bathy
bathy <- crop(bathy, sps)
bathy <- mask(bathy, sps)
bathy <- abs(bathy) #make negative (depth) positive
# slope
slope <- crop(slope, sps)
slope <- mask(slope, sps)
# gravel, mud, sand
gravel <- crop(gravel, sps)
gravel <- mask(gravel, sps)
gravel <- gravel/100 #Convert 0-100% to ratio
mud <- crop(mud, sps)
mud <- mask(mud, sps)
mud <- mud/100 #Convert 0-100% to ratio
sand <- crop(sand, sps)
sand <- mask(sand, sps)
sand <- sand/100 #Convert 0-100% to ratio
# chl_pred sst_pred
chl_pred <- crop(chl_pred, sps)
chl_pred <- mask(chl_pred, sps)
sst_pred <- crop(sst_pred, sps)
sst_pred <- mask(sst_pred, sps)
# Save env data -----------------------------------------------------------
dir_data
dir_data_env <- paste0(dir_data,"/Data_env")
raster::writeRaster(bathy, filename = paste0(dir_data_env,"/","bathy_sub.tif"))
raster::writeRaster(slope, filename = paste0(dir_data_env,"/","slope_sub.tif"))
raster::writeRaster(gravel, filename = paste0(dir_data_env,"/","gravel_sub.tif"))
raster::writeRaster(sand, filename = paste0(dir_data_env,"/","sand_sub.tif"))
raster::writeRaster(mud, filename = paste0(dir_data_env,"/","mud_sub.tif"))
raster::writeRaster(chl_pred, filename = paste0(dir_data_env,"/","chl_pred.tif"))
raster::writeRaster(sst_pred, filename = paste0(dir_data_env,"/","sst_pred.tif"))
# Raster visualization
p1 <- rasterVis::levelplot(bathy, margin = F, main = "bathy")
p2 <- rasterVis::levelplot(slope, margin = F, main = "slope")
p3 <- rasterVis::levelplot(chl_pred, margin = F, main = "chl_a")
p7 <- rasterVis::levelplot(sst_pred, margin = F, main = "sst_a")
p4 <- rasterVis::levelplot(gravel, margin = F, main = "gravel")
p5 <- rasterVis::levelplot(mud, margin = F, main = "mud")
p6 <- rasterVis::levelplot(sand, margin = F, main = "sand")
ggpubr::ggarrange(p1, p2, p3, p4, p5, p6, p7,
ncol = 4, nrow = 2)
# Plot Gravel, Mud, Sand
ggpubr::ggarrange(p4, p5, p6,
ncol = 3, nrow = 1)
# Stack environmental predictors
predictors <- stack(bathy, slope, gravel, mud, sand)
# Create the data set with the environmental predictors ---------------------------------
# raster::extract is used to extract values of raster layers at sample points
obs <- as_tibble(cbind(obs, raster::extract(predictors, as.matrix(cbind(obs$HaulLongitude,obs$HaulLatitude)))))
# Rename Environmental predictors
# layer - Bathy
# biogeo06_30s - Slope
# Chlorophyll.Concentration..OCI.Algorithm - Chl_a
names(obs)[names(obs) == "layer"] <- "Bathy"
names(obs)[names(obs) == "biogeo06_30s"] <- "Slope"
names(obs)[names(obs) == "Chlorophyll.Concentration..OCI.Algorithm"] <- "Chl_a"
# Add Substrate factor variables to data
# If percentage of any substrate type is >= 1/3,
# the substrate will be categorized as that type
obs$Substrate <- NA
obs$Substrate <- if_else(obs$GravelPercent >= 1/3, "Gravel",
if_else(obs$SandPercent >= 1/3, "Sand", "Mud")
)
summary(obs$Substrate)
summary(obs) # IF you have NAs => check where they are! # It can be because the data are close to the coast
# It can also be because the extent is not overlapping the observer dataset
# SST and CHL extraction -------------------------------------------------------------
# Monthly SST and monthly and annual CHL are derived from
# MODIS 4-km synthetic products from
# the NASA Ocean Biology Distributed Active Archive Center (OB.DAAC)
# https://oceancolor.gsfc.nasa.gov/
# SST ---------------------------------------------------------------------
library(ncdf4)
##Load netCDF files
#Create a list of file
# Change the directory - paste0("_change_this_part","/") - and name pattern - pattern="*_change_this_part"
(f_sst <- list.files(path = paste0(dir_data_env2,"/"),
pattern="*.L3m.MO.SST.sst.4km.nc",full.names=T))
#Note: Directory separation symbol of R "/" is not the same as of Windows "\
#Explore the netCDF file
nc_open(f_sst[1])
#Indicate variable, read from the netCDF file
var <- "sst" #"chlor_a"
##Convert netCDF files to StackRaster format
sst_stack <- stack(f_sst, varname = var) #Indicate variable as chlor_a
sst_stack
proj4string(sst_stack) <- proj4string(bathy)
#Crop RasterStack by area of interest
sst_AOI <- crop(sst_stack, ext)
#Extract sst to obs
obs$sst <- NA
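# The loop below matches each monthly netCDF layer to the hauls from the same
# year/month: the year and month are read from the file's time_coverage_start
# and time_coverage_end attributes, and the layer values are then extracted at
# the corresponding haul positions.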
for (i in 1:length(f_sst)) {
# progress indicator
print(paste("Processing file",i,"from",length(f_sst),sep=" "))
data <- nc_open(f_sst[i])
# Extract information from netCDF file
# Extract date
dateini<-ncatt_get(data,0,"time_coverage_start")$value
dateend<-ncatt_get(data,0,"time_coverage_end")$value
datemean<-mean(c(as.Date(dateend,"%Y-%m-%dT%H:%M:%OSZ"),as.Date(dateini,"%Y-%m-%dT%H:%M:%OSZ")))
year <- as.integer(format(datemean, "%Y"))
month <- as.integer(format(datemean, "%m"))
obs_sub <- obs %>% filter(Year == year, Month == month)
obs[which(obs$Year == year & obs$Month == month),"sst"] <- raster::extract(sst_AOI[[i]],
as.matrix(cbind(obs_sub$HaulLongitude,obs_sub$HaulLatitude)))
}
summary(obs$sst)
# CHL ---------------------------------------------------------------------
##Load netCDF files
#Create a list of file
# Change the directory - paste0("_change_this_part","/") - and name pattern - pattern="*_change_this_part"
(f_chl <- list.files(path = paste0(dir_data_env2,"/"),
pattern="*.L3m_MO_CHL_chlor_a_4km.nc",full.names=T))
#Note: Directory separation symbol of R "/" is not the same as of Windows "\
#Explore the netCDF file
nc_open(f_chl[1])
#Indicate variable, read from the netCDF file
var <- "chlor_a"
##Convert netCDF files to StackRaster format
chl_stack <- stack(f_chl, varname = var) #Indicate variable as chlor_a
chl_stack
proj4string(chl_stack) <- proj4string(bathy)
#Crop RasterStack by area of interest
chl_AOI <- crop(chl_stack, ext)
#Extract chl to obs
obs$chl <- NA
for (i in 1:length(f_chl)) {
# progress indicator
print(paste("Processing file",i,"from",length(f_chl),sep=" "))
data <- nc_open(f_chl[i])
# Extract information from netCDF file
# Extract date
dateini<-ncatt_get(data,0,"time_coverage_start")$value
dateend<-ncatt_get(data,0,"time_coverage_end")$value
datemean<-mean(c(as.Date(dateend,"%Y-%m-%dT%H:%M:%OSZ"),as.Date(dateini,"%Y-%m-%dT%H:%M:%OSZ")))
year <- as.integer(format(datemean, "%Y"))
month <- as.integer(format(datemean, "%m"))
obs_sub <- obs %>% filter(Year == year, Month == month)
obs[which(obs$Year == year & obs$Month == month),"chl"] <- raster::extract(chl_AOI[[i]],
as.matrix(cbind(obs_sub$HaulLongitude,obs_sub$HaulLatitude)))
}
summary(obs$chl) # 11280 observations are NA
obs$chl_month <- obs$chl
obs <- obs %>% select(-chl)
# Since a lot of chl are NA, annual chl is proposed to be used
# CHL annual --------------------------------------------------------------
##Load netCDF files
#Create a list of file
# Change the directory - paste0("_change_this_part","/") - and name pattern - pattern="*_change_this_part"
(f_chl <- list.files(path = paste0(dir_data_env2,"/year"),
pattern="*.L3m_YR_CHL_chlor_a_4km.nc",full.names=T))
#Note: Directory separation symbol of R "/" is not the same as of Windows "\
#Explore the netCDF file
nc_open(f_chl[1])
#Indicate variable, read from the netCDF file
var <- "chlor_a"
##Convert netCDF files to StackRaster format
chl_stack <- stack(f_chl, varname = var) #Indicate variable as chlor_a
chl_stack
proj4string(chl_stack) <- proj4string(bathy)
#Crop RasterStack by area of interest
chl_AOI <- crop(chl_stack, ext)
#Extract chl to obs
obs$chl_year <- NA
for (i in 1:length(f_chl)) {
# progress indicator
print(paste("Processing file",i,"from",length(f_chl),sep=" "))
data <- nc_open(f_chl[i])
# Extract information from netCDF file
# Extract date
dateini<-ncatt_get(data,0,"time_coverage_start")$value
dateend<-ncatt_get(data,0,"time_coverage_end")$value
datemean<-mean(c(as.Date(dateend,"%Y-%m-%dT%H:%M:%OSZ"),as.Date(dateini,"%Y-%m-%dT%H:%M:%OSZ")))
year <- as.integer(format(datemean, "%Y"))
obs_sub <- obs %>% filter(Year == year)
obs[which(obs$Year == year),"chl_year"] <- raster::extract(chl_AOI[[i]],
as.matrix(cbind(obs_sub$HaulLongitude,obs_sub$HaulLatitude)))
}
summary(obs$chl_year)
########################################
# Check NA ---------------------------------------------------------------
# Check
obs_NA <- obs %>% filter(is.na(Bathy) == T | is.na(GravelPercent) == T | is.na(sst) == T | is.na(chl_year) == T) %>%
group_by(HaulLatitude, HaulLongitude) %>% summarize(n = n())
range(obs_NA$HaulLatitude)
range(obs_NA$HaulLongitude)
ggplot() +
geom_sf(data = aoi) +
geom_point(data = obs_NA, aes(x = HaulLongitude, y = HaulLatitude, color = "red")) +
coord_sf(xlim = c(-3,3), ylim = c(44, 53.2))
# Most NA values are sample points on land (might be errors during input of lat/lon)
# 1 point in Biscay bay, should be out of range of substrate data
# Remove NA values in Bathy (Slope), Substrate (GravelPercent, Sand, Mud), sst, chl_year
obs <- obs %>% filter(is.na(Bathy) == F & is.na(GravelPercent) == F &
is.na(sst) == F & is.na(chl_year) == F)
# Calculate Catch, CPUE ---------------------------------------------------
obs <- obs %>% mutate(C = D + L,
CPUE = C/Dur_hour,
logCPUE = log10(CPUE))
summary(obs$CPUE) #no 0 CPUE, do not need to add 1 to log10
# Save data ---------------------------------------------------------------
# Final obs data
str(obs) #50,594 observations
# dir_data="F:/backup_to_H/R_gitlab/spatial_discards_TuanAnh/followup_jochen" # Jochen
saveRDS(obs,file=paste0(dir_data,"/","obs_final_LA_full_pred_20062019_TBB_DEF_70-99.rds")) #rds
# Adjustment of landing limitation in the final data ----------------------
# This adjustment should be conducted in the 'Thesis_data processing_landing limitation' script
# 1. No haul_limit - quota_uti = 0
# 2. For close_fishing, if haul_limit > -100kg then 0, otherwise 1
# Ray has -5000 kg haul_limit, different from the others, but reasonable to keep at 1 quota_uti.
str(obs)
# 1. Transform Quota_uti from NA to 0.
# Assumption: no landing limitation at these sample points, thus the quota utilization is 0
obs[which(is.na(obs$Quota_uti) == T),"Quota_uti"] <- 0
# 2. For close_fishing, if haul_limit > -100kg then Quota_uti = 0, otherwise 1
# The original data has Quota_uti = 0 when haul_limit = 0, otherwise 1
obs[which(obs$limit_type == "closing_fishing" & obs$haul_limit > - 100), "Quota_uti"] <- 0
View(
obs %>% filter(limit_type == "closing_fishing") #Double check
)
# Save new file for analysis
saveRDS(obs,file=paste0(dir_data,"/","obs_final_LA2_full_pred_20062019_TBB_DEF_70-99.rds"))
|
6ee4d355247f900fbbc83ff7df98eae3c80758d5 | abc470631514c9261498e960137b112259dff4ba | /inst/examples1/server.R | 2a97310e2f4b33ea99aac4dd9c894f8fa1c39d38 | [] | no_license | jonkatz2/enquery | 8c29bfb7910150aa1946a5cd1eb2cb31c40274bf | e7bcae098053aeae40555a67512ef52b12f06120 | refs/heads/master | 2021-07-08T15:52:12.219085 | 2020-07-12T13:04:58 | 2020-07-12T13:04:58 | 148,210,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 113 | r | server.R | library(shiny)
library(enquery)
options(shiny.sanitize.errors = FALSE)
function(input, output, session) {
}
|
6f189be5e7f3c15c2b84cc7f765aa92b9f421cec | 169094a8afb4b431d445101260a0c4e988d42842 | /prep_whi/FIS/fis_prep_whi.R | 87b25820e3fd62b3ee65b8a2cb4e0d86a1b5bd61 | [] | no_license | OHI-Science/whi | 60c73ab4f9eeea27e3d539a4d5863c01f08de2ce | 7658065914e1d77d0192cf6ee8b074ae849cc84e | refs/heads/master | 2018-07-05T19:40:50.153919 | 2018-05-31T22:03:39 | 2018-05-31T22:03:39 | 108,908,592 | 0 | 1 | null | 2018-04-23T19:10:07 | 2017-10-30T21:06:58 | R | UTF-8 | R | false | false | 2,413 | r | fis_prep_whi.R | #FIS prep
###ISSUE###We are missing catch data that is not associated with islands - Must have along given us data from catch blocks around island (including cross seamount)
##ISSUE resolved with updated catch data Feb 2018#
## setup: libraries, file paths ----
library(tidyverse) # install.packages('tidyverse')
dir_layers <- file.path('~/Documents/github/WHI/region2017/layers')
#naming convention of the data file: it is "goalcode_layername_assessmentYEAR.csv".
data_file <- file.path(dir_layers, 'fis_sus_updated_whi2018.csv') #old file fis_sus_scores_updated.csv fis_sus_updated_mhi2017.csv
d <- readr::read_csv(data_file)
#catch
dir_prep <- file.path('~/Documents/github/WHI/prep/FIS')
data_file <- file.path(dir_prep, 'fis_catch_whi_2018.csv')
c <- readr::read_csv(data_file)
#c<-subset(c, mus!="PMUS")
str(c)
#combine pelagic catch summarized by island into one catch estimate for all of Hawaii - pelagics and bottom fish only
library(plyr)#install.packages('plyr')
d_reef<-subset(c, mus=="CHCRT"|mus=="PHCRT")#subset data for reef fish
#d_reef<-ddply(d_reef, .(species,year,rgn_id), summarise, total=sum(catch), licenses=sum(licenses))#summarize the reef fish landings by rgn also
d_deep7<-subset(c, mus=="BMUS")
#d_deep7<-ddply(d_deep7, .(year, species), summarise, total=sum(catch))
d_pel<-subset(c, mus=="PMUS")
d_cp<-subset(c, fishery=="coastal_pelagic")
## save this local data layer in "layers" folder with the same naming convention as above format
dir_layers <- file.path('~/documents/github/whi/region2017/layers')
readr::write_csv(d_pel, file.path(dir_layers, "fis_pelagic_catch_whi2018.csv"))
readr::write_csv(d_cp, file.path(dir_layers, "fis_cp_catch_whi2018.csv"))
readr::write_csv(d_reef, file.path(dir_layers, "fis_reef_catch_whi2018.csv"))
readr::write_csv(d_deep7, file.path(dir_layers, "fis_bottom_catch_whi2018.csv"))
#readr::write_csv(d_pel_islands, file.path(dir_layers, "fis_pel_propcatch_mhi2017.csv"))
## You will see a notification in the "Git" window in RStudio once the layer is saved successfully.
#don't forget to register the layer in layers.csv
# #####all catch data in one file
# data_file <- file.path(dir_layers, 'fis_catch_mhi2017.csv')
# d <- readr::read_csv(data_file)
#
# str(d)
# d<-ddply(d, .(species, year, rgn_id), numcolwise(sum) )
# readr::write_csv(d, file.path(dir_layers, "fis_catch_island_mhi2017.csv"))
#
|
c1280a2bf1aff297814b3822e758137c90f29aa2 | b858cccaca3a09b1ac9f17416f2d2b96a4b01b53 | /stockchart/ui.R | 864e0258ddf8f9346e9fc18fa6112cb25502b916 | [] | no_license | pauljabernathy/proj1 | a962726c794553a2f96a0996015d80ba3c9b4f1d | 5ae1a3210d8cbb8b2ed1bcfe891c01e4c47c3b03 | refs/heads/master | 2016-09-03T06:56:17.714670 | 2014-06-20T16:05:27 | 2014-06-20T16:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 735 | r | ui.R | library(shiny)
source('server.R')
shinyUI(fluidPage(
titlePanel("Stock Chart Generator"),
sidebarLayout(
sidebarPanel(numericInput("initial",label = h4("Initial Investment"), value=16.66),
actionButton("oneChartButton","get one chart"),
numericInput("numRuns", label=h4("number of simulations"), value=1000),
actionButton("doSimButton","Do Simulation")
),
mainPanel(
plotOutput("stockchart"),
verbatimTextOutput("amount"),
p("Monte Carlo results"),
verbatimTextOutput("sim")
# verbatimTextOutput(renderPrint({
# input$goButton
# isolate(
# doSimulation()[1]
# )
# }))
)
)
)) |
76880ec9529bcc3d3d8d7933264819998084b451 | d11e6ae866c3518c8056358b2257e1366c14f686 | /Rolling window - SVR.R | 7d07f772315b9d21a9a9bf0727fd62e5f4ad1091 | [] | no_license | lafet1/bachelor_thesis | 48678f26d435e077eb4ebdd2372b26bc4fd63ab6 | 5200963593febd4163fcf520768e5506602a529c | refs/heads/master | 2021-09-04T17:21:43.670176 | 2018-01-20T09:13:55 | 2018-01-20T09:13:55 | 116,477,492 | 0 | 1 | null | null | null | null | ISO-8859-13 | R | false | false | 5,330 | r | Rolling window - SVR.R |
# rm(list=ls())
library(RCurl)
library(dplyr)
library(e1071) # SVR via svm()
library(kernlab) # SVR via ksvm() - caret does not support svm() with RBF kerneł
library(forecast) # ARIMA via auto.arima()
library(tseries)
library(PSF) # PSF via psf()
library(mlr) # train() for optimalization
library(emoa) # required by mlr package
library(FSelector)
library(parallelMap)
set.seed(100)
# list with measures
a <- c()
b <- c()
c <- c()
svrPerf <- list(a, b, c)
svrPred <- list()
svrFeat <- list()
svrParams <- list()
# for parameter tuning
ps <- makeParamSet(
makeNumericParam("C", lower = -12, upper = 12, trafo = function(x) 2^x),
makeNumericParam("sigma", lower = -12, upper = 12, trafo = function(x) 2^x),
makeDiscreteParam("kernel", values = c("rbfdot")),
makeNumericParam("epsilon", lower = -5, upper = 0, trafo = function(x) 10^x)
)
ctrl <- makeTuneMultiCritControlRandom(maxit = 100L)
rdesc <- makeResampleDesc("FixedCV", initial.window = 0.5) # FixedCV is for time-series CV
# for feature selection
rdesc1 <- makeResampleDesc("CV", iters = 50)
psFeatSel <- makeParamSet(makeDiscreteParam("fw.abs", values = seq(2, 6, 1)))
lrnFilter <- makeFilterWrapper(learner = "regr.ksvm", fw.method = "information.gain")
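# wrapper learner: an information-gain filter ranks the 24 lag features and
# fw.abs (tuned over 2-6 above) sets how many of them are kept for the SVR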
ctrl1 <- makeTuneMultiCritControlRandom(maxit = 50L)
# variable names
varNames <- c(paste("lag", 24:1, sep = ""), "response")
# starting parallelization
parallelStartSocket(2)
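# Rolling-window setup: each iteration i uses 504 hourly observations (3 weeks)
# of ts2 as the training window and the following 168 hours (1 week) as the test
# window, with 24 lagged values as predictors; the window then shifts forward by
# 168 hours per iteration.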
for(i in 1:100){ # loop for getting the features
# moving training data set
svmTrainResp <- ts2[(25 + (i-1)*168):(528 + (i-1)*168)] # response in training
svmTrainPred <- data.frame()
for (j in 0:503){
auxTrainPred <- c(ts2[(1 + (i-1)*168 + j):(24 + (i-1)*168 + j)]) # predictors in training
svmTrainPred <- rbind(svmTrainPred, auxTrainPred) # binding all predictors
}
svmTrain <- cbind(svmTrainPred, svmTrainResp) # finished data set
colnames(svmTrain) <- varNames
# moving testing data set
svmTestResp <- ts2[(529 + (i-1)*168):(696 + (i-1)*168)] # response in testing
svmTestPred <- data.frame()
for (j in 0:167){
auxTestPred <- c(ts2[(505 + (i-1)*168 + j):(528 + (i-1)*168 + j)]) # predictors
svmTestPred <- rbind(svmTestPred, auxTestPred) # binding all predictors
}
svmTest <- cbind(svmTestPred, svmTestResp) # finished data set
colnames(svmTest) <- varNames
# first the feature selection needs to be carried out -- selecting features
regrTr <- makeRegrTask(data = svmTrain, target = "response")
filter1a <- tuneParamsMultiCrit(learner = lrnFilter, task = regrTr, resampling = rdesc1, par.set = psFeatSel,
measures = list(mae, rmse), control = ctrl1, show.info = FALSE)
# new learner based on the found features
lrnFilter1 <- makeFilterWrapper(learner = "regr.ksvm", fw.method = "information.gain", fw.abs = filter1a$x[[1]]$fw.abs)
modelFilter <- train(lrnFilter1, regrTr)
svrFeat[[i]] <- c(getFilteredFeatures(modelFilter), "response")
print(i)
}
# stopping parallelization
parallelStop()
# starting parallelization
parallelStartSocket(2)
for(i in 1:100){ # loop for training the actual model
# moving training data set
svmTrainResp <- ts2[(25 + (i-1)*168):(528 + (i-1)*168)] # response in training
svmTrainPred <- data.frame()
for (j in 0:503){
auxTrainPred <- c(ts2[(1 + (i-1)*168 + j):(24 + (i-1)*168 + j)]) # predictors in training
svmTrainPred <- rbind(svmTrainPred, auxTrainPred) # binding all predictors
}
svmTrain <- cbind(svmTrainPred, svmTrainResp) # finished data set
colnames(svmTrain) <- varNames
# moving testing data set
svmTestResp <- ts2[(529 + (i-1)*168):(696 + (i-1)*168)] # response in testing
svmTestPred <- data.frame()
for (j in 0:167){
auxTestPred <- c(ts2[(505 + (i-1)*168 + j):(528 + (i-1)*168 + j)]) # predictors
svmTestPred <- rbind(svmTestPred, auxTestPred) # binding all predictors
}
svmTest <- cbind(svmTestPred, svmTestResp) # finished data set
colnames(svmTest) <- varNames
# parameter optimization
regrTr1 <- makeRegrTask(data = svmTrain[svrFeat[[i]]], target = "response")
svmTune <- tuneParamsMultiCrit("regr.ksvm", task = regrTr1, resampling = rdesc, par.set = ps,
measures = list(mae, rmse), control = ctrl, show.info = FALSE)
# optimized model learning
optParams <- vector()
for(k in 1:length(svmTune$x)){ # get the best set in case of multiple optima
optParams[k] <- svmTune$x[[k]]$C # high C tends to screw up predictions a bit
}
svrParams[[i]] <- svmTune$x[[which.min(optParams)]]
lrnFilter2 <- setHyperPars(makeLearner("regr.ksvm"), par.vals = svmTune$x[[which.min(optParams)]])
modelFinal <- train(lrnFilter2, task = regrTr1)
# predictions
regrTe1 <- makeRegrTask(data = svmTest[svrFeat[[i]]], target = "response")
predSVM <- predict(modelFinal, task = regrTe1)
# measures
svrPred[[i]] <- predSVM
svrPerf[[1]][i] <- sum(abs((predSVM$data$truth - predSVM$data$response)))/ length(predSVM$data$truth)
svrPerf[[2]][i] <- sum(abs((predSVM$data$truth - predSVM$data$response)
/ predSVM$data$truth)) / length(predSVM$data$truth)
svrPerf[[3]][i] <- sqrt(sum((predSVM$data$truth - predSVM$data$response)^2) / length(predSVM$data$truth))
print(i)
}
# stopping parallelization
parallelStop()
|
6073c08449eb924a9b03fafb4bd5fbc98ae4171e | fd8f64801574f35593eca598668db2a4a81ce899 | /PRMSdata/shiny_demo_MT2_vers2.R | 28cd42b6f4ce9c4fc46fcbe6f2540e32d6022913 | [] | no_license | abock80/SB_Mapping | 66a4b6267f729adfeb22f805800f518f8eb18c8f | 3e10ab25aa1658e3c95e4d6aa4fd2a71f4e63bba | refs/heads/master | 2020-05-25T15:45:45.755369 | 2016-09-02T22:34:03 | 2016-09-02T22:34:03 | 59,135,510 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,351 | r | shiny_demo_MT2_vers2.R | library(shiny)
library(leaflet)
library(RColorBrewer)
# one script - ui.r
# one script - server.r
y2030=c('ECHAM5','GENMON','Mean',NA)
y2055=c('ECHAM5','GENMON','GFDL','Mean')
y2080=c('ECHAM5','GENMON','Mean',NA)
dd2<-data.frame(y2030,y2055,y2080)
ui <- bootstrapPage(
tags$head(tags$style(
HTML('
#select {background-color: rgba(0,0,255,0.2);;}
#GCMnames {background-color: rgba(255,255,255,1);}')
)),
tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
leafletOutput("map", width = "100%", height = "100%"),
absolutePanel(top=10,right=10,
selectInput("select", label = h3("Year"),
choices = list("2030" = 1, "2055" = 2,
"2080" = 3), selected = 1),
actionButton("do", "Select Year"),
radioButtons("GCMnames", label="GCM Names", choices="",selected=""),
selectInput("colors", "Color Scheme",
rownames(subset(brewer.pal.info, category %in% c("seq", "div")))),
checkboxInput("legend", "Show legend", TRUE))
#verbatimTextOutput('summary')
)
server <- function(input, output, session) {
output$summary <- renderPrint({
print(input$selection)
print(input$target)
print(values[[input$selection]])
print(values[[input$selection]][input$target + 1])
})
gcmNames <- eventReactive(input$do,{
unlist(as.character(levels(dd2[,as.numeric(input$select)])))
})
observe({
z<-gcmNames()
updateRadioButtons(session, "GCMnames", choices = c(z), inline=TRUE,selected="")
})
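# Pressing "Select Year" triggers gcmNames(), and the observer above repopulates
# the GCM radio buttons with the models available for the chosen year.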
output$text1 <- renderText({
input$GCMnames
})
# This reactive expression represents the palette function,
# which changes as the user makes selections in UI.
colorpal <- reactive({
colorNumeric(input$colors, dep2030$MEAN_2030)
})
output$map <- renderLeaflet({
# Use leaflet() here, and only include aspects of the map that
# won't need to change dynamically (at least, not unless the
# entire map is being torn down and recreated).
leaflet(finalSegs) %>% addTiles()%>%
fitBounds(~min(Longs), ~min(Lats), ~max(Longs), ~max(Lats))
})
observe({
pal <- colorpal()
popup <- paste0("<strong>Name: </strong>",
finalSegs@data$seg_id)
leafletProxy("map",data=finalSegs) %>%
clearShapes() %>%
#addPolylines(color="red",weight=3,popup=~popup)
addPolylines(color=~pal(dep2030$MEAN_2030),weight=3,popup=~popup)
})
# Use a separate observer to recreate the legend as needed.
observe({
pal <- colorpal()
popup <- paste0("<strong>Name: </strong>",
finalSegs@data$seg_id)
proxy <- leafletProxy("map",data=finalSegs) %>%
clearShapes() %>%
#addPolylines(color="red",weight=3,popup=~popup)
addPolylines(color=~pal(dep2030$MEAN_2030),weight=3,popup=~popup)
# Remove any existing legend, and only if the legend is
# enabled, create a new one.
proxy %>% clearControls()
if (input$legend) {
pal <- colorpal()
proxy %>% addLegend(position = "bottomright",
pal = pal, values = ~dep2030$MEAN_2030
)
}
})
}
shinyApp(ui, server) |
aa465cdcd30e0912d762ce44b172ca83a2ff35bf | 1cf81a76e49517222cd309668866320ec8d6ae7d | /server.R | a7e24b63a34623cdcb4bb889e7fe2fcfd555d823 | [] | no_license | neilkutty/DC_Crime_Data | 7c94c824b72714c2ee4b87a1f903a7aa2f72bb0a | f841c20683059991a74f4a6c7e5ddbac7afd1859 | refs/heads/master | 2018-11-06T21:40:20.654794 | 2018-08-27T20:41:46 | 2018-08-27T20:41:46 | 54,295,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,451 | r | server.R | library(jsonlite)
library(ggplot2)
library(leaflet)
library(dplyr)
library(tidyr)
library(curl)
library(lubridate)
#library(rgdal)
library(caret)
########---------------------------------------------------------------------#>>>
## Retrieve the data in JSON format from opendata.dc.gov using fromJson()
dccrimejsonlite <- fromJSON('http://opendata.dc.gov/datasets/dc3289eab3d2400ea49c154863312434_8.geojson')
## use cbind() combine the list elements and create a dataframe
dc_crime_json <- cbind(dccrimejsonlite$features$properties,dccrimejsonlite$features$geometry)
## Seperate and clean lat/long columns but keep original datetime column
## --also separate REPORT_DAT column
dc_crime_lite <- dc_crime_json %>%
select(OFFENSE,SHIFT,REPORT_DAT,BLOCK,METHOD,coordinates) %>%
separate(coordinates, into = c("X", "Y"), sep = ",")%>%
separate(REPORT_DAT, into = c("Date","Time"), sep="T", remove = FALSE)%>%
mutate(Weekday = weekdays(as.Date(REPORT_DAT)),
Date = as.Date(Date),
X = as.numeric(gsub("c\\(","",X)),
Y = as.numeric(gsub("\\)","",Y)))
dc_crime_lite$DATETIME = as.POSIXct(strptime(dc_crime_lite$REPORT_DAT, tz = "UTC", "%Y-%m-%dT%H:%M:%OSZ"))
#Shiny server
function(input, output, session) {
filterData <- reactive({
if (is.null(input$mymap_bounds))
return(dc_crime_lite)
bounds <- input$mymap_bounds
latRng <- range(bounds$north, bounds$south)
lngRng <- range(bounds$east, bounds$west)
filter(dc_crime_lite, Y >= latRng[1] & Y <= latRng[2] & X >= lngRng[1] & X <= lngRng[2])
})
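# filterData() limits the crime records to the current map viewport, so every
# plot and table below reacts to panning/zooming the leaflet map.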
output$plotOffense <-
if(is.null(filterData)){
return()
}else{
renderPlot({
off <- as.data.frame(table(filterData()$OFFENSE))
off$Freq <- as.numeric(off$Freq)
off$Var1 <- factor(off$Var1)
colnames(off) <- c("OFFENSE","COUNT")
ggplot(off, aes(x=OFFENSE,y=COUNT)) +
geom_bar(stat="identity",alpha = 0.45, fill='red') +
ggtitle("Number of Crimes by Offense") +
geom_text(aes(label = off$COUNT), size = 3.5, hjust = .58, color = "black")+
coord_flip()+
scale_x_discrete(label = function(x) lapply(strwrap(x, width = 10, simplify = FALSE), paste, collapse="\n"))+
theme(axis.title=element_text(size=10),
axis.text.y = element_text(size=10, hjust = 1),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()
)
})}
output$plotSeries <-
if(is.null(filterData)){
return()
}else{
renderPlot({
ts <- filterData() %>%
select(Date) %>%
group_by(Date) %>%
summarize(count = n())
ggplot(ts[-c(nrow(ts),1),], aes(x=Date, y=count, alpha = 0.8))+
geom_line()+
geom_text(aes(label = ts[-c(nrow(ts),1),]$count), size = 3.5, hjust = .58, color = "black")+
ggtitle("Number of Crimes by Day")+
guides(alpha=FALSE)+
theme(axis.title=element_text(size=10),
axis.text.x = element_text(size = 10, angle = 45, hjust = 1))+
theme_bw()
})
}
output$plotTimeline <-
if(is.null(filterData)){
return()
}else{
renderPlot({
scat <- filterData() %>%
select(OFFENSE, Date) %>%
group_by(OFFENSE, Date) %>%
summarize(count = n())
ggplot(scat, aes(x=Date, y=count, color=OFFENSE))+
geom_point()+
ggtitle("Number of Crimes by Day by Offense")+
scale_fill_brewer("Set2")+
theme(axis.title=element_text(size=10),
axis.text.x = element_text(size = 10, angle = 45, hjust = 1),
panel.background = element_rect(fill = "white"),
strip.background = element_rect(fill = "white"),
legend.position = c(.35,.75),
legend.background = element_rect(fill=alpha('white', 0.2)))
})
}
output$plotDayTime <-
if(is.null(filterData)){
return()
}else{
renderPlot({
dt <- filterData() %>%
select(Weekday, SHIFT) %>%
group_by(Weekday, SHIFT) %>%
summarize(count = n())
dt$Weekday <- factor(dt$Weekday, levels= c("Sunday", "Monday","Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"))
dt$SHIFT <- factor(dt$SHIFT, levels= c("DAY","EVENING","MIDNIGHT"))
dt[order(dt$Weekday,dt$SHIFT),]
ggplot(dt,aes(x=SHIFT,y=count,fill=SHIFT)) +
geom_bar(stat="identity", alpha = 0.75) +
scale_fill_brewer(palette = 'Set2')+
scale_y_continuous()+
ggtitle("Number of Crimes by Day of Week and Time of Day (SHIFT)")+
facet_grid(.~Weekday)+
theme(axis.title=element_text(size=10),
axis.text.x = element_text(size = 10, angle = 45, hjust = 1),
panel.background = element_rect(fill = "white"),
strip.background = element_rect(fill = "white"),
legend.position = c(.085,.9),
legend.background = element_rect(fill=alpha('white', 0.2)))
})}
output$table1 <-
renderDataTable(options=list(pageLength=25),{
filterData()%>%
select(Weekday, SHIFT, DATETIME, BLOCK, OFFENSE, METHOD)
})
points <- eventReactive(input$resetMap,{
cbind(dc_crime_lite$X,dc_crime_lite$Y)
}, ignoreNULL = FALSE)
output$mymap <- renderLeaflet({
leaflet() %>%
addProviderTiles("OpenStreetMap.Mapnik",
options = providerTileOptions(noWrap = TRUE)
) %>%
addMarkers(data = points(),
popup = paste0("<strong>Report Date: </strong>",
dc_crime_lite$DATETIME,
"<br><strong>Offense: </strong>",
dc_crime_lite$OFFENSE,
"<br><strong>method: </strong>",
dc_crime_lite$METHOD,
"<br><strong>shift: </strong>",
dc_crime_lite$SHIFT,
"<br><strong>blocksite address: </strong><br>",
dc_crime_lite$BLOCK
),
clusterOptions = markerClusterOptions()
)
})
} |
e573e5a4d8d40e10b1fe405760d3d2d0a4273560 | 7eb711dcaf5eb7f78a21672311aab5b6877ed9f5 | /R/DE_edgeR.R | eddec501551dde6d5a68086d2995bdd941a0e3d6 | [] | no_license | xizhihui/codes | ef8d2135648e065768d7d11cc7f70d8e55da4182 | bd1beb82ff177f0b75fa320a2e637af5d1da64de | refs/heads/master | 2021-06-18T04:31:54.831817 | 2021-06-15T05:03:22 | 2021-06-15T05:03:22 | 160,029,844 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,048 | r | DE_edgeR.R | ########################################################
# Differential expression analysis with edgeR
# Usage:
# source('DE_edgeR.R')
# edgeR_DGE(exprSet, group, type)
# exprSet: expression (count) matrix
# group: sample grouping
# type: differential-analysis method, one of classical, lrt, qlf
# + classical
# + glm: likelihood ratio test / quasi-likelihood F-test
# + + quasi-likelihood (qlf): recommended for differential analysis because it controls the error rate better.
# + + likelihood (lrt): better suited to single-cell RNA-seq and to data without replicates
##################### example ###########################
# suppressPackageStartupMessages(library(edgeR))
# setwd('~/practice/180716_edgeR/')
# rawdata = read.table('rawdata.txt')
# head(rawdata)
# rawdata = rawdata[-(1:5),]
# groups = grepl('01A', colnames(rawdata))
# groups = ifelse(groups, 'tumor', 'normal')
# table(groups)
# edgeR_DGE(exprSet = rawdata, group=groups, type='classical')
########################################################
edgeR_DGE <- function(exprSet, group, type, cpm=c(100,4)) {
require(edgeR)
# build the DGEList
DGE = DGEList(counts=exprSet, group=group)
DGE.old = DGE
# build the design matrix
design = model.matrix(~group)
cat('design matrix created\n')
# CPM filter: keep genes with CPM > cpm[1] in at least cpm[2] samples
keep = rowSums(edgeR::cpm(DGE) > cpm[1]) >= cpm[2]
DGE = DGE[keep,]
cat('filtering done\n')
# normalization
DGE = calcNormFactors(DGE)
cat('normalization factors computed\n')
# check for outliers and sample relationships
png('plotMDS.png')
plotMDS(DGE, method='bcv', col=as.numeric(DGE$samples$group))
legendCol = unique(as.numeric(DGE$samples$group))
legendGroup = unique(as.character(DGE$samples$group))
legend("bottomright", legendGroup, col=legendCol, pch=20)
dev.off()
    cat('MDS plot saved\n')
if (type == 'classical') {
        # Estimate dispersion
        d = estimateCommonDisp(DGE)
        d = estimateTagwiseDisp(d)
        test = exactTest(d)
        cat('classical differential analysis done\n')
} else {
        # Estimate dispersion (GLM); the design matrix can also be passed to these estimators
d = estimateGLMCommonDisp(DGE)
d = estimateGLMTrendedDisp(d)
d = estimateGLMTagwiseDisp(d)
if (type == 'qlf') {
fit = glmQLFit(d, design)
test = glmQLFTest(fit, coef=2)
            cat('qlf differential analysis done\n')
} else if (type == 'lrt') {
fit = glmFit(d, design)
test = glmLRT(fit, coef=2)
            cat('lrt differential analysis done\n')
}
}
png('plotBCV.png')
plotBCV(d)
dev.off()
png('plotSmear.png')
de = decideTestsDGE(test, adjust.method="BH", p.value = 0.05)
tags = rownames(d)[as.logical(de)]
plotSmear(test, de.tags=tags)
abline(h=c(-4,4), col='blue')
dev.off()
    cat('BCV and Smear plots saved\nsaving results\n')
    finalDGE = topTags(test, n=nrow(exprSet))
    finalDGE = as.data.frame(finalDGE)
    write.table(file='DGE_edgeR.txt', finalDGE)
    # 'fit' only exists for the glm-based methods (qlf / lrt)
    if (type == 'classical') {
        save(test, file='DE_edgeR.RData')
    } else {
        save(test, fit, file='DE_edgeR.RData')
    }
    cat('results saved, run finished\n')
} |
6197e13a9eb5231f676caff48bf0ffd067a78080 | e203f637b3387ff74be1a950f043b99d58787852 | /4.2.R | d827eb08968e72ec935c1c6f31fa38e8998ec45b | [
"MIT"
] | permissive | tondi/stat | e274711de6a9c01ce9599e46e370a4cb5172a9b8 | 0351cb0708fd20339ff6cc7f6b21fe214b7d3e4a | refs/heads/master | 2022-12-03T10:54:20.170710 | 2020-09-01T08:26:56 | 2020-09-01T08:26:56 | 291,940,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 416 | r | 4.2.R | install.packages("psych")
results <- matrix(c(315, 101, 108, 32), nrow=2, ncol=2, byrow = TRUE)
expected <- matrix(
c(9, 3, 3, 1),
nrow=2,
ncol=2,
byrow = TRUE
)
# lm(results[2][2] ~ ., data = results)
podzielone <- results / expected
chisq.test(podzielone, expected)
# X-squared = 0.0035162, df = 1, p-value = 0.9527
# the experiment did not confirm the theoretical expectations at the significance level of 0.05
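# note: the standard way to test the observed counts directly against the theoretical
# 9:3:3:1 ratio would be a goodness-of-fit test on the raw counts, e.g.
# chisq.test(c(315, 101, 108, 32), p = c(9, 3, 3, 1) / 16)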
|
b63ede35a130622a6c3fa149b1492babff8b8264 | 287fe76a4b1fa3f147f6ad8c9ccbc70916823b39 | /Plot2.R | 12f8326dc72392bc488c720541f00449d92f7922 | [] | no_license | wheatonr/bendy-chicken | 9394b848c8de4e9e7779b2de60d8b9d401fedfd1 | 443bc85e7e873234692dd22edba92fb60c962c18 | refs/heads/master | 2020-04-12T15:23:04.400033 | 2014-09-08T06:08:13 | 2014-09-08T06:08:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 649 | r | Plot2.R | ## Exploratory Data Analysis project 2
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## compute sum by year for Baltimore only
baltimore<-aggregate(Emissions ~ year, sum,data=NEI[NEI$fips=="24510",])
##setup for png output
png(filename="plot2.png",width=480,height=480,units="px")
## plot Emissions vs year
plot(baltimore,type="p",main="Baltimore City PM25 Emissions",
ylab="PM25 (tons)",xlab="Year")
## add trendline
abline(lm(Emissions ~ year,baltimore),col="red")
## add legend
legend(x="bottomleft",legend=c("observed","trend"),col=c("black","red"),
lwd=1,lty=c(NA,1),pch=c(1,NA))
dev.off() |
7f27b7719422c4ce6384d0d64c3e14d6de4bdc73 | 80c022bc6f023dae8549f35a6759e928f99c521d | /R/stats.R | bb446cc38175f92bc90f533cc6a67881072cca1d | [] | no_license | SonmezOzan/mmfit | 37d5bf3735c431718a702f2943466df73574d9d9 | ba28ef0321c990daa39484aeafd3d8e54dbb462d | refs/heads/master | 2021-01-20T05:34:18.521092 | 2017-08-26T00:21:22 | 2017-08-26T00:21:22 | 101,451,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,024 | r | stats.R | #' summary.mmfit function and plot.mmfit function
#'
#' @param x a set of sample data
#' @param g moment function
#' @param start initial values to start with
#' @param type distribution type. It takes one of "Poisson", "power law", "Gamma", "Beta", or the mixtures "2Poisson" and "2Exponential".
#'
#' @description We can use summary.mmfit(x, g, start, type) to obtain estimated distribution parameters.
#' We use plot.mmfit(x, g, start, type) to plot fitting curve and empirical CDF with K-S confidence band.
summary.mmfit = function(x, g, start, type){
result = mmfit(x, g, start, type)
if(type == "Beta"){
cat("alpha:", result$thetahat[1], "\t", "std. error:", result$thetahatses[1], "\n")
cat("beta:", result$thetahat[2], "\t", "std. error:", result$thetahatses[2], "\n")
}
else if(type == "Poisson"){
cat("lambda:", result$thetahat[1], "\t", "std. error:", result$thetahatses[1], "\n")
}
else if(type == "Gamma"){
cat("alpha:", result$thetahat[1], "\t", "std. error:", result$thetahatses[1], "\n")
cat("beta:", result$thetahat[2], "\t", "std. error:", result$thetahatses[2], "\n")
}
else if(type == "2Poisson"){
cat("lambda1:", result$thetahat[1], "\t", "std. error:", result$thetahatses[1], "\n")
cat("lambda2:", result$thetahat[2], "\t", "std. error:", result$thetahatses[2], "\n")
cat("p:", result$thetahat[3], "\t", "std. error:", result$thetahatses[3], "\n")
}
else if(type == "2Exponential"){
cat("lambda1:", result$thetahat[1], "\t", "std. error:", result$thetahatses[1], "\n")
cat("lambda2:", result$thetahat[2], "\t", "std. error:", result$thetahatses[2], "\n")
cat("p:", result$thetahat[3], "\t", "std. error:", result$thetahatses[3], "\n")
}
else{
cat("alpha:", result$thetahat[1], "\t", "std. error:", result$thetahatses[1], "\n")
}
}
plot.mmfit = function(x, g, start, type){
result = mmfit(x, g, start, type)
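  # arrange the fitted-density comparison and the empirical CDF with K-S band side by side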
gridExtra::grid.arrange(result$denscomp, result$cdfband, ncol=2)
}
|
ef36f56e439696f0770dac34fc4e62923683c52c | 449482d6e7183a795818a689da2132a2b7ce93ed | /tests/fixtures/r-gsl/main.R | b478e733d585ca5391e983867a0957a374386ce3 | [
"Apache-2.0"
] | permissive | stencila/dockta | 8ecb0aabc63f7451b5f7fc521d7d354d70afc89d | 8fb329fb026924a00527643a6ac639651ac20329 | refs/heads/master | 2023-08-09T14:11:02.250754 | 2023-07-26T03:59:47 | 2023-07-26T03:59:47 | 151,520,823 | 57 | 11 | Apache-2.0 | 2023-08-27T10:12:16 | 2018-10-04T05:00:17 | TypeScript | UTF-8 | R | false | false | 115 | r | main.R | # A test fixture with an R package with a system requirement
# that has an array for deb requirements
library(gsl)
|
17edc18523f90a86d769816300babc93d60d1b44 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Matrices_And_Linear_Transformations_by_Charles_G._Cullen/CH1/EX1.11/Ex1_11.R | 42f15db6e278e24283152c95978b62db55471c2e | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 884 | r | Ex1_11.R | #page - 41
#section - 1.7 SPECIAL KINDS OF MATRICES
#example 11
#matrix A
A <- matrix(c(1,2,3,4,2,5,-6,7,3,-6,8,-9,4,7,-9,0), 4, 4, byrow=TRUE)
A
#matrix B
B <- matrix(c(0,1,2,3,-1,0,-4,5,-2,4,0,6,-3,-5,-6,-0), 4, 4, byrow=TRUE)
B
AT = t(A)
AT
BT = t(B)
BT
#function to compare two matrices
#symmetric check function
matsym <- function(x, y)
  is.matrix(x) && is.matrix(y) && all(dim(x) == dim(y)) && all(x == y)
#skew symmetric check function
matskew <- function(x, y)
  is.matrix(x) && is.matrix(y) && all(dim(x) == dim(y)) && all(x == -y)
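#note: base R's isSymmetric() could also be used for the symmetric check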
#condition check
if(matsym(AT, A)){
print("A is a symmetric matrix")
}else if(matskew(AT, A)){
print("A is a skew symmetric matrix")
}else{
print("none")
}
if(matsym(BT, B)){
print("B is a symmetric matrix")
}else if(matskew(BT, B)){
print("B is a skew symmetric matrix")
}else{
print("none")
} |
659176b81751b2f1bdffd7bdc1a42497fd10ec23 | 70c0910f9cdcfe59e2dc93b20b813df0c740845f | /man/print.AR.Rd | 34be389633b8281d7a0f1620b0ec07181c286bae | [] | no_license | michaldanaj/MDBinom | a21ca1e7d486168922d32eac641fb1df1d518875 | 082a583fc9905ea13acfda382a8a9c31ddf3573c | refs/heads/master | 2021-07-25T01:09:00.904252 | 2020-06-11T21:17:28 | 2020-06-11T21:17:28 | 52,234,495 | 0 | 0 | null | 2019-04-24T19:04:29 | 2016-02-21T23:49:48 | R | UTF-8 | R | false | true | 353 | rd | print.AR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MDBinom.r
\name{print.AR}
\alias{print.AR}
\title{Displays the statistics stored in an \code{\link{AR}} object}
\usage{
\method{print}{AR}(x)
}
\arguments{
\item{x}{an object of class \code{AR}.}
}
\description{
Displays the statistics stored in an \code{\link{AR}} object
}
|
1062f17a3812e43774ce146d3e4dc86679425ac6 | 402c51ab42489645ab08567448a2c4cba1096712 | /arima-model.R | 7e73568f03955fd77fd175ecb9127d77a89c7a49 | [] | no_license | bnouyrigat/forecasting-bike-sharing | bca17309548b395ebb5221704c0ba5ee244a10d5 | 9a75106b217f02f946faca29c198a8cd6cdafd84 | refs/heads/master | 2020-03-20T22:27:24.034067 | 2018-06-19T18:45:32 | 2018-06-19T18:45:32 | 137,799,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,059 | r | arima-model.R | library('ggplot2')
library('forecast')
library('tseries')
daily_data = read.csv('./day.csv', header=TRUE, stringsAsFactors=FALSE)
daily_data$Date = as.Date(daily_data$dteday)
ggplot(daily_data, aes(Date, cnt)) + geom_line() + scale_x_date('month') + ylab("Daily Bike Checkouts") + xlab("")
count_ts = ts(daily_data[, c('cnt')])
daily_data$clean_cnt = tsclean(count_ts)
ggplot() + geom_line(data = daily_data, aes(x = Date, y = clean_cnt)) + ylab('Cleaned Bicycle Count')
daily_data$cnt_ma = ma(daily_data$clean_cnt, order=7) # using the clean count with no outliers
daily_data$cnt_ma30 = ma(daily_data$clean_cnt, order=30)
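# compare the raw counts with the weekly and monthly moving averages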
ggplot() +
geom_line(data = daily_data, aes(x = Date, y = clean_cnt, colour = "Counts")) +
geom_line(data = daily_data, aes(x = Date, y = cnt_ma, colour = "Weekly Moving Average")) +
geom_line(data = daily_data, aes(x = Date, y = cnt_ma30, colour = "Monthly Moving Average")) +
ylab('Bicycle Count')
count_ma = ts(na.omit(daily_data$cnt_ma), frequency=30)
decomp = stl(count_ma, s.window="periodic")
deseasonal_cnt <- seasadj(decomp)
plot(decomp)
adf.test(count_ma, alternative = "stationary")
Acf(count_ma, main='')
Pacf(count_ma, main='')
count_d1 = diff(deseasonal_cnt, differences = 1)
plot(count_d1)
adf.test(count_d1, alternative = "stationary")
Acf(count_d1, main='ACF for Differenced Series')
Pacf(count_d1, main='PACF for Differenced Series')
fit<-auto.arima(deseasonal_cnt, seasonal=FALSE)
tsdisplay(residuals(fit), lag.max=45, main='(1,1,1) Model Residuals')
fit2 = arima(deseasonal_cnt, order=c(1,1,7))
fit2
tsdisplay(residuals(fit2), lag.max=15, main='Seasonal Model Residuals')
fcast <- forecast(fit2, h=30)
plot(fcast)
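# hold out the last 25 observations (700-725), refit, and compare the forecast with the held-out data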
hold <- window(ts(deseasonal_cnt), start=700)
fit_no_holdout = arima(ts(deseasonal_cnt[-c(700:725)]), order=c(1,1,7))
fcast_no_holdout <- forecast(fit_no_holdout,h=25)
plot(fcast_no_holdout, main=" ")
lines(ts(deseasonal_cnt))
fit_w_seasonality = auto.arima(deseasonal_cnt, seasonal=TRUE)
fit_w_seasonality
seas_fcast <- forecast(fit_w_seasonality, h=30)
plot(seas_fcast)
|
5824fd37693fb2a86b34e4954331a1746cb91093 | bf864ce7dc7edced7e8eca28a43ece359e2cda21 | /string_manipulation/dnaORrna.R | d82a9caff454fbe9ad2767b3a9f8947d3e66b2fc | [] | no_license | inambioinfo/r-codes | 557367f1942f19847bf18209380c42c62d49db6f | 8720b1ad1d27143a3ea01d1bb7ad78c2eda449e8 | refs/heads/master | 2020-06-13T09:54:42.257218 | 2018-10-13T05:54:02 | 2018-10-13T05:54:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 793 | r | dnaORrna.R | dnaORrna <- function(sequence){
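  # Classify a nucleotide sequence as 'DNA', 'RNA', 'DNA or RNA' (ambiguous) or invalid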
#-------
sequence <- toupper(sequence)
#-------
#sequence <- 'ATGC'
nuc <- strsplit(sequence, split='')[[1]]
if (length(nuc) == 0){
stop("No sequence is given!!!")
}
#-------
conflictSeq <- intersect(c('T','U'),nuc)
  if(length(conflictSeq) == 0 && length(setdiff(nuc, c('A','G','C'))) == 0){
    # no T or U present and only A/G/C remain: could be either DNA or RNA
    output <- 'DNA or RNA'
}else{
#check for U
rna <- length(which(nuc == "U"))
if(rna >= 1){
output <- 'RNA'
rna.nuc <- c('A','U','G','C')
if(length(setdiff(nuc,rna.nuc))==0){
output <- 'RNA'
}else{
output <- 'The given sequence is not correct.'
}
}else{
dna.nuc <- c('A','T','G','C')
if(length(setdiff(nuc,dna.nuc))==0){
output <- 'DNA'
}else{
output <- 'The given sequence is not correct.'
}
}
}
return(output)
} |
ec716682441c9544badc12e4d6b6f1cc8c050159 | 1fb7ddd7f291bc6e2300e97086c450398c6e1636 | /Plot6.R | 62eb662e5d41257c519245067ab2b373cc635d26 | [] | no_license | Belphegorus/ExpDatAn_CP2 | acb1f247728ffbd04572207bae1d041dfdf01ad4 | ba2f9bb5d7fb3c028df954117273d94230998680 | refs/heads/master | 2016-09-01T07:33:49.234086 | 2016-03-27T17:27:46 | 2016-03-27T17:27:46 | 54,839,692 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,087 | r | Plot6.R |
# Compare emissions from motor vehicle sources in Baltimore City with emissions from motor
# vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
library(ggplot2)
sum_ssc<- readRDS("summarySCC_PM25.rds")
Co <- readRDS("Source_Classification_Code.rds")
Merged<- merge(sum_ssc, Co, by="SCC")
fps <- sum_ssc[(sum_ssc$fips=="24510"|sum_ssc$fips=="06037") & sum_ssc$type=="ON-ROAD", ]
aggrYFps <- aggregate(Emissions ~ year + fips, fps, sum)
aggrYFps$fips[aggrYFps$fips=="24510"] <- "Baltimore, MD"
aggrYFps$fips[aggrYFps$fips=="06037"] <- "Los Angeles, CA"
png("plot6.png", width=1040, height=480)
g <- ggplot(aggrYFps, aes(factor(year), Emissions))
g <- g + facet_grid(. ~ fips)
g <- g + geom_bar(stat="identity") +
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions from motor vehicle (type=ON-ROAD) in Baltimore City, MD (fips = "24510") vs Los Angeles, CA (fips = "06037") 1999-2008')
print(g)
dev.off() |
c723fc38274ae55aeec9301c4db81ad84978f249 | c5de5d072f5099e7f13b94bf2c81975582788459 | /R Extension/RMG/Energy/Trading/Congestion/NEPOOL/ISO_Data/EnergyOffer/lib.investigate.energy.offers.R | 9750f7b40a54ce525d3205f0a6e055d192629131 | [] | no_license | uhasan1/QLExtension-backup | e125ad6e3f20451dfa593284507c493a6fd66bb8 | 2bea9262841b07c2fb3c3495395e66e66a092035 | refs/heads/master | 2020-05-31T06:08:40.523979 | 2015-03-16T03:09:28 | 2015-03-16T03:09:28 | 190,136,053 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,783 | r | lib.investigate.energy.offers.R | #
# EO.by_maskedNodeId - extract one unit in P, MW, segment form
# EO.by_month - cast the EO by month in P, MW, segment form
# EO.clean_PQ_pairs_of_NAs
# EO.get_unique_participantsMonth
#
# analyze_NorthfieldMountain
# analyze_SalemHarbor
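# analyze_Brayton, analyze_Manchester, analyze_Mystic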
#
# get_cleared_mw_lessThanHub
# get_cleared_mw_lessThanHR
# get_must_take_mw
#
# subsetMaskedNodeId - filter files by a MaskedNodeId
# subsetParticipantId - filter files by a participant id
#
##############################################################
# One masked ID at a time, sift through the monthly files,
# Cast the data to keep only the P, MW, segment
# If it's been processed already don't do it again.
#
EO.by_maskedNodeId <- function(maskedNodeId, redo=FALSE)
{
files <- list.files(DIR_EO, full.names=TRUE, pattern="^DB_")
fname <- paste(DIR_EO, "Split/", maskedNodeId, ".RData", sep="")
if (file.exists(fname)){
load(fname)
# subset existing data with the files
uMonths <- unique(format(EO$datetime - 60*60, "%Y%m")) # time is HE!
if (redo) {
fProcessed <- NULL
} else {
fProcessed <- paste(DIR_EO, "DB_", uMonths, ".RData", sep="")
}
files <- setdiff(files, fProcessed)
if (length(files)==0){
rLog("All done. Exiting")
return()
}
} else {
EO <- NULL # energy offers variable
}
X <- finalize(files, subsetMaskedNodeId, agg="rbind", maskedNodeId)
X <- EO.clean_PQ_pairs_of_NAs(X)
cnames <- as.vector(outer(c("P.", "MW."), 1:10, paste, sep=""))
ind <- c(1, 3, which(colnames(X) %in% cnames))
X <- X[, ind]
ind <- which(is.na(X$datetime))
if (length(ind)>0)
X <- X[-ind, ] # some NA crap from daylight savings
Y <- melt(X, id=1:2)
Y <- subset(Y, !is.na(value))
Y$segment <- gsub(".*\\.(.*)", "\\1", Y$variable)
Y$variable <- gsub("(.*)\\..*", "\\1", Y$variable)
#subset(Y, datetime == as.POSIXct("2011-03-14 01:00:00"))
Y <- cast(Y, datetime + nodeId + segment ~ variable, I, fill=NA) # 2 mins
Y <- Y[order(Y$datetime, Y$P),]
Y <- data.frame(Y)
# put them together
EO <- rbind(EO, Y)
EO <- EO[order(EO$datetime, EO$P),]
# save the file back
save(EO, file=fname)
rLog("Wrote file", fname)
}
##############################################################
# make the long file ...
#
EO.by_month <- function(fname, outdir, NO_BLOCKS=4)
{
rLog("Working on", fname)
month <- gsub("DB_(.*)\\.RData", "\\1", basename(fname))
fnameLong <- paste(outdir, "RData/Long/long_", month, ".RData", sep="")
if (file.exists(fnameLong)){
rLog("File exists. Done.")
return()
}
load(fname)
cnames <- as.vector(outer(c("P.", "MW."), 1:10, paste, sep=""))
ind <- which(colnames(res) %in% c("datetime", "nodeId", cnames))
res <- res[, ind]
ind <- which(is.na(res$datetime))
if (length(ind)>0)
res <- res[-ind, ] # some NA crap from daylight savings
uNodeId <- unique(res$nodeId)
BLOCK_SIZE <- ceiling(length(uNodeId)/NO_BLOCKS)
EO <- vector("list", length=NO_BLOCKS)
for (b in 1:NO_BLOCKS){
rLog(" block", b)
ind <- ((b-1)*BLOCK_SIZE+1):min((b*BLOCK_SIZE),length(uNodeId))
nodes <- uNodeId[ind]
aux <- melt(subset(res, nodeId %in% nodes), id=1:2)
aux <- subset(aux, !is.na(value))
aux$segment <- gsub(".*\\.(.*)", "\\1", aux$variable)
aux$variable <- gsub("(.*)\\..*", "\\1", aux$variable)
aux <- cast(aux, datetime + nodeId + segment ~ variable, I, fill=NA)
# some extra values due to daylight savings time
#aux <- aux[order(aux$datetime, aux$P),]
EO[[b]] <- data.frame(aux)
}
EO <- do.call("rbind", EO)
EO <- EO[order(EO$datetime, EO$nodeId, EO$P),]
rownames(EO) <- NULL
save(EO, file=fnameLong)
rLog("Wrote", fnameLong)
}
##############################################################
EO.clean_PQ_pairs_of_NAs <- function(X)
{
cnames <- as.vector(outer(c("P.", "MW."), 1:10, paste, sep=""))
ind <- which(colnames(X) %in% cnames)
ind.rm <- NULL # remove P,Q segments with NA values
for (i in ind){
if (all(is.na(X[,i])))
ind.rm <- c(ind.rm, i)
}
if (length(ind.rm)>0)
X <- X[,-ind.rm]
X
}
##############################################################
EO.get_unique_participantsMonth <- function(files=NULL)
{
if (is.null(files))
files <- list.files(DIR_EO, full.names=TRUE, pattern="^DB_")
FUN <- function(file=file){
yyyymm <- gsub(".*_(.*)\\.RData", "\\1", file)
month <- as.Date(paste(yyyymm, "01", sep=""), format="%Y%m%d")
load(file)
aux <- data.frame(month=as.character(month),
participantId=sort(unique(res$participantId)))
aux
}
res <- finalize(files, FUN)
fileOut <- paste(DIR_EO,"../uParticipantsMonth.csv", sep="")
write.csv(res, file=fileOut, row.names=FALSE)
rLog("Wrote file", fileOut)
res
}
##############################################################
# Unit 1 = 250 MW coal
# Unit 2 = 250 MW coal
# Unit 3 = 640 MW coal
# Unit 4 = 445 MW oil
analyze_Brayton <- function()
{
maskedNodeIds <- c(43551, 59958, 23789, 43414)
names(maskedNodeIds) <- c("Unit 1", "Unit 2", "Unit 3", "Unit 4 (Oil)")
for (id in maskedNodeIds) EO.by_maskedNodeId(id) # extract the Energy Offers
files <- paste(DIR_EO, "Split/", maskedNodeIds, ".RData", sep="")
EO <- finalize(files, function(file=file){load(file); EO})
EO <- merge(EO, HUB[,c("datetime", "hub")], by="datetime")
EO <- merge(EO, data.frame(nodeId=maskedNodeIds, unit=names(maskedNodeIds)))
# pick only one hour during the day, for the coal units only
EO$HE <- as.numeric(format(EO$datetime, "%H"))
eo <- subset(EO, HE==17 & nodeId %in% c(43551, 59958, 23789))
# get cleared mw less than hub price
qL <- cast(EO, datetime ~ ., sum, value="MW", subset=P<=hub)
names(qL)[2] <- "MW<Hub"
summary(qL$`MW<Hub`)
xyplot(`MW<Hub` ~ datetime, data=qL, type=c("g", "l"),
subset=datetime >= as.POSIXct("2008-01-01"))
# get the average offer price of the coal units
aux <- subset(EO, nodeId %in% c(43551, 59958, 23789))
avgP <- ddply(aux, .(datetime), function(x){
data.frame(avgP = sum(x$MW * x$P)/sum(x$MW),
minP = min(x$P),
maxP = max(x$P))})
# plot the price of first block
aux <- subset(EO, datetime >= as.POSIXct("2008-01-01"))
xyplot(P ~ datetime | unit, data=aux,
subset=segment=="1" & nodeId %in% c(43551, 59958, 23789) & P < 100,
layout=c(1,3), type=c("g", "p"),
ylab="Price of first energy offer point, $/MWh",
xlab="",
main="Brayton Point, coal units")
aux <- aux[order(aux$unit, aux$segment, aux$datetime),]
xyplot(P ~ datetime | unit, data=aux, groups=segment,
subset=nodeId %in% c(43551) & P < 300,
layout=c(1,1), type=c("g", "l"),
ylab="Price of first energy offer point, $/MWh",
xlab="",
main="Brayton Point, coal units")
# how does quantity cleared correlates with hub prices?
res <- merge(qL, HUB[,c("datetime", "hub")], by="datetime")
res <- merge(res, avgP, by="datetime")
plot(res$hub, res$`MW<Hub`)
aux <- melt(res[,-2], id=1)
xyplot(value ~ datetime, data=aux, groups=variable,
type=c("g", "l"),
subset=datetime >= as.POSIXct("2008-01-01"),
ylim=c(0, 220))
xyplot(minP ~ datetime, data=res, type=c("g", "l"),
subset=datetime >= as.POSIXct("2008-01-01"),
ylim=c(0, 220))
}
##############################################################
#
analyze_Manchester <- function()
{
maskedNodeIds <- c(37274, 92137, 72183)
for (id in maskedNodeIds) EO.by_maskedNodeId(id) # extract the Energy Offers
files <- paste(DIR_EO, "Split/", maskedNodeIds, ".RData", sep="")
totalQ <- get_cleared_mw_lessThanHub(files, HUB)
hr <- seq(6, 19, by=1)
aux <- subset(totalQ, datetime > as.POSIXct("2010-05-01"))
plot(aux$datetime, aux$MW, type="l", col="blue",
main="Manchester CC", cex.main=1, ylab="MW")
}
##############################################################
#
analyze_Mystic <- function()
{
maskedNodeIds <- c(60658, 11009, 72020) # Mystic 7, 8, 9 in this order
for (id in maskedNodeIds) EO.by_maskedNodeId(id) # extract the Energy Offers
files <- paste(DIR_EO, "Split/", maskedNodeIds, ".RData", sep="")
totalQ <- get_cleared_mw_lessThanHub(files, HUB)
# only Mystic7
files <- paste(DIR_EO, "Split/", maskedNodeIds[1], ".RData", sep="")
totalQ <- get_cleared_mw_lessThanHub(files, HUB)
totalQ <- zoo(totalQ$MW, totalQ$datetime)
aux <- subset(totalQ, datetime > as.POSIXct("2008-01-01"))
plot(aux$datetime, aux$MW, type="l", col="blue",
main="Mystic 7", cex.main=1, ylab="MW in the money")
}
##############################################################
# pass in the
analyze_NorthfieldMountain <- function(X)
{
maskedNodeIds <- c(53298, 60188, 66602, 83099)
files <- paste(DIR_EO, "Split/", maskedNodeIds, ".RData", sep="")
EO <- finalize(files, function(file=file){load(file); EO})
EO <- merge(EO, HUB[,c("datetime", "hub")], by="datetime")
EO$nodeId <- factor(EO$nodeId)
xyplot(P ~ datetime | nodeId, data=EO, groups=segment,
xlab="", ylab="Price", layout=c(1,4))
windows()
xyplot(P ~ datetime | nodeId, data=EO, groups=segment,
xlab="", ylab="Price", layout=c(2,2),
subset=datetime >= as.POSIXct("2011-01-01"))
day <- as.Date("2011-04-15")
DD <- subset(EO, (datetime >= as.POSIXct(paste(day, "01:00:00")) &
datetime <= as.POSIXct(paste(day+1, "00:00:00"))) )
DD$HE <- format(DD$datetime, "%H")
DD <- DD[order(DD$datetime, DD$nodeId, DD$segment), ]
split(DD, DD$HE)
subset(EO, datetime >= as.POSIXct(paste(day, "01:00:00")) )
#EO <- merge(EO, data.frame(nodeId=maskedNodeIds, unit=names(maskedNodeIds)))
aux <- subset(Y, cMW >= 810)
p810 <- ddply(aux, .(datetime), function(x){x$P[1]}) # 30 secs
plot(p810$datetime, p810[,2], type="l", col="blue")
aux <- Y[1:120,]
aux$cMW <- unlist(tapply(aux$MW, aux$datetime, cumsum))
# price of the first 3 units
p810 <- ddply(aux, .(datetime), function(x){
#browser()
res <- NA
ind <- which(cumsum(x$MW) >= 810)
if (length(ind)>0){
res <- x$P[ind[1]]
}
return(res)
})
}
##############################################################
#
analyze_SalemHarbor <- function()
{
maskedNodeIds <- c(79866, 34993, 16337, 45823)
for (id in maskedNodeIds) EO.by_maskedNodeId(id) # extract the Energy Offers
files <- paste(DIR_EO, "Split/", maskedNodeIds, ".RData", sep="")
totalQ <- get_cleared_mw(files, HUB)
aux <- subset(totalQ, datetime > as.POSIXct("2010-05-01"))
plot(aux$datetime, aux$MW, type="l", col="blue",
main="Salem Harbor", cex.main=1, ylab="MW")
}
#####################################################################
# sum up the offered MWs with prices < Hub price for that hour
#
#
get_cleared_mw_lessThanHub<- function(files, HUB)
{
EO <- finalize(files, function(file=file){load(file); EO})
EO <- merge(EO, HUB[,c("datetime", "hub")], by="datetime")
EO <- subset(EO, P <= hub)
totalQ <- cast(EO, datetime ~ ., sum, value="MW")
names(totalQ)[2] <- "MW"
totalQ
}
#####################################################################
# sum up the bid quantity only for bids < gasPrice*HR for that hour
# hr can be a vector
#
get_cleared_mw_lessThanHR <- function(files, HUB, hr)
{
EO <- finalize(files, function(file=file){load(file); EO})
EO <- merge(EO, HUB[,c("datetime", "gas")], by="datetime")
QQ <- vector("list", length=length(hr))
for (i in seq_along(hr)){
aux <- subset(EO, P <= gas*hr[i])
aux <- cast(aux, datetime ~ ., sum, value="MW")
names(aux)[2] <- "MW"
aux$hr <- hr[i]
QQ[[i]] <- aux
}
QQ <- do.call("rbind", QQ)
QQ
}
#####################################################################
# Define self scheduled as MW offered as must take + MW offered at
# a HR < 4
#
get_must_take_mw <- function(plots=FALSE)
{
subsetFun <- function(file=file){
load(file)
mustTake <- subset(res, Must.Take.Energy != 0)
mustTake <- cast(mustTake, datetime ~ ., sum, value="Must.Take.Energy")
as.data.frame(mustTake)
}
# load all ISO files with demand bids, filter by constellation
files <- list.files(DIR_EO, full.names=TRUE, pattern="^DB_")
res <- finalize(files, subsetFun)
names(res)[2] <- "mustTakeEnergy"
if (plots){
aux <- subset(res, datetime > as.POSIXct("2010-01-01"))
plot(aux$datetime, aux$mustTakeEnergy, type="l")
aux <- res
aux$date <- as.Date(res$datetime)
aux <- cast(aux, date ~ ., mean, value="mustTakeEnergy")
names(aux)[2] <- "mustTakeEnergy"
plot(aux$date, aux$mustTakeEnergy, type="l")
aux$year <- format(aux$date, "%Y")
aux$month <- as.numeric(format(aux$date, "%m"))
xyplot(mustTakeEnergy ~ month|year, data=aux, layout=c(3,1))
aux <- res
aux$date <- as.Date(res$datetime)
aux <- cast(aux, date ~ ., mean, value="mustTakeEnergy")
names(aux)[2] <- "mustTakeEnergy"
aux$yyyymm <- format(aux$date, "%Y-%m")
aux$year <- format(aux$date, "%Y")
aux$month <- as.numeric(format(aux$date, "%m"))
bwplot(mustTakeEnergy ~ year | month, data=aux)
}
res
}
subsetMaskedNodeId <- function(file=file, maskedNodeIds){
load(file)
res <- subset(res, nodeId %in% maskedNodeIds)
as.data.frame(res)
}
subsetParticipantId <- function(file=file, maskedParticipantId){
load(file)
res <- subset(res, participantId == maskedParticipantId)
as.data.frame(res)
}
.clean.files <- function()
{
files <- list.files(paste(DIR_EO, "Long", sep=""), full.names=TRUE,
pattern="^long_")
for (f in files){
load(f)
EO <- Y
rownames(EO) <- NULL
save(EO, file=f)
}
}
|
8b025ebb4f9bff1f052412d5670e1245b218b0ec | 258bdbfae973089c337fd31c6162ccb929714043 | /NC140/root-distribution.R | 9f61a35b1fb907cda09f7dc4ac579b5f2af3e303 | [] | no_license | weecology/branch-arch | 897a4df4953561a4f90e754d5c8f2873bce046cb | d8f4d518e3f5165f810ee845d10d90a3d95acf71 | refs/heads/master | 2021-09-05T21:56:14.751742 | 2021-08-05T21:13:05 | 2021-08-05T21:13:05 | 12,822,147 | 1 | 0 | null | 2016-01-07T22:02:05 | 2013-09-14T00:36:11 | HTML | UTF-8 | R | false | false | 1,955 | r | root-distribution.R | ### This script generates Root Biomass Distribution plots for within and between
### row transects
source('~/Desktop/branch-arch/NC140/root-yield.R', echo = FALSE)
vplayout <- function(x,y){
viewport(layout.pos.row = x, layout.pos.col = y)
}
within_plots <- c()
for (i in c(1:5)){
rootstock_within <- filter(roots_depth,
rootstock == unique(rootstocks)[i],
direction == "within")
plot <- ggplot(rootstock_within,
aes(x = distance, y = (-1*max_depth), z = total_roots))
within_plots[[i]] <- plot +
stat_contour(geom="polygon", aes(fill=..level..), bins = 30) +
labs(title = unique(rootstocks)[i])
}
pdf('Within-Row-Total-Mass.pdf', width= 14, height=7, family="Helvetica", pointsize=12)
grid.newpage()
pushViewport(viewport(layout = grid.layout(2,3)))
print(within_plots[[1]], vp = vplayout(1,1))
print(within_plots[[2]], vp = vplayout(1,2))
print(within_plots[[3]], vp = vplayout(1,3))
print(within_plots[[4]], vp = vplayout(2,1))
print(within_plots[[5]], vp = vplayout(2,2))
dev.off()
between_plots <- c()
for (i in c(1:5)){
rootstock_between <- filter(roots_depth,
rootstock == unique(rootstocks)[i],
direction == "between")
plot <- ggplot(rootstock_between,
aes(x = distance, y = (-1*max_depth), z = total_roots))
between_plots[[i]] <- plot +
stat_contour(geom="polygon", aes(fill=..level..), bins = 30) +
labs(title = unique(rootstocks)[i])
}
pdf('Between-Row-Total-Mass.pdf', width= 14, height=7, family="Helvetica", pointsize=12)
grid.newpage()
pushViewport(viewport(layout = grid.layout(2,3)))
print(between_plots[[1]], vp = vplayout(1,1))
print(between_plots[[2]], vp = vplayout(1,2))
print(between_plots[[3]], vp = vplayout(1,3))
print(between_plots[[4]], vp = vplayout(2,1))
print(between_plots[[5]], vp = vplayout(2,2))
dev.off() |
e6de055619e373aa46b951e188803ae92d724b2b | ccbb0b70874ac382e948d940a178793d263eb68c | /genuary/2021/2021-23/deprecated/2021-23d.R | b4fdc848452c5d07f4a6560a38cd0a48271a1ea5 | [
"MIT"
] | permissive | joelfishbein/aRtist | 125f6b96031e64a819eb8330645bd3a7b98d8b63 | 5da0d46d0c618d28243f69d67e22cfa10e911cbd | refs/heads/main | 2023-08-20T19:51:15.859878 | 2021-10-21T02:54:15 | 2021-10-21T02:54:15 | 360,969,255 | 0 | 0 | MIT | 2021-04-23T18:11:26 | 2021-04-23T18:11:25 | null | UTF-8 | R | false | false | 1,263 | r | 2021-23d.R | # https://bookdown.org/rdpeng/RProgDA/building-new-graphical-elements.html
library(grid)
GeomMyPoint <- ggproto("GeomMyPoint", Geom,
required_aes = c("x", "y"),
default_aes = aes(shape = 1),
draw_key = draw_key_point,
draw_panel = function(data, panel_scales, coord) {
## Transform the data first
coords <- coord$transform(data, panel_scales)
## Construct a grid grob
polygonGrob(
x = coords$x,
y = coords$y
)
})
geom_mypoint <- function(mapping = NULL, data = NULL, stat = "identity",
position = "identity", na.rm = FALSE,
show.legend = NA, inherit.aes = TRUE, ...) {
ggplot2::layer(
geom = GeomMyPoint, mapping = mapping,
data = data, stat = stat, position = position,
show.legend = show.legend, inherit.aes = inherit.aes,
params = list(na.rm = na.rm, ...)
)
}
ggplot(data = cube_df, aes(x, y, group = zorder)) +
geom_mypoint(fill = "pink") +
coord_fixed()
|
3a9fd62f4fd59a26e460ae033d07f07555be9e58 | 41f37ff43cc885a0d99aefe33ee3af25f7ab7402 | /plotit2.r | 34d33104c64c347a8119856b1a58b2596687f58c | [] | no_license | jgarofoli/network-ping-analysis | d1b3d846752cb1543b97c960cfc8dd72d4f50f15 | ed521bec05704e01000a7fc4d3fc82b0ccdeadb8 | refs/heads/master | 2021-01-10T20:39:31.104591 | 2015-01-25T03:48:19 | 2015-01-25T03:48:19 | 29,743,918 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,431 | r | plotit2.r | mkreport <- function(fname) {
mydata <- read.table(fname)
x <- (mydata[,1] - mydata[1,1])/60./60.
y <- mydata[,3]
#pdf('figure1.pdf')
quartz()
par(mfrow=c(2,2))
plot(y~x,ylab="pings returned (out of 10)",xlab="time (hr)",main=fname,
pch=16,col=rgb(0,0,0,1/8))
#dev.off()
slices <- hist(y,plot=F)#ylab="entries [N]",xlab="number of successful pings [out of 4]", main="dist. of successful pings",plot=T)
nsl <- c(sum(slices$density[1:7]),slices$density[8],slices$density[9],slices$density[10])
lbls <- c("<7","8","9","10")
pie(nsl,labels=lbls,main="fraction of\nreturned pings")
timedelta <- diff(mydata$V1,2)
hist(timedelta,plot=T,
xlab='time between ping calls [sec]',
ylab='entries [N]',
main="distribution of time\nbetween ping requests",
breaks=0:60)
selZero <- mydata$V1[mydata$V3 <= 7]
selZeroDelta <- diff(selZero,2)
hist(selZeroDelta,plot=T,
breaks=seq(0,max(selZeroDelta)+100,30),
main="distribution of time\n between <7 pings returned events",
ylab="entries [N/30 seconds]",
xlab="time between total failures [sec]",
)
quartz()
par(mfrow=c(2,2))
hist(mydata$V4,plot=T,
xlab="avg round trip ping time [ms]",
ylab="entries [N]",
main="ping time",
breaks=seq(0,1000,5),
xlim=c(0,100))
mydata
}
|
0f096882478525c32f8dfb4c20d373bfec24cd44 | 198eddc28dd4b9cd2d02c294f6f77c2baa8d91e1 | /man/ggchisq_res.Rd | 1283bc1de8ba42fb2126cf5087699dd64542713b | [] | no_license | xtmgah/JLutils | 2c2819e78a70e0f0afeac385ecdfba003841fbef | 20eb88f01f88a73a12d9a4c4a071e5fc9f796c2d | refs/heads/master | 2021-05-18T17:53:23.560462 | 2020-03-16T09:42:24 | 2020-03-16T09:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,512 | rd | ggchisq_res.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggchisq_res.R
\name{ggchisq_res}
\alias{ggchisq_res}
\title{Chi-squared residuals matrix plot}
\usage{
ggchisq_res(
formula,
data,
weight = NULL,
addNA = FALSE,
label = NULL,
breaks = c(-4, -2, 2, 4),
palette = "RdBu",
return_data = FALSE
)
}
\arguments{
\item{formula}{formula of variables to be cross-tabulated, rows on left hand side and columns on the right hand side}
\item{data}{data frame containing the data}
\item{weight}{optional string indicating a column containing weights}
\item{addNA}{whether to include NA values in the tables}
\item{label}{optional cell labels (see examples)}
\item{breaks}{how to recode residuals into categories?}
\item{palette}{Brewer colour palette (see \url{http://colorbrewer2.org})}
\item{return_data}{return computed data.frame instead of ggplot?}
}
\value{
a ggplot graphic or a data frame if \code{return_data == TRUE}.
}
\description{
Chi-squared residuals matrix plot
}
\examples{
ggchisq_res(
Sex + Age + Class ~ Survived,
data = as.data.frame(Titanic),
weight = "Freq")
ggchisq_res(
Sex + Age + Class ~ Survived,
data = as.data.frame(Titanic),
weight = "Freq",
return_data = TRUE)
ggchisq_res(
Sex + Age + Class ~ Survived,
data = as.data.frame(Titanic),
weight = "Freq",
label = "scales::percent(row.prop)")
ggchisq_res(
Sex + Age + Class ~ Survived,
data = as.data.frame(Titanic),
weight = "Freq",
breaks = c(-4, -2, 0, 2, 4))
}
|
3f64fd5cc5e7dc7bb5aafb4aec80e9139d545401 | 340d2dd9b14fdb3d3e4d70a69389b6bf70faf0c7 | /scripts/4_excess_deaths_global_estimates_export_for_interactive.R | 9989dd2cfdf6bfd4dd2a836d549668280e6a8380 | [
"MIT"
] | permissive | TheEconomist/covid-19-the-economist-global-excess-deaths-model | ba56184a57488c6f065a2428837eecfc2f56a678 | 07a9b7fc43dac6f1d1741e51f175555e73c18e97 | refs/heads/main | 2021-09-09T09:10:49.987777 | 2021-09-09T08:05:22 | 2021-09-09T08:05:22 | 366,795,934 | 445 | 84 | MIT | 2022-10-05T16:52:01 | 2021-05-12T17:24:01 | R | UTF-8 | R | false | false | 37,311 | r | 4_excess_deaths_global_estimates_export_for_interactive.R | # Step 1: import libraries ------------------------------------------------------------------------------
# This script constructs custom data frames to populate the The Economist's interactive presentation of this estimates
# Import libraries
library(tidyverse)
library(data.table)
library(lubridate)
library(readr)
library(countrycode)
library(ggplot2)
options(scipen=999)
# Step 2: import official covid-19 data from Our World in Data ------------------------------------------------------------------------------
## A. Import official covid data from Our World In Data
country_daily_data_raw <- fread("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
# If no data from current day
country_daily_data <- country_daily_data_raw[order(country_daily_data_raw$date), ]
# Create 7-day average of deaths for use in graphics (replacing the OWD version)
country_daily_data$new_deaths_smoothed <- ave(country_daily_data$new_deaths, country_daily_data$location, FUN = function(x){
unlist(lapply(1:length(x), FUN = function(i){
mean(x[max(c(1,i-7)):i], na.rm = T)
}))
})
# Step 3: Generate data for main map ------------------------------------------------------------------------------
# Import data:
daily <- read_csv("output-data/export_country_per_100k.csv")
cumulative <- read_csv("output-data/export_country_per_100k_cumulative.csv")
# Reshape the OWD data to prepare for merging with our estimates (create per 100k, etc)
main_map_covid_data <- country_daily_data %>%
mutate(date = as.Date(date),
iso3c = iso_code,
daily_covid_deaths_per_100k = (new_deaths_smoothed / population) * 100000,
cumulative_covid_deaths_per_100k = (total_deaths / population) * 100000) %>%
filter(date == max(date)) %>% # Select most recent date
dplyr::select(iso3c,daily_covid_deaths_per_100k,cumulative_covid_deaths_per_100k)
# Merge together
main_map <- merge(daily[daily$date == max(daily$date), c("iso3c", "date", "estimated_daily_excess_deaths_per_100k", "estimated_daily_excess_deaths_ci_95_top_per_100k",
"estimated_daily_excess_deaths_ci_95_bot_per_100k")],
cumulative[cumulative$date == max(cumulative$date), c("iso3c", "cumulative_estimated_daily_excess_deaths_per_100k",
"cumulative_estimated_daily_excess_deaths_ci_95_top_per_100k",
"cumulative_estimated_daily_excess_deaths_ci_95_bot_per_100k")])
main_map <- merge(main_map, main_map_covid_data, all.x=T)
# Write to file
write_csv(main_map, "output-data/output-for-interactive/main_map.csv")
# Step 4: World, country or region by day (data generation) ------------------------------------------------------------------------------
# Get official covid data in long format:
# 1. Generate custom regions not in Our World in Data: North America and Latin America
lat_am <- data.frame(country_daily_data[country_daily_data$continent %in%
c("North America", "South America") &
!country_daily_data$location %in%
c("United States", "Canada"), ])
lat_am$location <- "Latin America and Caribbean"
north_am <- data.frame(country_daily_data[country_daily_data$location %in%
c("United States", "Canada"), ])
north_am$location <- "North America"
for(i in c("new_deaths_smoothed",
"total_deaths",
"new_vaccinations_smoothed",
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"population")){
lat_am[, i] <- ave(lat_am[, i], lat_am$date,
FUN = function(x) sum(x, na.rm = T))
north_am[, i] <- ave(north_am[, i], north_am$date,
FUN = function(x) sum(x, na.rm = T))
}
# Remove duplicate dates
lat_am <- lat_am[!duplicated(lat_am$date), ]
north_am <- north_am[!duplicated(north_am$date), ]
# Ensuring populations are consistent across time (not confounded by missing data)
lat_am$population <- max(lat_am$population)
north_am$population <- max(north_am$population)
# Set other data to NA (not used)
lat_am[, setdiff(colnames(lat_am), c("location",
"date",
"new_deaths_smoothed",
"total_deaths",
"new_vaccinations_smoothed",
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"population"))] <- NA
north_am[, setdiff(colnames(north_am), c("location",
"date",
"new_deaths_smoothed",
"total_deaths",
"new_vaccinations_smoothed",
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"population"))] <- NA
# Append to central dataset, and select subset of this dataset to prepare for merging:
covid_data_long <- rbind(country_daily_data[country_daily_data$location != "North America", ], lat_am, north_am) %>%
mutate(date = as.Date(date),
iso3c = iso_code,
total_vaccinations_per_100 = (total_vaccinations / population) * 100,
daily_vaccinations = new_vaccinations_smoothed,
daily_vaccinations_per_100 = 100*new_vaccinations_smoothed / population,
vaccinated = people_vaccinated,
vaccinated_per_100 = 100*people_vaccinated / population,
fully_vaccinated = people_fully_vaccinated,
fully_vaccinated_per_100 = 100*people_fully_vaccinated / population,
daily_covid_deaths = new_deaths_smoothed,
daily_covid_deaths_per_100k = (new_deaths_smoothed / population) * 100000,
cumulative_covid_deaths = total_deaths,
cumulative_covid_deaths_per_100k = (total_deaths / population) * 100000) %>%
dplyr::select(iso3c,
location,
date,
population,
daily_vaccinations,
daily_vaccinations_per_100,
total_vaccinations,
total_vaccinations_per_100,
vaccinated,
vaccinated_per_100,
fully_vaccinated,
fully_vaccinated_per_100,
daily_covid_deaths,
daily_covid_deaths_per_100k,
cumulative_covid_deaths,
cumulative_covid_deaths_per_100k) %>%
pivot_longer(cols = daily_covid_deaths:cumulative_covid_deaths_per_100k) %>%
dplyr::rename(population_owd = population,
official_covid_data_type = name,
official_covid_deaths = value)
# Add correspondence to facilitate later merging:
covid_data_long$merge_column <- NA
covid_data_long$merge_column[covid_data_long$official_covid_data_type == "daily_covid_deaths"] <- "daily_excess_deaths"
covid_data_long$merge_column[covid_data_long$official_covid_data_type == "daily_covid_deaths_per_100k"] <- "daily_excess_deaths_per_100k"
covid_data_long$merge_column[covid_data_long$official_covid_data_type == "cumulative_covid_deaths"] <- "daily_excess_deaths_cumulative"
covid_data_long$merge_column[covid_data_long$official_covid_data_type == "cumulative_covid_deaths_per_100k"] <- "daily_excess_deaths_per_100k_cumulative"
# 2. Define function to get model estimates in "long" format:
long_exp_df <- function(files = c("output-data/export_world.csv",
"output-data/export_world_per_100k.csv",
"output-data/export_world_cumulative.csv",
"output-data/export_world_per_100k_cumulative.csv"),
types = c("daily_excess_deaths",
"daily_excess_deaths_cumulative",
"daily_excess_deaths_per_100k_cumulative",
"daily_excess_deaths_per_100k"),
col_names = c("location",
"date",
"population",
"estimate",
"estimate_top_95",
"estimate_top_90",
"estimate_top_50",
"estimate_bot_50",
"estimate_bot_90",
"estimate_bot_95",
"raw_estimate",
"recorded",
"daily_covid_deaths")){
# Check that type is provided for each
if(length(files) != length(types)){
stop("Please provide a type for each file.")
}
# Create container data frame
result <- data.frame()
# Cycle through files, rename columns, add type, and bind together
for(i in 1:length(files)){
temp <- read_csv(files[i])
colnames(temp) <- col_names
temp$type <- types[i]
result <- rbind(result, temp)
}
# Return combined data
return(result)
}
# 3. Load world estimates
world_long <- long_exp_df(files = c("output-data/export_world.csv",
"output-data/export_world_per_100k.csv",
"output-data/export_world_cumulative.csv",
"output-data/export_world_per_100k_cumulative.csv"),
types = c("daily_excess_deaths",
"daily_excess_deaths_per_100k",
"daily_excess_deaths_cumulative",
"daily_excess_deaths_per_100k_cumulative"))
# 4. Load region estimates
region_long <- long_exp_df(files = c("output-data/export_regions.csv",
"output-data/export_regions_per_100k.csv",
"output-data/export_regions_cumulative.csv",
"output-data/export_regions_per_100k_cumulative.csv"),
types = c("daily_excess_deaths",
"daily_excess_deaths_per_100k",
"daily_excess_deaths_cumulative",
"daily_excess_deaths_per_100k_cumulative"))
# As we prefer Canada + United States as North America, we remove the duplicate (see below where this is added). We also remove countries which figure separately in the standard region specification
region_long <- region_long[!region_long$location %in% c("North America", "Asia", "Oceania",
"China",
"India",
"Russia",
"United States"),]
# Load alternative region estimates (Lat Am vs North America, EU as separate entity)
region_alt <- long_exp_df(files = c("output-data/output-by-alternative-regions/export_regions_lat_am_na_eu.csv",
"output-data/output-by-alternative-regions/export_regions_lat_am_na_eu_per_100k.csv",
"output-data/output-by-alternative-regions/export_regions_lat_am_na_eu_cumulative.csv",
"output-data/output-by-alternative-regions/export_regions_lat_am_na_eu_per_100k_cumulative.csv"),
types = c("daily_excess_deaths",
"daily_excess_deaths_per_100k",
"daily_excess_deaths_cumulative",
"daily_excess_deaths_per_100k_cumulative"))
# 5. Load country estimates
country_long <- long_exp_df(files = c("output-data/export_country.csv",
"output-data/export_country_per_100k.csv",
"output-data/export_country_cumulative.csv",
"output-data/export_country_per_100k_cumulative.csv"),
types = c("daily_excess_deaths",
"daily_excess_deaths_per_100k",
"daily_excess_deaths_cumulative",
"daily_excess_deaths_per_100k_cumulative"),
col_names = c("iso3c",
"date",
"population",
"estimate",
"estimate_top_95",
"estimate_top_90",
"estimate_top_50",
"estimate_bot_50",
"estimate_bot_90",
"estimate_bot_95",
"raw_estimate",
"recorded",
"daily_covid_deaths"))
# 6. Harmonize location names
country_long <- merge(country_long, unique(covid_data_long[, c("location", "iso3c")]),
by="iso3c", all.x = T)
region_long$iso3c <- NA
region_alt$iso3c <- NA
world_long$iso3c <- NA
# 7. Bind all of these together
export_long <- rbind(world_long,
region_long,
region_alt,
country_long)
export_long$daily_covid_deaths <- NULL # Remove this, as we are getting this data in the next step
# 8. Merge with official covid data
export_long <- merge(export_long,
covid_data_long[ , setdiff(colnames(covid_data_long), "iso3c")],
by.x=c("type", "location", "date"),
by.y=c("merge_column", "location", "date"),
all.x = T)
# 9. Make names follow The Economist standard:
econ_names <- read_csv("source-data/economist_country_names.csv") %>%
rename(
econ_name = Name,
economist_region = Regions,
iso3c = ISOA3
) %>%
select(
econ_name,
iso3c,
economist_region
) %>% unique()
export_long <- merge(export_long, econ_names[!is.na(econ_names$iso3c), ],
by = "iso3c", all.x = T)
export_long$location[!is.na(export_long$econ_name)] <- export_long$econ_name[!is.na(export_long$econ_name)]
export_long$econ_name <- NULL
# Construct "is recorded" dummy variable:
export_long$known_excess_deaths <- FALSE
export_long$known_excess_deaths[!is.na(export_long$recorded)] <- TRUE
export_long$known_excess_deaths[export_long$type %in% c("daily_excess_deaths_cumulative", "daily_excess_deaths_per_100k_cumulative")] <- FALSE
# No region reports excess deaths directly:
export_long$known_excess_deaths[!export_long$iso3c %in% country_long$iso3c] <- FALSE
# The EU reports it for all countries for some dates:
# This cycles through all dates, checks if all EU countries have reported excess deaths, and if so, sets known_excess_deaths for the EU to "TRUE":
for(i in unique(export_long$date)){
if(min(as.numeric(export_long$known_excess_deaths[export_long$iso3c %in% c("AUT", "BEL", "BGR", "HRV", "CYP", "CZE", "DNK", "EST", "FIN", "FRA", "DEU", "GRC", "HUN", "IRL", "ITA", "LVA", "LTU", "LUX", "MLT", "NLD", "POL", "PRT", "ROU", "SVK", "SVN", "ESP", "SWE") & export_long$date == i & export_long$type %in% c("daily_excess_deaths_per_100k", "daily_excess_deaths") ])) == 1){
export_long$known_excess_deaths[export_long$type %in% c("daily_excess_deaths_per_100k", "daily_excess_deaths") & export_long$date == i & export_long$location == "European Union"] <- TRUE
}
}
# Inspect if desired:
if(inspect){
type <- "daily_excess_deaths"
ggplot(export_long[export_long$location %in% c("United States",
"European Union", "Europe",
"World", "Latin America and Caribbean") &
!is.na(export_long$location) &
export_long$type == type, ],
aes(x=date, y=official_covid_deaths, col = "official"))+
geom_line(aes(col = "estimate"))+
geom_line(aes(y=daily_vaccinations/1000, col = "Daily vaccinations, 1000s"))+
geom_line(aes(y=estimate, col = "model estimate"))+xlab("")+
facet_grid(.~location)+theme_minimal()+theme(legend.position = "bottom")+ylab("")+ggtitle(type)
type <- "daily_excess_deaths_cumulative"
ggplot(export_long[export_long$location %in% c("United States",
"European Union", "Europe",
"World", "Latin America and Caribbean") &
!is.na(export_long$location) &
export_long$type == type, ],
aes(x=date, y=official_covid_deaths, col = "official"))+
geom_line(aes(col = "estimate"))+
geom_line(aes(y=total_vaccinations/1000, col = "Total vaccinations, 1000s"))+
geom_line(aes(y=estimate, col = "model estimate"))+xlab("")+
facet_grid(.~location)+theme_minimal()+theme(legend.position = "bottom")+ylab("")+ggtitle(type)
type <- "daily_excess_deaths_per_100k"
ggplot(export_long[export_long$location %in% c("United States",
"European Union", "Europe",
"World", "Latin America and Caribbean") &
!is.na(export_long$location) &
export_long$type == type, ],
aes(x=date, y=official_covid_deaths, col = "official"))+
geom_line(aes(col = "estimate"))+
geom_line(aes(y=estimate, col = "model estimate"))+
geom_line(aes(y=total_vaccinations_per_100/100, col = "Vaccinations per person"))+
xlab("")+
facet_grid(.~location)+theme_minimal()+theme(legend.position = "bottom")+ylab("")+ggtitle(type)
type <- "daily_excess_deaths_per_100k_cumulative"
ggplot(export_long[export_long$location %in% c("United States",
"European Union", "Europe",
"World", "Latin America and Caribbean") &
!is.na(export_long$location) &
export_long$type == type, ],
aes(x=date, y=official_covid_deaths, col = "official"))+
geom_line(aes(col = "estimate"))+
geom_line(aes(y=estimate, col = "model estimate"))+
geom_line(aes(y=total_vaccinations_per_100, col = "Vaccinations per 100"))+
facet_grid(.~location)+theme_minimal()+ylab("")+ggtitle(type)+xlab("")
}
# 10. Round location files to 2 digits for interactive
for(i in c("estimate", "estimate_top_95", "estimate_top_90", "estimate_top_50", "estimate_bot_50", "estimate_bot_90", "estimate_bot_95", "raw_estimate", "recorded", "official_covid_deaths")){
export_long[, i] <- round(export_long[, i], 3)
}
# 11. Sort by date
export_long <- export_long[order(export_long$date), ]
# Step 6: Write line charts to files (and world cumulative for most recent date) ------------------------------------------------------------------------------
# Select columns to include:
columns_to_include <- c("location", "date", "type", "estimate",
"estimate_top_95",
"estimate_top_50",
"estimate_bot_50",
"estimate_bot_95",
"official_covid_deaths",
"known_excess_deaths")
columns_to_export <- setdiff(columns_to_include, "type")
# By country:
write_csv(export_long[!export_long$location %in% c("Africa", "Oceania", "Americas", "Asia", "Asia & Oceania", "Europe", "Latin America and Caribbean", "North America", "World"), ], "output-data/output-for-interactive/by_location_full_data.csv")
write_csv(export_long[!export_long$location %in% c("Africa", "Oceania", "Americas", "Asia", "Asia & Oceania", "Europe", "Latin America and Caribbean", "North America", "World") & export_long$type == "daily_excess_deaths", columns_to_export],
"output-data/output-for-interactive/by_location.csv")
write_csv(export_long[!export_long$location %in% c("Africa", "Oceania", "Americas", "Asia", "Asia & Oceania", "Europe", "Latin America and Caribbean", "North America", "World") & export_long$type == "daily_excess_deaths_per_100k", columns_to_export],
"output-data/output-for-interactive/by_location_per_100k.csv")
write_csv(export_long[!export_long$location %in% c("Africa", "Oceania", "Americas", "Asia", "Asia & Oceania", "Europe", "Latin America and Caribbean", "North America", "World") & export_long$type == "daily_excess_deaths_cumulative", columns_to_export], "output-data/output-for-interactive/by_location_cumulative.csv")
write_csv(export_long[!export_long$location %in% c("Africa", "Oceania", "Americas", "Asia", "Asia & Oceania", "Europe", "Latin America and Caribbean", "North America", "World") & export_long$type == "daily_excess_deaths_per_100k_cumulative", columns_to_export], "output-data/output-for-interactive/by_location_per_100k_cumulative.csv")
# By region:
# Select regions
regions_line_chart <- export_long[export_long$location %in% c("Africa", "Asia", "Oceania", "Europe", "Latin America and Caribbean", "North America"), columns_to_include]
# Inspect if desired
if(inspect){
ggplot(regions_line_chart[regions_line_chart$type == "daily_excess_deaths",],
aes(x=date))+
geom_area(aes(y=official_covid_deaths, fill=location))+
geom_line(aes(y=estimate,
col = "Estimated excess deaths"))+
geom_line(aes(y=estimate_top_95))+
geom_line(aes(y=estimate_bot_95))+
geom_line(aes(y=estimate_top_50, alpha= 0.5))+
geom_line(aes(y=estimate_bot_50, alpha= 0.5))+
theme_minimal()+theme(legend.position = "bottom", legend.title = element_blank())+
xlab("")+ylab("")+facet_grid(.~location)
}
# Write to files
write_csv(regions_line_chart[regions_line_chart$type == "daily_excess_deaths",
columns_to_export],
"output-data/output-for-interactive/regions_line_chart.csv")
write_csv(regions_line_chart[regions_line_chart$type == "daily_excess_deaths_cumulative",
columns_to_export],
"output-data/output-for-interactive/regions_line_chart_cumulative.csv")
write_csv(regions_line_chart[regions_line_chart$type == "daily_excess_deaths_per_100k",
columns_to_export],
"output-data/output-for-interactive/regions_line_chart_per_100k.csv")
write_csv(regions_line_chart[regions_line_chart$type == "daily_excess_deaths_per_100k_cumulative", columns_to_export], "output-data/output-for-interactive/regions_line_chart_per_100k_cumulative.csv")
# For the world:
world_line_chart <- export_long[export_long$location == "World", columns_to_include]
# Inspect if desired
if(inspect){
ggplot(world_line_chart[world_line_chart$type == "daily_excess_deaths_cumulative",],
aes(x=date))+
geom_area(aes(y=official_covid_deaths, fill=location))+
geom_line(aes(y=estimate,
col = "Estimated excess deaths"))+
geom_line(aes(y=estimate_top_95))+
geom_line(aes(y=estimate_bot_95))+
geom_line(aes(y=estimate_top_50, alpha= 0.5))+
geom_line(aes(y=estimate_bot_50, alpha= 0.5))+
theme_minimal()+theme(legend.position = "bottom", legend.title = element_blank())+
xlab("")+ylab("")+facet_grid(.~location)
}
# Write to files
write_csv(world_line_chart[world_line_chart$type == "daily_excess_deaths",
columns_to_export],
"output-data/output-for-interactive/world_line_chart.csv")
write_csv(world_line_chart[world_line_chart$type == "daily_excess_deaths_cumulative",
columns_to_export],
"output-data/output-for-interactive/world_line_chart_cumulative.csv")
write_csv(world_line_chart[world_line_chart$type == "daily_excess_deaths_per_100k",
columns_to_export],
"output-data/output-for-interactive/world_line_chart_per_100k.csv")
write_csv(world_line_chart[world_line_chart$type == "daily_excess_deaths_per_100k_cumulative", columns_to_export], "output-data/output-for-interactive/world_line_chart_per_100k_cumulative.csv")
# World top-line chart:
write_csv(world_line_chart[world_line_chart$type == "daily_excess_deaths_cumulative" & world_line_chart$date == max(world_line_chart$date, na.rm = T), columns_to_export], "output-data/output-for-interactive/world_most_recent_cumulative_deaths.csv")
# Step 6: Generate data for table A ------------------------------------------------------------------------------
# We here rely on "export_long" created above.
# Absolute terms:
# Select most recent and relevant columns from combined long data:
table_A_absolute <- export_long[export_long$date == max(export_long$date) & export_long$type == "daily_excess_deaths_cumulative", c("location",
"official_covid_deaths",
"estimate",
"estimate_top_95",
"estimate_bot_95",
"vaccinated_per_100")]
# Generate % difference between estimate and official deaths
table_A_absolute$difference_pct <- round(100*(table_A_absolute$estimate - table_A_absolute$official_covid_deaths) / table_A_absolute$official_covid_deaths, 1)
# Select correct column order:
table_A_absolute <- table_A_absolute[, c("location",
"official_covid_deaths",
"estimate",
"estimate_top_95",
"estimate_bot_95",
"difference_pct",
"vaccinated_per_100")]
# Per 100k:
# Select most recent and relevant columns from combined long data:
table_A_per_100k <- export_long[export_long$date == max(export_long$date) & export_long$type == "daily_excess_deaths_per_100k_cumulative", c("location",
"official_covid_deaths",
"estimate",
"estimate_top_95",
"estimate_bot_95",
"vaccinated_per_100")]
# Generate % difference between estimate and official deaths
table_A_per_100k$difference_pct <- round(100*(table_A_per_100k$estimate - table_A_per_100k$official_covid_deaths) / table_A_per_100k$official_covid_deaths, 1)
# Select correct column order:
table_A_per_100k <- table_A_per_100k[, c("location",
"official_covid_deaths",
"estimate",
"estimate_top_95",
"estimate_bot_95",
"difference_pct",
"vaccinated_per_100")]
colnames(table_A_per_100k) <- c("location",
"official_covid_deaths_per_100k",
"estimate_per_100k",
"estimate_top_95_per_100k",
"estimate_bot_95_per_100k",
"difference_pct",
"vaccinated_per_100")
colnames(table_A_absolute)[2:5] <- paste0(colnames(table_A_absolute)[2:5], "_absolute")
table_A <- merge(table_A_absolute, table_A_per_100k[, 1:5], by="location", all = T)
# Tweak the location name for Europe to indicate that the grouping includes the EU:
table_A$location[table_A$location == "Europe"] <- "Europe (incl. EU)"
# Fix to extremely small countries, conservatively rounding to nearest one using ceiling/floor for the confidence interval:
table_A$estimate_top_95_absolute[abs(table_A$estimate_top_95_absolute) < 1] <- ceiling(table_A$estimate_top_95_absolute[abs(table_A$estimate_top_95_absolute) < 1])
table_A$estimate_bot_95_absolute[abs(table_A$estimate_bot_95_absolute) < 1] <- floor(table_A$estimate_bot_95_absolute[abs(table_A$estimate_bot_95_absolute) < 1])
# Write to file
write_csv(table_A, "output-data/output-for-interactive/table_A.csv")
# Step 7: Generate data for second map ------------------------------------------------------------------------------
# Load data:
second_map <-
read_csv("output-data/output-for-interactive/main_map.csv")[, c(
"iso3c",
"date",
"cumulative_estimated_daily_excess_deaths_per_100k",
"cumulative_estimated_daily_excess_deaths_ci_95_top_per_100k",
"cumulative_estimated_daily_excess_deaths_ci_95_bot_per_100k"
)]
# Load estimated demography-adjusted IFR by iso3c:
ifr_by_iso <- readRDS("source-data/ifr_cache.RDS")
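# Namibia's ISO2 code is the string "NA", which is easily treated as a missing
# value, so restore it explicitly before converting to ISO3: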
ifr_by_iso$iso2c[ifr_by_iso$area == "Namibia"] <- "NA"
ifr_by_iso$iso3c <- countrycode(ifr_by_iso$iso2c, "iso2c", "iso3c")
ifr_by_iso$demography_adjusted_ifr_percent <- ifr_by_iso$area_ifr
# Load estimated share of population over 65:
age_over_65 <-
unique(country_daily_data[, c("iso_code", "aged_65_older")])
age_over_65$iso3c <- age_over_65$iso_code
age_over_65$aged_65_older_pct <- age_over_65$aged_65_older
second_map <-
merge(second_map, na.omit(ifr_by_iso[, c("iso3c", "demography_adjusted_ifr_percent")]), all.x = T)
second_map <-
merge(second_map, na.omit(age_over_65[, c("iso3c", "aged_65_older_pct")]), all.x = T)
# Get implied infections per 100 persons:
second_map$implied_infections_per_100_persons <-
(1 / 1000) * second_map$cumulative_estimated_daily_excess_deaths_per_100k / (second_map$demography_adjusted_ifr_percent /
100)
second_map$implied_infections_per_100_persons_top_95 <-
(1 / 1000) * second_map$cumulative_estimated_daily_excess_deaths_ci_95_top_per_100k /
(second_map$demography_adjusted_ifr_percent / 100)
second_map$implied_infections_per_100_persons_bot_95 <-
(1 / 1000) * second_map$cumulative_estimated_daily_excess_deaths_ci_95_bot_per_100k /
(second_map$demography_adjusted_ifr_percent / 100)
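# Sanity check of the unit conversion above (illustrative numbers only): a country
# with 200 cumulative excess deaths per 100k and a demography-adjusted IFR of 0.5%
# implies (1/1000) * 200 / (0.5/100) = 40 infections per 100 persons, i.e. roughly
# 40% of the population ever infected.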
# Ensure implied infections and the per-100k death confidence bounds are not negative:
second_map$implied_infections_per_100_persons[second_map$implied_infections_per_100_persons < 0] <- 0
second_map$cumulative_estimated_daily_excess_deaths_ci_95_bot_per_100k[second_map$cumulative_estimated_daily_excess_deaths_ci_95_bot_per_100k < 0] <- 0
second_map$cumulative_estimated_daily_excess_deaths_ci_95_top_per_100k[second_map$cumulative_estimated_daily_excess_deaths_ci_95_top_per_100k < 0] <- 0
# Get estimated dead over 100k population over 65:
second_map$cumulative_estimated_daily_excess_deaths_per_100k_population_over_65 <-
second_map$cumulative_estimated_daily_excess_deaths_per_100k / (second_map$aged_65_older_pct /
100)
second_map$cumulative_estimated_daily_excess_deaths_per_100k_population_over_65_top_95 <-
second_map$cumulative_estimated_daily_excess_deaths_ci_95_top_per_100k / (second_map$aged_65_older_pct /
100)
second_map$cumulative_estimated_daily_excess_deaths_per_100k_population_over_65_bot_95 <-
second_map$cumulative_estimated_daily_excess_deaths_ci_95_bot_per_100k / (second_map$aged_65_older_pct /
100)
# Write to file:
write_csv(second_map,
"output-data/output-for-interactive/second_map.csv")
# Step 8: Generate data for table B ------------------------------------------------------------------------------
# Load data from table 1:
table_B <- read_csv("output-data/output-for-interactive/table_A.csv")
table_B$iso3c <- countrycode(table_B$location, "country.name", "iso3c")
table_B$iso3c[table_B$location == "Micronesia"] <- "FSM"
# Load data from map 2:
second_map_data <- read_csv("output-data/output-for-interactive/second_map.csv")
# Merge the two:
table_B <- merge(table_B, second_map_data, by = "iso3c", all.x = T)
# Generate columns:
# Demography adjusted deaths:
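# (each country's per-100k estimate is divided by the ratio of its demography-adjusted
# IFR to the cross-country mean IFR, i.e. roughly, deaths re-expressed as if the
# country had an average age structure)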
table_B$cumulative_estimated_daily_excess_deaths_per_100k_demography_adjusted <- table_B$cumulative_estimated_daily_excess_deaths_per_100k / (table_B$demography_adjusted_ifr_percent/mean(table_B$demography_adjusted_ifr_percent, na.rm = T))
# Ensure infections are not negative:
table_B$implied_infections_per_100_persons[table_B$implied_infections_per_100_persons < 0] <- 0
table_B$implied_infections_per_100_persons_top_95[table_B$implied_infections_per_100_persons_top_95 < 0] <- 0
table_B$implied_infections_per_100_persons_bot_95[table_B$implied_infections_per_100_persons_bot_95 < 0] <- 0
# Remove rows with no data:
table_B <- table_B[!(is.na(table_B$cumulative_estimated_daily_excess_deaths_per_100k_population_over_65) & is.na(table_B$implied_infections_per_100_persons)), ]
# Write to file:
write_csv(table_B[, c("location",
"iso3c",
"official_covid_deaths_per_100k",
"cumulative_estimated_daily_excess_deaths_per_100k",
"estimate_top_95_per_100k",
"estimate_bot_95_per_100k",
"cumulative_estimated_daily_excess_deaths_per_100k_population_over_65",
"cumulative_estimated_daily_excess_deaths_per_100k_demography_adjusted",
"implied_infections_per_100_persons",
"implied_infections_per_100_persons_top_95",
"implied_infections_per_100_persons_bot_95")],
"output-data/output-for-interactive/table_B.csv")
# Step 9: Histogram data ------------------------------------------------------------------------------
# This is exported in case people want more information on the distribution of predictions at the world cumulative level for the present day.
# Load raw histogram data
hist <- read_csv("output-data/export_world_cumulative_histogram_data.csv")
# Pivot to long format
hist <- pivot_longer(hist, cols = grep("B", colnames(hist)))
# Restrict to most recent date
hist <- hist[hist$date == max(hist$date), ]
# Construct equal-spaced groups
N_groups <- 50
groups <- seq(min(hist$value), max(hist$value), len=N_groups+1)
hist$bin_min <- NA
hist$bin_max <- NA
hist$bin <- NA
hist$bin_n <- 1
# Place observations in groups
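# Each draw is assigned to the bin whose [bin_min, bin_max] interval contains it; a
# value sitting exactly on an interior boundary ends up in the higher of the two
# adjacent bins because later iterations overwrite earlier assignments. Bins that
# receive no draws are padded with a zero-count row so all 50 bins are exported.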
for(i in 2:length(groups)){
hist$bin_min[hist$value >= groups[i-1] & hist$value <= groups[i]] <- groups[i-1]
hist$bin_max[hist$value >= groups[i-1] & hist$value <= groups[i]] <- groups[i]
hist$bin[hist$value >= groups[i-1] & hist$value <= groups[i]] <- i-1
if(sum(hist$bin == i-1, na.rm = T) == 0){
hist <- rbind(hist, c(NA, NA, NA, NA, NA, NA,
groups[i-1], groups[i], i-1, 0))
}
}
hist$world <- unique(na.omit(hist$world)[1])
hist$date <- unique(na.omit(hist$date)[1])
hist$estimate <- unique(na.omit(hist$estimate)[1])
# Sum observations per bin:
hist$bin_n <- ave(hist$bin_n, hist$bin, FUN = sum)
hist <- unique(hist[, c("world", "date", "estimate", "bin_min", "bin_max", "bin_n")])
if(inspect){
ggplot(hist)+geom_rect(aes(xmin=bin_min, xmax=bin_max, ymax=bin_n, ymin=0))+
theme_minimal()
}
# Write to file:
write_csv(hist,
"output-data/output-for-interactive/world_estimates_histogram.csv")
# Add timestamp:
tibble(timestamp = now(tzone = "UTC")) %>%
write_csv('output-data/output-for-interactive/timestamp.csv')
|
073f1cf747d785b612b055e3aebd4f9d6c1ddc91 | 799feab98fba14d85188ee1502855b6ccd18381e | /T1_BasicsofR.R | 774ea2b50d4dd7c6812dfbcb532651d3382fe2f0 | [] | no_license | Adarsh-Suresh-Patil/IST-687-Data-Science | 46a86bb75ea5921a77339d50dc03184bcdddd237 | 1dd171a0b465b0d69968afbee766fc0f39a97f4e | refs/heads/master | 2020-04-17T13:01:07.942316 | 2019-03-16T06:42:18 | 2019-03-16T06:42:18 | 166,599,160 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,024 | r | T1_BasicsofR.R | #Basics of R
#1. Define variables
#Define a vector with values 4.0, 3.3 and 3.7
grades <- c(4.0, 3.3, 3.7)
#Define vector with values "Bio", "Math", "History"
course <- c("Bio", "Math", "History")
#Define a variable of value 3
betterthanB <- 3
#2. Calculate statistics using R
#Avg of grades
avggrades <- mean(grades)
avggrades
#calculate length of vector
lengrades <- length(grades)
lengrades
#calculate sum of grades
sumgrades <- sum(grades)
sumgrades
#calculate min and max of grades
highestgrade <- max(grades)
highestgrade
lowestgrade <- min(grades)
lowestgrade
#add 0.3 to all elements of vector and calculate average
bettergrades <- grades + 0.3
bettergrades
avgbettergrades <- mean(bettergrades)
avgbettergrades
#Conditional IF statement in R
#check whether the highest grade is higher than 3.5
if(highestgrade > 3.5) "Yes" else "No"
#check whether the lowest grade is higher than betterthanB
if (lowestgrade > betterthanB) "Yes" else "No"
#Accessing elements in a vector
#Access 2nd element of vector course
course[2]
|
74292c0c7f3963b690d2a66076b854208bad0dec | 56b4d541153ce86dadbfbdef9636d18be7de9903 | /4.data-viz.R | bd448b3a6f7fde9107afe527262257ca925f48aa | [] | no_license | tianchu-shu/data-analysis-viz | 9db4c243577917a6286425fd439e841102875958 | a5d19ff3edce031adf1584ba57327876d60f568c | refs/heads/master | 2020-03-21T18:35:52.432421 | 2018-10-09T01:16:23 | 2018-10-09T01:16:23 | 138,901,620 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 982 | r | 4.data-viz.R | #Data-viz
library(ggplot2)
library(ggvis)
library(tidyverse)
library(reshape2)
ggplot(df, aes(freq, fill_rate)) +
geom_point(aes(color = FY)) +
geom_smooth(se = FALSE) +
labs(title = "")
ggplot(data = melt(df), mapping = aes(x = value)) +
geom_histogram(bins = 30) + facet_wrap(~variable, scales = 'free_x')
ggplot(data = df) + geom_point(mapping = aes(x=VR, y=Invited, color=Q))
ggplot(data = df) +
  geom_bar(mapping = aes(x = Region, fill = Sector),
           position = "dodge")
##
bar <- ggplot(data = df) +
  geom_bar(mapping = aes(x = Region, fill = Sector),
           show.legend = FALSE,
           width = 1) +
  theme(aspect.ratio = 1) + labs(x = NULL, y = NULL)
bar + coord_flip()
bar + coord_polar()
##
ggplot(df, aes(Region, freq)) +
geom_point(aes(colour = Sector))
df %>% ggvis(~fill_rate, ~freq, fill = ~Region) %>% layer_points()
df %>% ggvis(~fill_rate, ~freq, fill = ~Sector) %>% layer_points() |
6600cc647766828955c73d6508af400a501e8a09 | 0e100f473781741b45e463c522cddf9812307fba | /run_analysis.R | c8138a80bc45167c99bc384ae4eee387b3b61ded | [] | no_license | raneykat/CourseraGetAndCleanProject | 9af4f3dec8f5ef115bc4a17313cd3d0d4d81363b | 8aa14348756cf5aa04f23391f09154e90d3c8643 | refs/heads/master | 2020-06-05T09:10:19.893102 | 2014-11-23T22:28:53 | 2014-11-23T22:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,210 | r | run_analysis.R | # Katherine Raney
# Getting and Cleaning Data Course Project
# November 2014
#setwd("C:\\raneykat_git\\CourseraGetAndCleanProject")
# libraries needed
library(reshape2)
library(tidyr)
library(dplyr)
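# The script assumes the UCI HAR dataset is already unzipped into
# "getdata-projectfiles-UCI HAR Dataset"; if it is missing, it can be fetched roughly
# as below (URL as distributed with the course assignment; the archive unpacks into a
# "UCI HAR Dataset" subfolder, matching the paths used further down).
if (!dir.exists("getdata-projectfiles-UCI HAR Dataset")) {
  zip_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(zip_url, destfile = "UCI_HAR.zip", mode = "wb")
  unzip("UCI_HAR.zip", exdir = "getdata-projectfiles-UCI HAR Dataset")
}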
# First, load the files
# APPLIES FOR BOTH TEST AND TRAIN DATA
# activity_labels
# this is a lookup dataset
cols <- c("activityId","activity")
activity_labels <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\activity_labels.txt",sep=" ",
col.names = cols)
# specify that activityId is a factor, not a measure
activity_labels$activityId <-as.factor(activity_labels$activityId)
# features
# this file has the column names for the x files
cols2 <- c("featureId","feature")
features <-read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\features.txt",sep="",
col.names=cols2)
# TEST DATASET
# this file has the row labels for the test data set
# they are the surrogate key values related to the activity_labels
y_test <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\test\\y_test.txt",
col.names=c("activityId"))
# activityId is a factor, not a measure
y_test$activityId <- as.factor(y_test$activityId)
# subject_test
# this file has surrogate keys that relate to the subjects of the test dataset
subject_test <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\test\\subject_test.txt",
col.names=c("subjectId"))
# subjectId is a factor not a measure
subject_test$subjectId <- as.factor(subject_test$subjectId)
#this file has the measures data for the test data set
x_test <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\test\\x_test.txt",sep = "",
quote = "")
#features$feature has the column names for this dataset
cols3 <- features[,2]
names(x_test) <- cols3
#y_test$activityId has the activity labels for this dataset, so we'll add that column
x_test$activityId <- y_test$activityId
#add subjectId to associate the subjects with the measures
x_test$subjectId <- subject_test$subjectId
#add dataset column to indicate this is test
x_test$dataset <- "test"
# TRAIN DATASET
#this file has the row labels for the train data set
# they are the surrogate key values related to the activity_labels
y_train <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\train\\y_train.txt",
col.names=c("activityId"))
# activityId is a factor, not a measure
y_train$activityId <- as.factor(y_train$activityId)
# subject_train
subject_train <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\train\\subject_train.txt",
col.names=c("subjectId"))
# subjectId is a factor not a measure
subject_train$subjectId <- as.factor(subject_train$subjectId)
#this file has the measures data for the train data set
x_train <- read.csv("getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset\\train\\x_train.txt",sep = "",
quote = "")
# cols3 (aka features$feature) has the column names for this dataset
names(x_train) <- cols3
#y_train$activityId has the activity labels for this dataset, so we'll add that column
x_train$activityId <- y_train$activityId
#add subjectId
x_train$subjectId <- subject_train$subjectId
#add dataset column to indicate this is train
x_train$dataset <- "train"
# Combined dataset
# bind x_test and x_train to form one dataset
x <- bind_rows(x_test, x_train)  # rbind_list() is defunct in current dplyr releases
# let's melt these measures into rows
# could use gather, but melt seems faster
x <- melt(x,na.rm=TRUE,variable.name="feature",value.name="measure")
# keep only the rows where the feature contains mean or std
x <- x[grep("mean|std",x$feature),]
# chain some operations together here
x_tidy <- x %>%
#The measures were averaged by activity, subject, and feature
group_by(activityId,feature,subjectId) %>%
summarise(avg=mean(measure)) %>%
# The name of the activity was added as a new column, joining to the activity_labels lookup on activityId
inner_join(activity_labels,by = c("activityId")) %>%
select(activity,feature,subjectId,avg)
#The final aggregated dataset is written to x_tidy.txt
write.csv(x_tidy,file="x_tidy.txt")
|
0a618a76e5b971c47979036ca8d5bec5c8098586 | d81c8a628b7f197106debbb8d2b62dd56719e705 | /Project 3/word_cloud_jb.r | e5486eddd2525e7331616d3ce9e7429ffbf627fc | [] | no_license | ilyakats/CUNY-DATA607 | 77259bae9ccdb927c0c9a07bb1f5204da6a33f53 | 4ace18eb89e9cfc49cb816b3f612247b47aa6e15 | refs/heads/master | 2021-01-09T06:35:14.150125 | 2017-05-08T06:02:27 | 2017-05-08T06:02:27 | 81,013,259 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 464 | r | word_cloud_jb.r | # Jaan Bernberg
library(wordcloud2)
my_cloud_words <- all_data[,3:7] %>%
filter(YearCollected == 2017) %>%
select(SkillDescription, Amount) %>%
group_by(SkillDescription) %>%
summarise(Amount = sum(Amount)) %>%
arrange(desc(Amount)) %>% as.data.frame()
rownames(my_cloud_words) <- my_cloud_words$SkillDescription
wordcloud2(my_cloud_words, size = 1.5,
minRotation = -pi/6,
maxRotation = -pi/6,
rotateRatio = 1)
|
58794d2b14b7b7f272bb9de0ac060fc1b9ac29bc | 2dcdfa0d9adfac9453f360c6b2c9bc1c765b46a4 | /Operation_Dashboard_New/ui.R | 408adc9b5209d41d54fafdaf72d9dd908657f85e | [] | no_license | roymondliao/Shiny_Project | d056af95a3729a8625bc238ae6167613370ad9b4 | 14064bcb8c18811b4c29ad9012e2cc1feedbf256 | refs/heads/master | 2021-01-10T04:26:41.035961 | 2019-03-27T03:56:30 | 2019-03-27T03:56:30 | 53,765,381 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,712 | r | ui.R | ## Operaction dashboard - ui.R
library(shiny)
library(shinydashboard, warn.conflicts = FALSE)
library(htmlwidgets)
library(rCharts)
library(shinythemes)
library(DT)
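# Raise Shiny's file-upload limit to 10 MB (the default is 5 MB) for the upload pages below.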
options(shiny.maxRequestSize=10*1024^2)
## app.R
header <- dashboardHeader(title = "Operation Dashboard",
dropdownMenuOutput("messageMenu"),
## dropdown Menu for message
dropdownMenu(type = "notifications",
notificationItem(status = "success",
#from = "Support",
text = em(h4("If you have any question or problem"),
h4("please contact below:"),
h6("Email:[email protected]"),
h6("Wechat: Yuyu Liao"),
h6("Email:[email protected]"),
h6("Wechat: Frank Tseng")),
icon = icon("support")
))
)
sidebar <- dashboardSidebar(## icon from http://fontawesome.io/icons/ just use icon name,
## icon from http://getbootstrap.com/components/ use the last name after hyphen
sidebarMenu(
menuItem("Star Plot", tabName = "Star_Plot", icon = icon(name = "line-chart")),
menuItem("GP Daily Plot", tabName = "gp_daily_plot", icon = icon(name = "pie-chart"), badgeLabel = "new", badgeColor = "green"),
menuItem("OP Daily Report", tabName = "op_daily_report", icon = icon(name = "table")),
menuItem("Mail Daily Report", tabName = "mail_daily_report", icon = icon("envelope-o")),
menuItem("Reply File Upload", tabName = "auto_reply", icon = icon(name = "mail-reply-all")),
menuItem("GP AutoReply Type", tabName = "gp_autoreply_type", icon = icon("list-ol")),
menuItem("GP Platform", icon = icon("link"), href = "http://mp.ijinshan.com/login/login"),
tags$hr(),
selectizeInput("account_name", label = "Who you are?",
choices = c(Choose='', "Fox Liu" = "Fox.Liu", "Serene Zeng" = "Serene.Zeng", "Pandora Syu" = "Pandora.Syu", "Wing Ho" = "Wing.Ho"),
selected = NULL, options = list(placeholder = 'select name'), width = "250px"),
passwordInput(inputId = "account_password", label = "Enter the password:", value = "", width = "250"),
tags$head(
tags$style(HTML('#submit_account{color:#FFFFFF; background-color:#000000; font-family: Georgia, "Times New Roman",
Times, serif; margin: 13px; width:170px}'))
),
actionButton(inputId = "submit_account", label = "Touch me", icon = icon(name = "thumbs-o-up", class = "fa-1x")),
#submitButton(text = "Touch me", icon = icon(name = "thumbs-o-up", class = "fa-1x"), width = validateCssUnit("200")),
tags$hr(),
tags$head(
tags$style(HTML('#account{color:#000000; background-color:#FFFFFF; font-family: Georgia, "Times New Roman",
Times, serif; margin: 13px; width:170px; font-size: 135%}'))
),
verbatimTextOutput(outputId = "account")),
width = 200
)
body <- dashboardBody(# Boxes need to be put in a row (or column)
tabItems(
#### Star plot page
tabItem(tabName = "Star_Plot",
fluidRow(
box(title = "Date range", width = 4, height = "500px", status = "primary", solidHeader = TRUE,
br(),
dateRangeInput("input_date", label = NULL, start = Sys.Date()-7, end = Sys.Date()-1),
tags$head(
tags$style(HTML('#submit_Star_plot{color:#FFFFFF; background-color:#0066CC; width:125px}'))
),
actionButton(inputId = "submit_Star_plot", label = "Sumbit"),
#submitButton(text = "Submit", icon("refresh")),
br(),
helpText(h2(strong("Note:")),
p("The GP's Star data is update at 18:00 every day."),
p("The data are from", a("http://mp.ijinshan.com/")))
),
box(title = "Star Plot", width = 8, height = "500px", status = "primary", solidHeader = TRUE,
showOutput(outputId = "star_plots", "highcharts")),
                     box(title = "Data Table", width = 12, height = "450px", DT::dataTableOutput("star_data"))
)),
#### GP daily plot page
tabItem(tabName = "gp_daily_plot",
fluidRow(
box(title = "All function feedback percentage", status = "danger", solidHeader = TRUE, width = 8, height = 500,
showOutput("main_function_plot", "highcharts")),
box(title = "Date range", status = "danger", solidHeader = TRUE, width = 4, height = 200,
dateRangeInput("input_date_gp", label = NULL, start = Sys.Date()-7, end = Sys.Date()-1, separator = "-"),
tags$head(
tags$style(HTML('#submit_gp_daily{color:#FFFFFF; background-color:#FF3333; width:125px}'))
),
actionButton(inputId = "submit_gp_daily", label = "Submit")
#submitButton(text = "Submit", icon("refresh"))
)
),
fluidRow(
tabBox(title = NULL, side = "left", width = 12, height = 600,
                            #collapsible = TRUE, solidHeader = TRUE, status = "danger",
tabPanel(title = "Main Plots",
column(4, showOutput("applock_plot", "highcharts")),
column(4, showOutput("scan_plot", "highcharts")),
column(4, showOutput("private_plot", "highcharts")),
column(4, showOutput("junk_plot", "highcharts")),
column(4, showOutput("callblock_plot", "highcharts"))),
tabPanel(title = 'Applock',
column(8, showOutput("applock_line_plot", "highcharts")),
column(4, plotOutput("apploc_wc"))),
tabPanel(title = 'Scan',
column(8, showOutput("scan_line_plot", "highcharts")),
column(4, plotOutput("scan_wc"))),
tabPanel(title = 'Private',
column(8, showOutput("private_line_plot", "highcharts")),
column(4, plotOutput("private_wc"))),
tabPanel(title = 'Junk',
column(8, showOutput("junk_line_plot", "highcharts")),
column(4, plotOutput("junk_wc"))),
tabPanel(title = 'Callblock',
column(8, showOutput("callblock_line_plot", "highcharts")),
column(4, plotOutput("callblock_wc")))
)
),
fluidRow(
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Applock", DT::dataTableOutput("applock_dt", width = 1000)),
tabPanel("Scan", DT::dataTableOutput("scan_dt", width = 1000)),
tabPanel("Private", DT::dataTableOutput("private_dt", width = 1000)),
tabPanel("Junk", DT::dataTableOutput("junk_dt", width = 1000)),
tabPanel("Callblock", DT::dataTableOutput("callblock_dt", width = 1000))
))
)
),
#### GP and Mail Auto reply page
tabItem(tabName = "auto_reply",
fluidRow(
box(title = "GP Platform auto reply system", status = "info", solidHeader = TRUE, width = 4, height = 300,
#selectizeInput("select_op_name_gp", label = "Who you are?",
# choices = c(Choose='', "Fox Liu" = "Fox.Liu", "Serene Zeng" = "Serene.Zeng", "Pandora Syu" = "Pandora.Syu", "Wing Ho" = "Wing.Ho"),
# selected = NULL, options = list(placeholder = 'select name')),
fileInput(inputId = "input_file", label = "Please upload GP-reply file", accept = c(".xlsx", ".csv"))),
#submitButton(text = "Submit")),
box(title = "Upload GP-File response massage :", width = 8, height = 300,
verbatimTextOutput(outputId = "update_info_gp")),
box(title = "Mail Platform auto reply system", status = "primary", solidHeader = TRUE, width = 4, height = "300px",
#selectizeInput("select_op_name_mail", label = "Who you are?",
# choices = c(Choose='', "Fox Liu" = "Fox.Liu", "Serene Zeng" = "Serene.Zeng", "Pandora Syu" = "Pandora.Syu", "Wing Ho" = "Wing.Ho"),
# selected = NULL, options = list(placeholder = 'select name')),
fileInput(inputId = "input_file_mail", label = "Please upload Mail-reply file", accept = c(".xlsx", ".csv"))),
#submitButton(text = "Submit")),
box(title = "Upload Mail-File response massage :", width = 8, height = "300px",
verbatimTextOutput(outputId = "update_info_mail"))
)),
#### OP daily report page
tabItem(tabName = "op_daily_report",
fluidRow(
box(title = "Upload file", width = 3, height = "180px", status = "success", solidHeader = TRUE,
fileInput(inputId = "input_file_op", label = NULL, accept = c(".xlsx", ".csv"))
#tags$head(
# tags$style(HTML('#Submit_op_daily_report_uploadfile{color:#FFFFFF; background-color:#000000; width:125px}'))
#),
#actionButton(inputId = "Submit_op_daily_report_uploadfile", label = "Submit")
),
box(title = "Upload File Message", verbatimTextOutput(outputId = "update_info_op"),
width = 3, height = "180px", status = "success", solidHeader = TRUE),
box(title = "Date range", width = 3, height = "180px", status = "success", solidHeader = TRUE,
dateRangeInput("input_date_op", label = NULL, start = Sys.Date()-2, end = Sys.Date()-1, separator = "-"),
tags$head(
tags$style(HTML('#Submit_op_daily_report{color:#000000; background-color:#009900; width:125px}'))
),
actionButton(inputId = "Submit_op_daily_report", label = "Submit")
#submitButton(text = "Submit", icon("refresh"))
),
box(title = "Choose the fucntion",
selectInput("dataset", label = NULL,
choices = c("applock", "callblock", "cms", "phone performance", "scan", "secret box", "suggest")),
downloadButton('downloadData', 'Download'),
width = 3, height = "180px", status = "success", solidHeader = TRUE)
),
fluidRow(
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Applock", DT::dataTableOutput("applock", width = validateCssUnit(1000))),
tabPanel("Scan", DT::dataTableOutput("scan", width = 1000)),
tabPanel("CMS", DT::dataTableOutput("cms", width = 1000)),
tabPanel("Callblock", DT::dataTableOutput("callblock", width = 1000)),
tabPanel("Private Browser", DT::dataTableOutput("private_browser", width = 1000)),
tabPanel("Phone Performance", DT::dataTableOutput("phone_performance", width = 1000)),
tabPanel("Secret box", DT::dataTableOutput("secret_box", width = 1000)),
tabPanel("Suggest", DT::dataTableOutput("suggest", width = 1000))),
width = 12),
position = "left"
)),
### gp autoreply type
tabItem(tabName = "gp_autoreply_type",
fluidRow(
box(title = "Enter Auto Reply Context:", width = 4, height = "250px", status = "info", solidHeader = TRUE,
textInput(inputId = "input_reply_type_name", label = "Type Name (Ex: enter 'typeP')", value = ""),
textInput(inputId = "input_reply_type_context", label = "Type Context (Ex: enter 'what ever you want')", value = "")),
box(title = "Submit and Result", width = 4, height = "250px", status = "info", solidHeader = TRUE,
#selectizeInput("select_name_gp_type", label = "Who you are?",
# choices = c(Choose='', "Fox Liu" = "Fox.Liu", "Serene Zeng" = "Serene.Zeng", "Pandora Syu" = "Pandora.Syu", "Wing Ho" = "Wing.Ho"),
# selected = NULL, options = list(placeholder = 'Taiwan No.1')),
#passwordInput(inputId = "input_name_gp_type_password", label = "Enter your password:", value = "", width = validateCssUnit(400)),
tags$head(
tags$style(HTML('#Submit_gp_autoreply{color:#000000; background-color:#00CCFF; width:125px}'))
),
actionButton(inputId = "Submit_gp_autoreply", label = "Submit"),
tags$hr(),
verbatimTextOutput(outputId = "edit_gp_type_data")
)
),
fluidRow(
box(
#actionButton(inputId = "refresh_action", label = "Refresh Type Table", icon = icon(name = "refresh", class = "fa-1x")),
title = "The Auto Reply List:", width = 12, hieght = "500", status = "info", solidHeader = TRUE,
DT::dataTableOutput("output_reply_table_df"))
#fluidRow(
# box(title = "testing", width = 6,
# verbatimTextOutput("output_edit_reply_type"))
#)
)),
### mail daily report
tabItem(tabName = "mail_daily_report",
fluidRow(
box(title = "Enter modify content", width = 4, height = "430px", status = "info", solidHeader = TRUE,
                       dateInput(inputId = "input_mail_date", label = "Select Date:", value = Sys.Date()-1, min = "2015-12-14", format = "yyyy-mm-dd", width = validateCssUnit(400)),
textInput(inputId = "input_mail_index", label = "Enter Index:", value = NULL, width = validateCssUnit(400)),
textInput(inputId = "input_mail_owner", label = "Enter Owner:", value = NULL, width = validateCssUnit(400)),
textInput(inputId = "input_mail_track", label = "Enter Track:", value = NULL, width = validateCssUnit(400)),
textInput(inputId = "input_mail_status", label = "Enter Status:", value = NULL, width = validateCssUnit(400))),
#box(title = NULL, width = 4, height = "430px", status = "info", solidHeader = FALSE,
# selectizeInput("select_name_mail", label = "Who you are?",
# choices = c(Choose='', "Fox Liu" = "Fox.Liu", "Serene Zeng" = "Serene.Zeng", "Pandora Syu" = "Pandora.Syu", "Wing Ho" = "Wing.Ho"),
# selected = NULL, options = list(placeholder = 'who is the most beautiful girl in the world?')),
# passwordInput(inputId = "input_name_mail_password", label = "Enter your password:", value = "", width = validateCssUnit(400))
#submitButton(text = "Touch me", icon = icon(name = "gavel", class = "fa-1x"), width = validateCssUnit("50%"))
# ),
box(title = "Submit and Result", width = 4, height = "430px", status = "info", solidHeader = TRUE,
tags$head(
tags$style(HTML('#submit_mail_daily{color:#000000; background-color:#00CCFF; width:125px}'))
),
actionButton(inputId = "submit_mail_daily", label = "Submit"),
tags$hr(),
verbatimTextOutput(outputId = "modifyMailPlatform_dt")),
box(title = NULL, width = 12, height = "1000px", status = "info", solidHeader = FALSE,
DT::dataTableOutput("output_mail_table_df"))
))
))
ui <- dashboardPage(header = header, body = body, sidebar = sidebar, skin = "yellow")
#shinyApp(ui, server)
|
46b8d02333bd03922ec9f1ff32ee102d2ef48080 | 8eb45bfd8ebc817911b23e74cce42cf506c85b54 | /principalMochila.R | e3fe4f8aef246eaa8593896eb799489d3eafa2e4 | [] | no_license | vitoriacfaria/aulas-franco-inteligencia-artificial | 161cb1950447af0bd0cf2072ff0145b4bc72c341 | 3d2fd3aea3d3103b04775702a3c3ed8370cc26b3 | refs/heads/master | 2022-12-10T11:49:11.702438 | 2020-09-10T00:43:59 | 2020-09-10T00:43:59 | 294,262,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 442 | r | principalMochila.R | # Instalando o pacote GA
install.packages("GA")
# Carregando o pacote GA
library("GA")
# Algoritmo genetico
resultado = ga("binary",
fitness = fAdaptMO,
nBits = 8,
popSize = 10,
maxiter = 20,
names = items)
# Check the results
summary(resultado)
# Print the best solution found
summary(resultado)$solution
# Plot the evolution of the fitness value across generations
plot(resultado) |
e26fb0ee3521950aad58189d6f316e0454e870c6 | 569c5a551a0ee233d923c3c75eb0e10f98a679ea | /4_publish_api.R | 22c9f72d9665c323aa2ce99489625d83650e37f0 | [] | no_license | DataStrategist/immunotherapy | 234f0d5d4edcac2b0a6371bc6356142991570334 | 46512de99225dbefc1eee58946f81bd91ad68c03 | refs/heads/master | 2020-07-31T14:47:30.761687 | 2019-06-13T08:56:48 | 2019-06-13T08:56:48 | 210,641,248 | 0 | 0 | null | 2019-09-24T15:50:27 | 2019-09-24T15:50:26 | null | UTF-8 | R | false | false | 484 | r | 4_publish_api.R |
# Since the API is defined by `plumber/plumber.R`, i.e. inside a subfolder,
# first copy the `config.yml` to the `plumber` folder
fs::file_copy("config.yml", "plumber/config.yml", overwrite = TRUE)
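# For reference, `plumber/plumber.R` (not shown here) is expected to define the API
# endpoints and read the copied config.yml; a minimal, assumed sketch of such a file:
#   library(plumber)
#   cfg <- config::get()       # picks up the config.yml copied above
#   #* Health check
#   #* @get /health
#   function() list(status = "ok")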
library(rsconnect)
withr::with_dir(
"plumber",
rsconnect::deployAPI(
api = ".",
# server = "{server}", # <<- edit this line if necessary
# account = "{account}", # <<- edit this line if necessary
appTitle = "Immunotherapy API",
forceUpdate = TRUE
)
)
|
5d3ba3c108d5531b78e21f1ecfa60c7c69e236d6 | 809754c7533aaa3e19d327f3bc7fd57ba5e6c082 | /man/STR_analysis.Rd | 754e65d151e0d98192f04a025320d34e139a37bd | [] | no_license | cran/STRAH | cb72112f9dbb4551762356ba4b587a26e7389683 | 0954be0ce19ffd2f0a65a83e049db598277d2fb3 | refs/heads/master | 2020-12-22T19:10:48.560514 | 2019-04-09T14:05:25 | 2019-04-09T14:05:25 | 236,902,944 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,491 | rd | STR_analysis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STR_analysis.R
\name{STR_analysis}
\alias{STR_analysis}
\title{Analysis of short tandem repeats (STRs) in a given region of any reference genome}
\usage{
STR_analysis(seqName, nr.STRs = 10, nr.mismatch = 0, chrs, STR = "A",
lens.grey = 0:5 * 1000, start.position = NA, end.position = NA,
reverse.comp = FALSE, bed_file, pos_matrix, output_file,
species = BSgenome.Hsapiens.UCSC.hg19::Hsapiens,
dsb_map = STRAH::dsb_map)
}
\arguments{
\item{seqName}{A character string which is the name of the given sequence file under study. Can also be set to "" in order to analyze a defined sequence from any reference genome such as the package BSgenome.Hsapiens.UCSC.hg19 for humans.}
\item{nr.STRs}{An integer value reflecting the minimum length of STRs to be searched for.}
\item{nr.mismatch}{An integer value reflecting the allowed number of mismatches of the short tandem repeats. By default it is set to 0.}
\item{chrs}{A string reflecting the chromosome under study (starting with "chr" and adding either the integers from 1-22 or "X" respectively "Y" for the human genome). This argument can also be a vector of strings to study several chromosomes.}
\item{STR}{A character string for the nucleotide to be searched for. By default one searches for poly-As, hence set to "A".}
\item{lens.grey}{An integer value which is by default a vector of 6 integer values. These values represent the greyzones to be studied left and right from the hotspot regions.}
\item{start.position}{An integer value reflecting the start position of the region to be analyzed. If set to \code{NA} the analysis starts from the beginning of the chromosome.}
\item{end.position}{An integer value reflecting the end position of the region to be analyzed. If set to \code{NA} the analysis is performed until the end of the chromosome.}
\item{reverse.comp}{A logical value by default \code{FALSE}. If set to \code{TRUE} then the reverse complement of the sequence is analyzed.}
\item{bed_file}{A bed file containing the chromosomes, start, and end positions of the region(s) that should be analyzed.}
\item{pos_matrix}{A matrix or dataframe containing the chromosomes, start, and end positions of the region(s) that should be analyzed.}
\item{output_file}{The default is an empty string and does not save an output-file. The output will be saved if the parameter is changed to a user defined string excluding the extension (by default .bed).}
\item{species}{The human genome (version 19) is default but an alternative genome can be provided. For chimpanzees the parameter has to be BSgenome.Ptroglodytes.UCSC.panTro5 (given that the data is installed).}
\item{dsb_map}{The DSB map of the human genome (version 19) is default but an alternative DSB map from a different genome can be provided. This parameter needs to be a data frame with at least 3 columns that contains the chromosome, start, and end position of the DSB. The DSB map for chimpanzees is included in the package.}
}
\value{
The output of the function is a list with the following content:
\item{Sequence Name}{The chromosome with the starting and end position of the region under study is provided.}
\item{Reverse Complement}{An indicator whether the reverse complement was considered}
\item{Number of allowed Mismatches}{The number of allowed mismatches is provided.}
\item{Minimum Length}{The minimum length of the STR to be extracted is provided.}
\item{Number of Matches}{The total number of STR matches of the region is provided.}
\item{Length of STR stretch in bp}{A vector containing the length of STRs per match is provided.}
\item{Start positions}{The starting positions of the STRs are provided.}
\item{Zone}{The zones where the STR is found are provided. 1 reflects within a hotspot, the last integer reflects that it is outside, and the integers between these two reflect the given flanking regions starting with 2 as the next closest region to the hotspot.}
A BED file with the chromosomes, start, and end position of the STRs, length of the STR stretch, the zone where the STR was found, and the specified region that was analyzed are given as columns.
}
\description{
This function separates detected short tandem repeats (STRs) into different zones. These zones are either the hotspot zone defined by the double strand break maps of Pratto et al. (2014) or adjacent flanking zones (greyzones) left and right of the hotspots of user specified lengths. The parameters of the regions under study can be directly given in the function arguments or read in via either a BED-file or a position matrix.
}
\examples{
data(chr6_1580213_1582559)
STR_analysis(seqName = chr6_1580213_1582559, nr.STRs = 10, nr.mismatch = 0, chrs = "chr6",
STR = "A", lens.grey = 0:1*100, start.position = 1580213, end.position = 1582559,
reverse.comp = FALSE,
species = BSgenome.Hsapiens.UCSC.hg19::Hsapiens, dsb_map = STRAH::dsb_map)
\donttest{
STR_analysis(nr.STRs = 10, nr.mismatch = 0, chrs = "chr22", STR = "A", lens.grey = 0:1*100,
start.position = 30000000, end.position = 31000000, reverse.comp = FALSE,
species = BSgenome.Hsapiens.UCSC.hg19::Hsapiens, dsb_map = STRAH::dsb_map)
# If you want to use the function with a different reference genome
# make your choice and install it before:
if(requireNamespace("BSgenome.Ptroglodytes.UCSC.panTro5")) {
STR_analysis(nr.STRs = 10, nr.mismatch = 0, chrs = "chr22", STR = "A", lens.grey = 0:5*1000,
start.position = 30000000, end.position = 31000000, reverse.comp = FALSE,
species = BSgenome.Ptroglodytes.UCSC.panTro5::BSgenome.Ptroglodytes.UCSC.panTro5,
dsb_map = STRAH::dsb_map_chimp_full)
}
}
}
\references{
Heissl, A., et al. (2018) Length asymmetry and heterozygosity strongly influences the evolution of poly-A microsatellites at meiotic recombination hotspots. doi: https://doi.org/10.1101/431841
Pratto, F., et al. (2014). Recombination initiation maps of individual human genomes. Science, 346(6211).
Kuhn RM, et al. (2013) The UCSC genome browser and associated tools, Brief. Bioinform., 14, 144-161.
}
\seealso{
\code{\link{getflank2}}, \code{\link{STR_detection}}
}
\author{
Philipp Hermann, \email{[email protected]}, Monika Heinzl, \email{[email protected]}
Angelika Heissl, Irene Tiemann-Boege, Andreas Futschik
}
\keyword{array}
\keyword{datasets}
\keyword{list}
\keyword{methods}
\keyword{univar}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.