diff --git a/.github/workflows/pkgdown.yaml b/.github/workflows/pkgdown.yaml new file mode 100644 index 0000000..a7276e8 --- /dev/null +++ b/.github/workflows/pkgdown.yaml @@ -0,0 +1,48 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + release: + types: [published] + workflow_dispatch: + +name: pkgdown + +jobs: + pkgdown: + runs-on: ubuntu-latest + # Only restrict concurrency for non-PR jobs + concurrency: + group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::pkgdown, local::. + needs: website + + - name: Build site + run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) + shell: Rscript {0} + + - name: Deploy to GitHub pages 🚀 + if: github.event_name != 'pull_request' + uses: JamesIves/github-pages-deploy-action@v4.5.0 + with: + clean: false + branch: gh-pages + folder: docs diff --git a/DESCRIPTION b/DESCRIPTION index 651c949..30a774a 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -54,3 +54,4 @@ Config/testthat/edition: 3 Encoding: UTF-8 Roxygen: list(markdown = TRUE) RoxygenNote: 7.3.1 +URL: https://hta-pharma.github.io/chef/ diff --git a/R/add_event_index.R b/R/add_event_index.R index 089f10e..c1b2849 100644 --- a/R/add_event_index.R +++ b/R/add_event_index.R @@ -18,8 +18,7 @@ #' #' @noRd -create_flag <- function(dat, var_value_pairs=NULL, singletons=NULL) { - +create_flag <- function(dat, var_value_pairs = NULL, singletons = NULL) { filter_str <- construct_data_filter_logic( var_value_pairs = var_value_pairs, @@ -65,11 +64,16 @@ add_event_index <- function(ep, analysis_data_container) { ep_out[, event_index := llist( create_flag( dat[[1]], - var_value_pairs = list(c(pop_var[[1]], pop_value[[1]]), - c(period_var[[1]], period_value[[1]])), - singletons = c(endpoint_filter[[1]], - endpoint_group_filter[[1]], - custom_pop_filter[[1]]) - )), by = endpoint_id] - ep_out[, dat:=NULL] + var_value_pairs = list( + c(pop_var[[1]], pop_value[[1]]), + c(period_var[[1]], period_value[[1]]) + ), + singletons = c( + endpoint_filter[[1]], + endpoint_group_filter[[1]], + custom_pop_filter[[1]] + ) + ) + ), by = endpoint_id] + ep_out[, dat := NULL] } diff --git a/R/apply_criterion.R b/R/apply_criterion.R index 8a43288..d4aebb2 100644 --- a/R/apply_criterion.R +++ b/R/apply_criterion.R @@ -177,7 +177,7 @@ unnest_ep_by_strata <- function(ep) { crit_accept_endpoint <- strata_var <- stratify_by <- NULL # To satisfy R CMD check ep_accepted <- ep[(crit_accept_endpoint)] - ep_accepted[,strata_var := stratify_by] + ep_accepted[, strata_var := stratify_by] ep_unnested <- tidyr::unnest(ep_accepted, col = strata_var) |> setDT() diff --git a/R/apply_stats.R b/R/apply_stats.R index e49fd35..349330f 100644 --- a/R/apply_stats.R +++ b/R/apply_stats.R @@ -39,7 +39,7 @@ apply_stats <- # produced nm <- names(ep) if (length(nm) <= 3 && - nm[1] == "SKIP_") { + nm[1] == "SKIP_") { return(data.table(NULL)) } type <- match.arg(type) @@ -48,8 +48,7 @@ apply_stats <- ep_cp <- ep[analysis_data_container] if (type == "stat_by_strata_by_trt") { - - if 
(nrow(ep_cp[crit_accept_by_strata_by_trt == TRUE]) == 0){ + if (nrow(ep_cp[crit_accept_by_strata_by_trt == TRUE]) == 0) { ep_cp[, stat_result := list()] } else { ep_cp[crit_accept_by_strata_by_trt == TRUE, stat_result := llist( @@ -70,10 +69,8 @@ apply_stats <- ) ), by = stat_result_id] } - } else if (type == "stat_by_strata_across_trt") { - - if (nrow(ep_cp[crit_accept_by_strata_across_trt == TRUE]) == 0 ){ + if (nrow(ep_cp[crit_accept_by_strata_across_trt == TRUE]) == 0) { ep_cp[, stat_result := list()] } else { ep_cp[crit_accept_by_strata_across_trt == TRUE, stat_result := llist( @@ -87,17 +84,16 @@ apply_stats <- event_index = unlist(event_index), cell_index = unlist(cell_index), subjectid_var = "USUBJID" - ), + ), validator = validate_stat_output, expr_name = fn_name ) ), by = stat_result_id] } - } else if (type == "stat_across_strata_across_trt"){ - if (nrow(ep_cp[crit_accept_by_strata_across_trt == TRUE]) == 0 ){ + } else if (type == "stat_across_strata_across_trt") { + if (nrow(ep_cp[crit_accept_by_strata_across_trt == TRUE]) == 0) { ep_cp[, stat_result := list()] } else { - ep_cp[crit_accept_by_strata_across_trt == TRUE, stat_result := llist( expr_ = try_and_validate( fn_callable[[1]]( @@ -117,5 +113,4 @@ apply_stats <- keep <- setdiff(names(ep_cp), c("fn_callable", "dat", "tar_group")) ep_cp[, .SD, .SDcols = keep] - } diff --git a/R/check_duplicate_functions.R b/R/check_duplicate_functions.R index ea214b3..b2e1ee9 100644 --- a/R/check_duplicate_functions.R +++ b/R/check_duplicate_functions.R @@ -7,7 +7,7 @@ #' @export #' check_duplicate_functions <- function(dir) { - if(!dir.exists(dir)){ + if (!dir.exists(dir)) { stop(paste0("Directory ", dir, " does not exist")) } dir_norm <- normalizePath(dir) @@ -16,8 +16,9 @@ check_duplicate_functions <- function(dir) { fn_names_ls <- lapply(x, function(i) { lang_objs <- Filter(is.language, parse(i)) fun_entries <- - Filter(function(x) - grepl(", function", toString(x)), lang_objs) + Filter(function(x) { + grepl(", function", toString(x)) + }, lang_objs) sapply(fun_entries, function(fun_entry_i) { trimws(strsplit(toString(fun_entry_i), ",")[[1]][2]) }) @@ -37,6 +38,4 @@ check_duplicate_functions <- function(dir) { "\n\n Please change the name so there are no duplicated names, otherwise it will be unclear which function will be used in the program.\n" ) ) - - } diff --git a/R/construct_data_filter_logic.R b/R/construct_data_filter_logic.R index c3f5421..6bd06ed 100644 --- a/R/construct_data_filter_logic.R +++ b/R/construct_data_filter_logic.R @@ -27,15 +27,12 @@ construct_data_filter_logic <- } if (!is.null(singletons)) { singletons_no_na <- singletons[!sapply(singletons, is_null_or_na)] - if(length(singletons_no_na)>0){ + if (length(singletons_no_na) > 0) { singletons_collapsed <- paste0(singletons_no_na, collapse = " & ") } - } - paste0(c(pairs, singletons_collapsed),collapse = " & ") - - + paste0(c(pairs, singletons_collapsed), collapse = " & ") } -is_null_or_na <- function(x){ - is.null(x)||is.na(x) +is_null_or_na <- function(x) { + is.null(x) || is.na(x) } diff --git a/R/eval_fn.R b/R/eval_fn.R index 27f2da6..1df19c9 100644 --- a/R/eval_fn.R +++ b/R/eval_fn.R @@ -1,11 +1,13 @@ - eval_data_fn <- function(fn_list, ...) { out <- lapply(fn_list, function(fn_) { - x <- tryCatch({ - fn_(...) # apply the function i - }, error = function(e) { - return(e) - }) + x <- tryCatch( + { + fn_(...) 
# apply the function i + }, + error = function(e) { + return(e) + } + ) if (inherits(x, "simpleError") || inherits(x, "error")) { return(list( @@ -15,8 +17,8 @@ eval_data_fn <- function(fn_list, ...) { )) } - x[, "TOTAL_":="total"] - x[, "INDEX_":= .I] + x[, "TOTAL_" := "total"] + x[, "INDEX_" := .I] setkey(x, "INDEX_") return(list( @@ -24,11 +26,9 @@ eval_data_fn <- function(fn_list, ...) { error_flag = FALSE, error_message = NULL )) - }) purrr::transpose(out) - } #' Evaluate Endpoint Criteria @@ -44,21 +44,24 @@ eval_data_fn <- function(fn_list, ...) { eval_criteria_endpoint <- function(fn, ...) { dots <- list(...) result <- fn( - dat = dots$dat, - event_index = dots$event_index, - treatment_var = dots$treatment_var, - treatment_refval = dots$treatment_refval, - period_var = dots$period_var, - period_value = dots$period_value, - endpoint_filter = dots$endpoint_filter, - endpoint_group_metadata = dots$endpoint_group_metadata, - stratify_by = dots$stratify_by, - subjectid_var = dots$subjectid_var) + dat = dots$dat, + event_index = dots$event_index, + treatment_var = dots$treatment_var, + treatment_refval = dots$treatment_refval, + period_var = dots$period_var, + period_value = dots$period_value, + endpoint_filter = dots$endpoint_filter, + endpoint_group_metadata = dots$endpoint_group_metadata, + stratify_by = dots$stratify_by, + subjectid_var = dots$subjectid_var + ) if (!(isTRUE(result) | - isFALSE(result))) { - stop("The return value from the endpoint criterion function must be a logical of length 1, i.e.", - "TRUE or FALSE") + isFALSE(result))) { + stop( + "The return value from the endpoint criterion function must be a logical of length 1, i.e.", + "TRUE or FALSE" + ) } result } @@ -86,11 +89,14 @@ eval_criteria_subgroup <- function(fn, ...) { endpoint_filter = dots$endpoint_filter, endpoint_group_metadata = dots$endpoint_group_metadata, strata_var = dots$strata_var, - subjectid_var = dots$subjectid_var) + subjectid_var = dots$subjectid_var + ) if (!(isTRUE(result) | - isFALSE(result))) { - stop("The return value from the endpoint criterion function must be a logical of length 1, i.e.", - "TRUE or FALSE") + isFALSE(result))) { + stop( + "The return value from the endpoint criterion function must be a logical of length 1, i.e.", + "TRUE or FALSE" + ) } result } diff --git a/R/expand_endpoints.R b/R/expand_endpoints.R index c031196..2e2fb01 100644 --- a/R/expand_endpoints.R +++ b/R/expand_endpoints.R @@ -24,16 +24,18 @@ expand_over_endpoints <- function(ep, analysis_data_container) { endpoint_spec_id <- endpoint_label_evaluated <- key_analysis_data <- NULL # To satisfy R CMD check - ep_with_data <- ep[analysis_data_container] ep_with_data[, expand_specification := llist(define_expanded_ep(dat[[1]], group_by[[1]])), - by = 1:nrow(ep_with_data)] + by = 1:nrow(ep_with_data) + ] ep_with_data[["dat"]] <- NULL # Expand by groups. 
If no grouping is present, then add empty group related columns - if(any(!is.na(ep_with_data$expand_specification))){ - ep_exp <- ep_with_data %>% tidyr::unnest(col = expand_specification) %>% setDT() - }else{ + if (any(!is.na(ep_with_data$expand_specification))) { + ep_exp <- ep_with_data %>% + tidyr::unnest(col = expand_specification) %>% + setDT() + } else { ep_exp <- ep_with_data[, .SD, .SDcols = setdiff(names(ep_with_data), "expand_specification")] ep_exp[, endpoint_group_filter := NA] ep_exp[, endpoint_group_metadata := list()] @@ -43,8 +45,7 @@ expand_over_endpoints <- function(ep, analysis_data_container) { # Complete endpoint labels by replacing keywords with values nm_set <- names(ep_exp) - ep_exp[,endpoint_label_evaluated := apply(ep_exp, 1, function(x){ - + ep_exp[, endpoint_label_evaluated := apply(ep_exp, 1, function(x) { xlab <- x[["endpoint_label"]] # Replace keywords. Do only accept keywords which reference to either @@ -53,9 +54,11 @@ expand_over_endpoints <- function(ep, analysis_data_container) { if (grepl(paste0("<", i, ">"), xlab)) { if (is.character(x[[i]]) || is.numeric(x[[i]])) { xlab <- - xlab %>% gsub(paste0("<", i, ">"), - paste0(str_to_sentence_base(x[[i]]), collapse = ","), - .) + xlab %>% gsub( + paste0("<", i, ">"), + paste0(str_to_sentence_base(x[[i]]), collapse = ","), + . + ) } } } @@ -67,9 +70,11 @@ expand_over_endpoints <- function(ep, analysis_data_container) { for (j in group_keywords) { if (!is.null(x$endpoint_group_metadata[[j]])) { xlab <- - xlab %>% gsub(paste0("<", j, ">"), - as.character(x$endpoint_group_metadata[[j]]), - .) + xlab %>% gsub( + paste0("<", j, ">"), + as.character(x$endpoint_group_metadata[[j]]), + . + ) } } } @@ -95,9 +100,9 @@ expand_over_endpoints <- function(ep, analysis_data_container) { ) ) -out <- ep_exp[, .SD, .SDcols=keep] -setkey(out, key_analysis_data) -out[] + out <- ep_exp[, .SD, .SDcols = keep] + setkey(out, key_analysis_data) + out[] } @@ -124,15 +129,16 @@ out[] #' @export #' define_expanded_ep <- function(x, group_by, forced_group_levels = NULL, col_prefix = "endpoint_group") { - if (!is.list(group_by) || all(is.na(group_by))) + if (!is.list(group_by) || all(is.na(group_by))) { return(NA) + } - col_name_meta = paste(col_prefix, "metadata", sep="_") - col_name_filter = paste(col_prefix, "filter", sep="_") + col_name_meta <- paste(col_prefix, "metadata", sep = "_") + col_name_filter <- paste(col_prefix, "filter", sep = "_") out <- index_expanded_ep_groups(x, group_by, forced_group_levels) %>% construct_group_filter(col_name_filter = col_name_filter) - out[, (col_name_meta) := .(list(lapply(.SD, identity))), by=1:nrow(out), .SDcols = names(group_by)] + out[, (col_name_meta) := .(list(lapply(.SD, identity))), by = 1:nrow(out), .SDcols = names(group_by)] out[, .SD, .SDcols = c(col_name_meta, col_name_filter)] } @@ -185,10 +191,11 @@ index_expanded_ep_groups <- function(x, group_by, forced_group_levels = NULL) { index_non_null_group_level(group_by) if (length(specified_group_levels) > 0) { var_group_levels <- names(specified_group_levels) - if (length(var_group_levels) > 1) + if (length(var_group_levels) > 1) { stop("Support for multiple variables specifying group levels not yet supported") + } combos_subset <- - combos_all[tolower(get(var_group_levels)) %in% tolower(specified_group_levels[[var_group_levels]]),] + combos_all[tolower(get(var_group_levels)) %in% tolower(specified_group_levels[[var_group_levels]]), ] } else { combos_subset <- combos_all } @@ -196,14 +203,14 @@ index_expanded_ep_groups <- function(x, 
group_by, forced_group_levels = NULL) { # Expand by all possible combinations of group-by columns in combos_subset. if (length(group_by) == 1) { return(combos_subset) - }else{ + } else { unique_vals <- lapply(combos_subset, unique) combos_expanded <- setDT(expand.grid(unique_vals, stringsAsFactors = FALSE)) return(combos_expanded) } } -construct_group_filter <- function(x, col_name_filter="endpoint_group_filter") { +construct_group_filter <- function(x, col_name_filter = "endpoint_group_filter") { out <- copy(x) filter_str_vec <- purrr::pmap(x, create_condition_str) %>% unlist(recursive = F) @@ -217,7 +224,7 @@ create_condition_str <- function(...) { purrr::map2_chr(names(lst), lst, ~ paste0(.x, ' == "', .y, '"')) # Concatenate all condition strings with ' & ' and return the result - return(paste(conditions, collapse = ' & ')) + return(paste(conditions, collapse = " & ")) } @@ -249,7 +256,6 @@ add_ep_id <- function(x, grp) { #' #' @return A data.table containing all combinations of group levels exapnded with the forced grouping levels. add_forced_group_levels <- function(combos_all, forced_group_levels) { - # If no forced group levels are present then return early if (is.null(forced_group_levels)) { return(combos_all) @@ -280,8 +286,12 @@ add_forced_group_levels <- function(combos_all, forced_group_levels) { # If the forced group levels cover more than the existing group levels then add them to the group level combinations if (!forced_group_levels_already_present) { cols_from_combos_all <- names(combos_all) != names(forced_group_levels) - col_list_combos_all <- lapply(combos_all[, .SD, .SDcols = cols_from_combos_all], function(x){x}) - col_list_2 <-lapply(forced_group_levels, function(x){x}) + col_list_combos_all <- lapply(combos_all[, .SD, .SDcols = cols_from_combos_all], function(x) { + x + }) + col_list_2 <- lapply(forced_group_levels, function(x) { + x + }) grid_list <- c(col_list_combos_all, col_list_2) return(expand.grid(grid_list) |> setDT()) } diff --git a/R/fetch_db_data.R b/R/fetch_db_data.R index d205b7d..0ede50f 100644 --- a/R/fetch_db_data.R +++ b/R/fetch_db_data.R @@ -31,12 +31,16 @@ fetch_db_data <- adam <- fn_dt[fn_type == "data_prepare"] adam[, c("dat", "error_flag", "error_msg") := eval_data_fn( - study_metadata = study_metadata, - fn_list = fn_callable), by = - seq_len(nrow(adam))] - adam[, error_flag := unlist(error_flag)] - if (sum(adam$error_flag) > 0) + study_metadata = study_metadata, + fn = fn_callable + ), + by = + seq_len(nrow(adam)) + ] + adam[, error_flag := unlist(error_flag)] + if (sum(adam$error_flag) > 0) { throw_error_adam(adam) + } return(adam[, .(fn_type, fn_hash, fn_name, fn_call_char, fn_callable, dat)]) } diff --git a/R/filter_db_data.R b/R/filter_db_data.R index c179cbb..995da38 100644 --- a/R/filter_db_data.R +++ b/R/filter_db_data.R @@ -26,15 +26,17 @@ filter_db_data <- function(ep, ep_fn_map, adam_db) { ep_adam <- merge(ep, - ep_fn_map[fn_type == "data_prepare", .(endpoint_spec_id, fn_hash, fn_type)], - by = "endpoint_spec_id") + ep_fn_map[fn_type == "data_prepare", .(endpoint_spec_id, fn_hash, fn_type)], + by = "endpoint_spec_id" + ) ep_adam <- merge(ep_adam, - adam_db[, .(fn_hash, dat)], - by = "fn_hash", - all.x = TRUE, - all.y = FALSE) + adam_db[, .(fn_hash, dat)], + by = "fn_hash", + all.x = TRUE, + all.y = FALSE + ) ep_adam[, dat_analysis := llist( @@ -52,11 +54,14 @@ filter_db_data <- function(ep, ep_fn_map, adam_db) { setnames(ep_adam, "dat_analysis", "dat") ep_adam[, - key_analysis_data := digest::digest(list(fn_hash, - pop_var, - 
pop_value, - custom_pop_filter)), - by = 1:nrow(ep_adam)] + key_analysis_data := digest::digest(list( + fn_hash, + pop_var, + pop_value, + custom_pop_filter + )), + by = 1:nrow(ep_adam) + ] setkey(ep_adam, key_analysis_data) # The data container only keeps one row per unique analysis dataset analysis_data_container <- ep_adam[, .(dat, key_analysis_data)] @@ -64,8 +69,10 @@ filter_db_data <- function(ep, ep_fn_map, adam_db) { analysis_data_container[, unique(analysis_data_container, by = "key_analysis_data")] ep_adam[["dat"]] <- NULL - return(list(ep = ep_adam, - analysis_data_container = analysis_data_container)) + return(list( + ep = ep_adam, + analysis_data_container = analysis_data_container + )) } @@ -90,7 +97,8 @@ filter_adam_db <- filter_str <- construct_data_filter_logic( var_value_pairs = list(c(pop_var, pop_value)), - singletons = custom_pop_filter) + singletons = custom_pop_filter + ) apply_dt_filter(dat, filter_str, type = "filter") } @@ -125,5 +133,4 @@ apply_dt_filter <- out[, event_flag := FALSE] out[eval(parse(text = filter_string)), event_flag := TRUE] return(out) - } diff --git a/R/global.R b/R/global.R index 348185b..7a6a9f7 100644 --- a/R/global.R +++ b/R/global.R @@ -64,5 +64,4 @@ helper_calls_to_imports <- function(){ future.callr::callr tarchetypes::walk_ast targets::tar_warning - } diff --git a/R/group_ep_for_targets.R b/R/group_ep_for_targets.R index c11f9a6..7737f1c 100644 --- a/R/group_ep_for_targets.R +++ b/R/group_ep_for_targets.R @@ -11,6 +11,6 @@ group_ep_for_targets <- function(ep, n_per_group){ targets_group <- NULL # To satisfy R CMD check x <- copy(ep) n_rows <- nrow(x) - x[, targets_group :=(.I-1) %/% n_per_group] + x[, targets_group := (.I - 1) %/% n_per_group] x[] } diff --git a/R/handle_mk_fn.R b/R/handle_mk_fn.R index 2a597d5..8fdd3f0 100644 --- a/R/handle_mk_fn.R +++ b/R/handle_mk_fn.R @@ -16,10 +16,9 @@ handle_mk_fn <- type <- match.arg(type) if (is.null(fn)) { - if (type != "mk_endpoint_def") { nm <- paste0(type, "_scaffold.R") - } else{ + } else { nm <- paste0(type, ".R") } path <- paste0(r_functions_dir, nm) @@ -32,7 +31,6 @@ handle_mk_fn <- open = TRUE ) )) - } if (length(fn) == 1) { return( @@ -58,7 +56,6 @@ handle_mk_fn <- nm, env = env ) - } handle_mk_fn_ <- @@ -81,8 +78,10 @@ handle_mk_fn_ <- fn_out <- paste0(deparse(fn_evaled), "()") } else { if (!is.function(fn_evaled)) { - stop(type, - "_fn must be a call to a function defining the endpoints") + stop( + type, + "_fn must be a call to a function defining the endpoints" + ) } fn_bod <- deparse(fn_evaled) fn_bod[1] <- gsub("\\s+", "", fn_bod[1]) @@ -97,7 +96,7 @@ handle_mk_fn_ <- file.create(path_normalized) writeLines(fn_out, path_normalized) - } else{ + } else { overwrite <- usethis::ui_yeah("Overwrite pre-existing file {path}?") if (overwrite) { @@ -110,5 +109,4 @@ handle_mk_fn_ <- # Open file for user usethis::edit_file(path) return(invisible(normalizePath(path, mustWork = FALSE))) - } diff --git a/R/mk_endpoint_str.R b/R/mk_endpoint_str.R index 549a842..5a81dd6 100644 --- a/R/mk_endpoint_str.R +++ b/R/mk_endpoint_str.R @@ -49,43 +49,43 @@ mk_endpoint_str <- function(study_metadata = NULL, if (!is.function(data_prepare)) { stop("Argument 'data_prepare' needs to be an unquoted function name") } - data_prepare <- substitute(list(data_prepare)) + data_prepare <- substitute(list(data_prepare)) if (is.function(crit_endpoint)) { crit_endpoint <- substitute(list(crit_endpoint)) - } else{ + } else { crit_endpoint <- substitute(crit_endpoint) } if (is.function(crit_by_strata_across_trt)) { 
crit_by_strata_across_trt <- substitute(list(crit_by_strata_across_trt)) - } else{ + } else { crit_by_strata_across_trt <- substitute(crit_by_strata_across_trt) } if (is.function(crit_by_strata_by_trt)) { crit_by_strata_by_trt <- substitute(list(crit_by_strata_by_trt)) - } else{ + } else { crit_by_strata_by_trt <- substitute(crit_by_strata_by_trt) } if (is.function(stat_by_strata_by_trt)) { stat_by_strata_by_trt <- substitute(list(stat_by_strata_by_trt)) - } else{ + } else { stat_by_strata_by_trt <- substitute(stat_by_strata_by_trt) } if (is.function(stat_by_strata_across_trt)) { stat_by_strata_across_trt <- substitute(list(stat_by_strata_across_trt)) - } else{ + } else { stat_by_strata_across_trt <- substitute(stat_by_strata_across_trt) } if (is.function(stat_across_strata_across_trt)) { stat_across_strata_across_trt <- substitute(list(stat_across_strata_across_trt)) - } else{ + } else { stat_across_strata_across_trt <- substitute(stat_across_strata_across_trt) } @@ -104,7 +104,7 @@ mk_endpoint_str <- function(study_metadata = NULL, treatment_refval = treatment_refval, period_var = period_var, period_value = period_value, - custom_pop_filter=custom_pop_filter, + custom_pop_filter = custom_pop_filter, endpoint_filter = endpoint_filter, group_by = group_by, stratify_by = stratify_by, diff --git a/R/mk_userdef_fn_dt.R b/R/mk_userdef_fn_dt.R index 371dad0..220c755 100644 --- a/R/mk_userdef_fn_dt.R +++ b/R/mk_userdef_fn_dt.R @@ -32,17 +32,18 @@ mk_userdef_fn_dt <- function(x, env=parent.frame()){ fn <- fn_name <- fn_hash <- fn_callable <- NULL # To satisfy R CMD check # Take only the unique rows based on the hash. - unique_hash_table <- unique(x, by="fn_hash") + unique_hash_table <- unique(x, by = "fn_hash") # Run the function over all rows functions_table <- unique_hash_table[, - generate_function_table_row(fn_type, fn, fn_name, fn_hash, env), - by=seq_len(nrow(unique_hash_table))] + generate_function_table_row(fn_type, fn, fn_name, fn_hash, env), + by = seq_len(nrow(unique_hash_table)) + ] # Validate functions by their expected inputs. functions_table[, - validate_usr_fn_args(fn=fn_callable[[1]], fn_type = fn_type, fn_name = fn_name), - by=seq_len(nrow(functions_table)) + validate_usr_fn_args(fn = fn_callable[[1]], fn_type = fn_type, fn_name = fn_name), + by = seq_len(nrow(functions_table)) ] # Drop the column used for the running. @@ -66,10 +67,8 @@ mk_userdef_fn_dt <- function(x, env=parent.frame()){ #' @param env The environment in which to evaluate the function. #' #' @return A `data.table` row with the function's details. 
-generate_function_table_row <- function(fn_type, fn, fn_name, fn_hash, env){ - - - if (is.null(fn[[1]])){ +generate_function_table_row <- function(fn_type, fn, fn_name, fn_hash, env) { + if (is.null(fn[[1]])) { out_row <- data.table::data.table( fn_type = as.character(fn_type), fn_hash = fn_hash, @@ -80,12 +79,12 @@ generate_function_table_row <- function(fn_type, fn, fn_name, fn_hash, env){ return(out_row) } out_row <- data.table::data.table( - fn_type = as.character(fn_type), - fn_hash = fn_hash, - fn_name = fn_name, - fn_call_char = as.character(fn), - fn_callable = parse_function_input(eval(fn[[1]], envir = env)) - ) + fn_type = as.character(fn_type), + fn_hash = fn_hash, + fn_name = fn_name, + fn_call_char = as.character(fn), + fn_callable = parse_function_input(eval(fn[[1]], envir = env)) + ) return(out_row) } diff --git a/R/parse_function_inputs.R b/R/parse_function_inputs.R index 94c9d44..f5a7e5b 100644 --- a/R/parse_function_inputs.R +++ b/R/parse_function_inputs.R @@ -18,17 +18,18 @@ parse_function_input <- function(fn_input) { # returns functions with arguments wrapped in partial # Should include checks to ensure function and args are valid - if (length(fn_input) == 1){ - if(is.list(fn_input)){ + if (length(fn_input) == 1) { + if (is.list(fn_input)) { fn_input <- fn_input[[1]] } - if(!is.function(fn_input)) + if (!is.function(fn_input)) { stop("`", fn_input, "` is not a valid function") + } # check it is a function return(fn_input) } else { # Check that arguments are valid. - #... + # ... return(purrr::partial(fn_input[[1]], !!!fn_input[-1])) } diff --git a/R/prepare_for_stats.R b/R/prepare_for_stats.R index 3bcef53..74bb3aa 100644 --- a/R/prepare_for_stats.R +++ b/R/prepare_for_stats.R @@ -41,8 +41,7 @@ prepare_for_stats <- function(ep, type <- match.arg(type) # Map stat function type to associated criterion variable - crit_var <- switch( - type, + crit_var <- switch(type, "stat_by_strata_by_trt" = "crit_accept_by_strata_by_trt", "stat_by_strata_across_trt" = "crit_accept_by_strata_across_trt", "stat_across_strata_across_trt" = "crit_accept_by_strata_across_trt", @@ -50,8 +49,7 @@ prepare_for_stats <- function(ep, ) # Set of columns used for slicing the population depending on the type of stat function - grouping_cols <- switch( - type, + grouping_cols <- switch(type, "stat_by_strata_by_trt" = c("strata_var", "treatment_var"), "stat_by_strata_across_trt" = c("strata_var"), "stat_across_strata_across_trt" = c("strata_var", "treatment_var"), @@ -64,15 +62,14 @@ prepare_for_stats <- function(ep, # 1) no endpoint rows are accepted by criterion # 2) no stat functions are supplied # 3) no stratum is accepted when preparing for stat_across_strata_across_trt - if(nrow(ep_accepted) == 0 | - nrow(fn_map[fn_type == type]) == 0 | - (type == "stat_across_strata_across_trt" & !any(ep_accepted[[grouping_cols[[1]]]] != "TOTAL_")) - ){ + if (nrow(ep_accepted) == 0 | + nrow(fn_map[fn_type == type]) == 0 | + (type == "stat_across_strata_across_trt" & !any(ep_accepted[[grouping_cols[[1]]]] != "TOTAL_")) + ) { return(data.table::data.table(SKIP_ = TRUE)) } if (type %in% c("stat_by_strata_by_trt", "stat_by_strata_across_trt")) { - # Expand endpoints by treatment and/or strata ep_expanded <- expand_ep_for_stats( @@ -80,7 +77,7 @@ prepare_for_stats <- function(ep, grouping_cols = grouping_cols, analysis_data_container = analysis_data_container, data_col = data_col, - id_col = id_col, + id_col = id_col, col_prefix = "stat" ) @@ -90,22 +87,24 @@ prepare_for_stats <- function(ep, # Join stat function 
data so that each row represent a function call ep_fn <- merge(ep_expanded, - fn_map[fn_type == type], - by = "endpoint_spec_id", - allow.cartesian = TRUE) + fn_map[fn_type == type], + by = "endpoint_spec_id", + allow.cartesian = TRUE + ) # Create unique id for stat function call ep_fn[, stat_result_id := paste(get(id_col), fn_hash, formatC(.I, width = 4, format = "d", flag = "0"), - sep = "-")] + sep = "-" + )] return(ep_fn) - } else{ - + } else { ep_fn <- merge(ep_accepted, - fn_map[fn_type == type], - by = "endpoint_spec_id", - allow.cartesian = TRUE) + fn_map[fn_type == type], + by = "endpoint_spec_id", + allow.cartesian = TRUE + ) # For stat_across_strata_across_trt we test interaction effect between treatment and strata # So Treatment ~ SEX we therefore add an empty filter and an metadata containing all the levels. @@ -114,35 +113,38 @@ prepare_for_stats <- function(ep, ep_with_data <- ep_fn[analysis_data_container] ep_sg <- ep_with_data[get(grouping_cols[1]) != "TOTAL_", ] ep_sg <- - ep_sg[, c("stat_event_exist", - "stat_metadata", - "stat_filter", - "stat_result_id", - "cell_index") := - c(TRUE, - llist(c( - list_group_and_levels(get(data_col)[[1]], get(grouping_cols[1])), - list_group_and_levels(get(data_col)[[1]], get(grouping_cols[2])) - )), - "", - paste( - get(id_col), - fn_hash, - formatC( - .I, - width = 4, - format = "d", - flag = "0" - ), - sep="-" - ), - llist(get(data_col)[[1]][["INDEX_"]])), - by = 1:nrow(ep_sg)] + ep_sg[, c( + "stat_event_exist", + "stat_metadata", + "stat_filter", + "stat_result_id", + "cell_index" + ) := + c( + TRUE, + llist(c( + list_group_and_levels(get(data_col)[[1]], get(grouping_cols[1])), + list_group_and_levels(get(data_col)[[1]], get(grouping_cols[2])) + )), + "", + paste( + get(id_col), + fn_hash, + formatC( + .I, + width = 4, + format = "d", + flag = "0" + ), + sep = "-" + ), + llist(get(data_col)[[1]][["INDEX_"]]) + ), + by = 1:nrow(ep_sg) + ] ep_sg[, (data_col) := NULL] return(ep_sg) - } - } @@ -161,11 +163,10 @@ prepare_for_stats <- function(ep, #' @noRd list_group_and_levels <- function( data, - grouping_col -){ + grouping_col) { l <- list(data[, unique(get(grouping_col))]) names(l) <- grouping_col - return (l) + return(l) } #' Expand Endpoint Data for Statistics @@ -198,37 +199,41 @@ expand_ep_for_stats <- function( key_analysis_data <- stat_expand_spec <- cell_index <- NULL # To satisfy R CMD check - name_expand_col <- paste(col_prefix, "expand_spec", sep="_") - - ep[,"_i_" := .I] + ep[, "_i_" := .I] setkey(ep, key_analysis_data) ep_with_data <- ep[analysis_data_container, nomatch = NULL] ep_with_data[, - stat_expand_spec := llist( - define_expansion_cell_from_data( - row=.SD, - grouping_cols = grouping_cols, - data_col = data_col, - col_prefix = col_prefix - )), - by = "_i_"] + stat_expand_spec := llist( + define_expansion_cell_from_data( + row = .SD, + grouping_cols = grouping_cols, + data_col = data_col, + col_prefix = col_prefix + ) + ), + by = "_i_" + ] # We remove the clinical data, otherwise the memory usage during the unnest # step will explode - ep_with_data[, (data_col):=NULL] + ep_with_data[, (data_col) := NULL] - ep_exp <- ep_with_data %>% tidyr::unnest(col = stat_expand_spec) %>% setDT() + ep_exp <- ep_with_data %>% + tidyr::unnest(col = stat_expand_spec) %>% + setDT() setkey(ep_exp, key_analysis_data) - ep_exp[,"_i_":= .I] + ep_exp[, "_i_" := .I] ep_exp_with_data <- ep_exp[analysis_data_container, nomatch = NULL] - filter_col_name <- paste(col_prefix, "filter", sep="_") + filter_col_name <- paste(col_prefix, "filter", 
sep = "_") ep_exp_with_data[, cell_index := llist(create_flag(get(data_col)[[1]], - singletons = c(get(filter_col_name)[[1]]))), - by = "_i_"] + singletons = c(get(filter_col_name)[[1]]) + )), + by = "_i_" + ] ep_exp_with_data[, (data_col) := NULL] ep_exp_with_data[, "_i_" := NULL] @@ -255,37 +260,36 @@ define_expansion_cell_from_data <- function( row, grouping_cols, data_col, - col_prefix -){ - if (is.character(grouping_cols)){ + col_prefix) { + if (is.character(grouping_cols)) { grouping_cols <- c(grouping_cols) } stopifnot(all(grouping_cols %in% names(row))) # Get the actual grouping variables - grouping_col_values <- row[, .SD, .SDcols=grouping_cols] - grouping_var_list <- vector(mode="list", length(grouping_col_values)) + grouping_col_values <- row[, .SD, .SDcols = grouping_cols] + grouping_var_list <- vector(mode = "list", length(grouping_col_values)) names(grouping_var_list) <- grouping_col_values - if(row[["only_strata_with_events"]]){ - dat <- row[,get(data_col)][[1]][row[["event_index"]]] - }else{ - dat <- row[,get(data_col)][[1]] + if (row[["only_strata_with_events"]]) { + dat <- row[, get(data_col)][[1]][row[["event_index"]]] + } else { + dat <- row[, get(data_col)][[1]] } # If treatment is part of grouping then force all treatment arms to be present in the group levels - if("treatment_var" %in% grouping_cols){ - trt_arms <- data.table(unique(row[,get(data_col)][[1]][,get(row[["treatment_var"]])])) + if ("treatment_var" %in% grouping_cols) { + trt_arms <- data.table(unique(row[, get(data_col)][[1]][, get(row[["treatment_var"]])])) names(trt_arms) <- row[["treatment_var"]] - }else{ + } else { trt_arms <- NULL } exp_dt <- define_expanded_ep(x = dat, group_by = grouping_var_list, forced_group_levels = trt_arms, col_prefix = col_prefix) - return (exp_dt) + return(exp_dt) } -add_total_meta <- function(x, meta_col, total_meta){ +add_total_meta <- function(x, meta_col, total_meta) { llist(c(x[[meta_col]][[1]], total_meta)) } diff --git a/R/try_and_validate.R b/R/try_and_validate.R index 31956cd..3eaea3c 100644 --- a/R/try_and_validate.R +++ b/R/try_and_validate.R @@ -16,7 +16,7 @@ #' @export try_and_validate <- function(expr_, expr_name = NA_character_, - #TODO Allow forwarding of meaning full names. + # TODO Allow forwarding of meaning full names. debug_dir = "debug", validator = function(expr_result) { NA_character_ @@ -45,15 +45,17 @@ try_and_validate <- function(expr_, expr_result <- try(expr = expr_, silent = TRUE) if (inherits(expr_result, "try-error")) { - err_msg <- paste0("Failed to EVALUATE function with error:", - "\n ", expr_result[[1]]) - + err_msg <- paste0( + "Failed to EVALUATE function with error:", + "\n ", expr_result[[1]] + ) } else if (!is.na(validator_err <- validator(expr_result))) { - #validate output + # validate output err_msg <- paste("Failed to VALIDATE function output with error:", - validator_err, - sep = "\n") + validator_err, + sep = "\n" + ) } else { # Return valid result return(expr_result) @@ -79,8 +81,9 @@ try_and_validate <- function(expr_, # Prepare error message. 
full_error <- paste(sprintf("\nError during evaluation of: %s", expr_name), - err_msg, - sep = "\n") + err_msg, + sep = "\n" + ) if (!stage_debugging) { stop(full_error) @@ -106,7 +109,6 @@ try_and_validate <- function(expr_, sep = "\n" ) stop(full_error) - } @@ -142,7 +144,7 @@ stage_debug <- norm_dir <- normalizePath(debug_dir) filepath <- file.path(norm_dir, paste0(fn_name, ".Rdata")) - saveRDS(debug_env, file = filepath) #Set dynamically + saveRDS(debug_env, file = filepath) # Set dynamically return(filepath) } @@ -173,9 +175,9 @@ load_debug_session <- function(debug_file) { if (is.primitive(debug_env$fn)) { cli::cli_alert_danger( - "The inspected function ({.val {deparse(debug_env$fn)}}) is a + "The inspected function ({.val {deparse(debug_env$fn)}}) is a primitive and cannot be inspected using debugonce.\ - You can still load the debug environemnt and inspect + You can still load the debug environemnt and inspect inputs and function: readRDS({.path {debug_file}})", wrap = TRUE ) @@ -193,7 +195,7 @@ load_debug_session <- function(debug_file) { extra_libraries <- setdiff(debug_env$ns, search()) if (length(extra_libraries) > 0) { - cli::cli_alert_warning("The following libraries was available at runtime + cli::cli_alert_warning("The following libraries was available at runtime but isn't currently.") cli::cli_li(extra_libraries) } @@ -219,7 +221,7 @@ load_debug_session <- function(debug_file) { validate_crit_output <- function(output) { if (!(isTRUE(output) | isFALSE(output))) { paste( - "The return value from the endpoint criterion + "The return value from the endpoint criterion function must be a logical of length 1, i.e.", "TRUE or FALSE" ) @@ -240,8 +242,10 @@ validate_crit_output <- function(output) { validate_stat_output <- function(output) { # if not a DT return early if (!data.table::is.data.table(output)) { - err_msg <- paste0("Expected (data.table::data.table). Found: ", - class(output)) + err_msg <- paste0( + "Expected (data.table::data.table). Found: ", + class(output) + ) return(err_msg) } @@ -263,10 +267,12 @@ validate_stat_output <- function(output) { " )" ) if (length(actual_diff) > 0) { - err_msg <- paste0(err_msg, - "\n\tExtra items in actual: ( ", - paste(actual_diff, collapse = ", "), - " )") + err_msg <- paste0( + err_msg, + "\n\tExtra items in actual: ( ", + paste(actual_diff, collapse = ", "), + " )" + ) } if (length(expected_diff) > 0) { err_msg <- paste0( diff --git a/R/unnest_by_fns.R b/R/unnest_by_fns.R index 13cd648..7e82fb6 100644 --- a/R/unnest_by_fns.R +++ b/R/unnest_by_fns.R @@ -20,7 +20,7 @@ unnest_by_fns <- function(dt, cols) { } missing_cols <- setdiff(cols, colnames(dt)) - if(length(missing_cols)>0){ + if (length(missing_cols) > 0) { stop("The following columns are not found in the provided data.table to unnest:\n-", paste0(missing_cols, collapse = "\n-")) } long <- @@ -32,18 +32,18 @@ unnest_by_fns <- function(dt, cols) { variable.factor = FALSE, ) - long[, fn := purrr::map(fn_list, function(i) - as.list(i[-1]))] - long[, fn_name:=character()] + long[, fn := purrr::map(fn_list, function(i) { + as.list(i[-1]) + })] + long[, fn_name := character()] long[, fn_name := purrr::map(long$fn, function(i) { - x <- names(i) if (is.null(x)) { return(NA_character_) } return(x) })] - if(nrow(long)==1){ + if (nrow(long) == 1) { x <- long } else { x <- tidyr::unnest(long, c(fn, fn_name)) %>% as.data.table() @@ -53,22 +53,21 @@ unnest_by_fns <- function(dt, cols) { # When no fn get a name in the tidy::unnest(), the fn_name gets transformed # to a logical. 
This needs to be converted back to a character so names can be # assigned downstream - x[,fn_name:=as.character(fn_name)] + x[, fn_name := as.character(fn_name)] # Index rows where we have to assign function name manually. This happens # because when there is only one function provided the logic is different than # when multiple are provided inx <- x[, purrr::map_lgl(fn, is.name)] & x[, is.na(fn_name)] - x[inx==TRUE, fn_name := as.character(fn)] + x[inx == TRUE, fn_name := as.character(fn)] # For unnamed fns supplied in style of `list(c(fn, arg))` we need a different # approach inx <- x[, is.na(fn_name)] - if(length(inx)>0) { + if (length(inx) > 0) { fn_names <- vapply(x[inx, fn], function(i) { - if (length(i) == 3 && i[[1]] == "::") { return(deparse(i)) } @@ -78,5 +77,5 @@ unnest_by_fns <- function(dt, cols) { } keep <- setdiff(colnames(x), c(cols, "fn_list")) - return(x[, .SD, .SDcols=keep]) + return(x[, .SD, .SDcols = keep]) } diff --git a/R/unnest_endpoint_functions.R b/R/unnest_endpoint_functions.R index f4a8f8b..e4ec209 100644 --- a/R/unnest_endpoint_functions.R +++ b/R/unnest_endpoint_functions.R @@ -28,7 +28,8 @@ unnest_endpoint_functions <- function(endpoint_defs, "stat_across_strata_across_trt", "crit_endpoint", "crit_by_strata_by_trt", - "crit_by_strata_across_trt"), + "crit_by_strata_across_trt" + ), env = parent.frame()) { fn_hash <- fn <- fn_type <- fn_name <- NULL # To satisfy R CMD check endpoints_long <- unnest_by_fns(endpoint_defs, fn_cols) diff --git a/R/use_chef.R b/R/use_chef.R index 27e89b7..ddcd5f3 100644 --- a/R/use_chef.R +++ b/R/use_chef.R @@ -53,7 +53,7 @@ use_chef <- mk_criteria_fn = NULL, branch_group_size = 100, env = parent.frame()) { - file_name = paste0("pipeline_", pipeline_id, ".R") + file_name <- paste0("pipeline_", pipeline_id, ".R") mk_ep_def_template <- "template-mk_endpoint_def.R" # Create directories if none exist @@ -76,8 +76,9 @@ use_chef <- pkg_file_exists <- file.exists(pkg_file_path_norm) if (!pkg_file_exists) { usethis::use_template("packages_template.R", - package = "chef", - save_as = pkg_file_path) + package = "chef", + save_as = pkg_file_path + ) } # Write the pipeline scaffold @@ -157,7 +158,6 @@ run_pipeline <- function(pipeline_id = NULL, stage_pipeline(pipeline_name = nm) targets::tar_make() - } #' Stage a {targets} pipeline so that you can work interactively with it @@ -186,5 +186,4 @@ stage_pipeline <- } Sys.setenv(TAR_PROJECT = nm) - } diff --git a/R/utils.R b/R/utils.R index b402dc2..f373026 100644 --- a/R/utils.R +++ b/R/utils.R @@ -10,6 +10,6 @@ llist <- function(...) { list(list(...)) } -str_to_sentence_base <- function(x){ +str_to_sentence_base <- function(x) { paste0(toupper(substring(x, 1, 1)), substring(x, 2)) } diff --git a/R/validate_endpoints_def.R b/R/validate_endpoints_def.R index 3b02712..a60c660 100644 --- a/R/validate_endpoints_def.R +++ b/R/validate_endpoints_def.R @@ -44,33 +44,31 @@ validate_endpoints_def <- function(endpoint_base) { ), call. 
= FALSE ) - } - validate_period_specification <- + validate_period_specification <- function(period_var, period_value) { arg_list <- list(period_var = period_var, period_value = period_value) if (!anyNA(arg_list)) { return(invisible(TRUE)) } - if (all(is.na(arg_list))){ + if (all(is.na(arg_list))) { return(invisible(TRUE)) } - missing_arg <- - arg_list[vapply(arg_list, is.na, logical(1L))] |> names() - non_missing_arg <- - arg_list[!vapply(arg_list, is.na, logical(1L))] |> names() - stop( - "`",non_missing_arg,"`", - " is supplied in the endpoint specification, but ", - "`",missing_arg,"`", - " is not. Either both need to be provided (non-`NA` values), or both need to be empty", - call. = FALSE - - ) -} + missing_arg <- + arg_list[vapply(arg_list, is.na, logical(1L))] |> names() + non_missing_arg <- + arg_list[!vapply(arg_list, is.na, logical(1L))] |> names() + stop( + "`", non_missing_arg, "`", + " is supplied in the endpoint specification, but ", + "`", missing_arg, "`", + " is not. Either both need to be provided (non-`NA` values), or both need to be empty", + call. = FALSE + ) + } validate_period_specification(endpoint_base$period_var, endpoint_base$period_value) @@ -86,13 +84,14 @@ validate_endpoints_def <- function(endpoint_base) { paste0( "The following columns in the endpoint definition data.table have the incorrect class:\n-", paste0(cli::style_bold( - missmatch$col_name), collapse = "\n-"), + missmatch$col_name + ), collapse = "\n-"), ". \n\n Please check the endpoint_definition function" ) ) } col_class_expected[grepl("data_prepare|stat_by_strata_by_trt|analysis_stats", col_name), class_nested := - "function"] + "function"] check_fn_calls(col_class_expected, endpoint_base) @@ -108,7 +107,7 @@ build_expected_col_classes <- function() { treatment_refval = "character", period_var = "character", period_value = "character", - custom_pop_filter="character", + custom_pop_filter = "character", endpoint_filter = "character", group_by = "list", group_by = "character", @@ -123,8 +122,10 @@ build_expected_col_classes <- function() { crit_by_strata_across_trt = "list", only_strata_with_events = "logical" ) - data.table::data.table(col_name = names(col_class_expected_vec), - col_class = col_class_expected_vec) + data.table::data.table( + col_name = names(col_class_expected_vec), + col_class = col_class_expected_vec + ) } build_actual_col_classes <- function(endpoint_base) { @@ -138,12 +139,15 @@ build_actual_col_classes <- function(endpoint_base) { check_fn_calls <- function(col_class_expected, ep_def) { class_nested <- NULL # To satisfy R CMD check fn_inx <- col_class_expected[class_nested == "function"] - lapply(fn_inx$col_name, function(i) - lapply(ep_def[[i]], error_not_fn, i)) + lapply(fn_inx$col_name, function(i) { + lapply(ep_def[[i]], error_not_fn, i) + }) } error_not_fn <- function(x, i) { - if(is.null(x)|| is.null(x[[1]]))return(message("No functions provided for: ", i)) + if (is.null(x) || is.null(x[[1]])) { + return(message("No functions provided for: ", i)) + } if (!is.call(x)) { stop( "The argument to ", diff --git a/R/validate_usr_fn_args.R b/R/validate_usr_fn_args.R index b36b3cf..4574fa5 100644 --- a/R/validate_usr_fn_args.R +++ b/R/validate_usr_fn_args.R @@ -59,7 +59,6 @@ validate_usr_fn_args <- function(fn, "treatment_refval", "subjectid_var" ), - stat_across_strata_across_trt = c( "dat", diff --git a/README.Rmd b/README.Rmd index d005af3..0a92807 100644 --- a/README.Rmd +++ b/README.Rmd @@ -13,37 +13,17 @@ knitr::opts_chunk$set(echo = TRUE) # Purpose -To provide an 
open-source opinionated framework for setting up pipelines for AMNOG-style HTA analyses. {chef} is currently in a development phase, so should be used with caution. +To provide an open-source opinionated framework for setting up pipelines for AMNOG-style HTA analyses in conjunction with the [ramnog](https://github.com/hta-pharma/ramnog) package. Chef is currently in a development phase, so should be used with caution. # Aim -The {chef} aim is that a programmer has to write minimal code, and no programming to in order to set-up a new AMNOG-type analyses. For each study, the programmer will need to make, adjust, or check the following four types of code: +The chef aim is that a programmer has to write minimal code, and no programming to in order to set-up a new AMNOG-type analyses. For each study, the programmer will need to make, adjust, or check the following four types of code: -1. The definition of each endpoint (or group of endpoints). See `vignette("endpoint_definitions")` -2. A set of adam functions that makes any modifications to existing ADaM datasets (e.g., new age grouping in ADSL), or makes new ADaM datasets if none exist for the required output. See `vignette("mk_adam")` -3. (If needed) Define a set of criteria for when an endpoint should be included in the results. A library of these criteria are stored in the companion package {chefCriteria}. See `vignette("criteria_functions")` -4. A specification of the statistical functions used to summarize/analyze the data. (see section) +1. The definition of each endpoint (or group of endpoints). +2. A set of adam functions that makes any modifications to existing ADaM datasets (e.g., new age grouping in ADSL), or makes new ADaM datasets if none exist for the required output. +3. (If needed) Define a set of criteria for when an endpoint should be included in the results. A library of these criteria are stored in the companion package {chefCriteria} +4. A specification of the statistical functions used to summarize/analyze the data. -Behind the scenes, {chef} uses the {[targets](https://books.ropensci.org/targets/)} package to handle the pipelines. +Behind the scenes chef uses the {[targets](https://books.ropensci.org/targets/)} package to handle the pipelines. -# Setup - -## Install githooks - -This project supports two styles of githooks. - -1. The first style is to use the githooks provided in the `.githooks` directory. To use these hooks, run the following command in the root of the project: - - These hooks are very simple just blocking the commit to protected branches. - -``` -git config --local core.hooksPath .githooks/ -``` - -2. The second is to install the precommit tool (for linux) [precommit](https://pre-commit.com/). - - These are much more powerful and can be used to run checks on the code before it is committed. - -``` -pipx install pre-commit -# Then run in the root of repo: -pre-commit install -``` +For help and guidance on building an analysis pipeline, see the [ramnog website](https://hta-pharma.github.io/ramnog/) diff --git a/README.md b/README.md index 5e6e1da..0fa5a7b 100644 --- a/README.md +++ b/README.md @@ -9,58 +9,30 @@ # Purpose To provide an open-source opinionated framework for setting up pipelines -for AMNOG-style HTA analyses. {chef} is currently in a development -phase, so should be used with caution. +for AMNOG-style HTA analyses in conjunction with the +[ramnog](https://github.com/hta-pharma/ramnog) package. Chef is +currently in a development phase, so should be used with caution.
# Aim -The {chef} aim is that a programmer has to write minimal code, and no +The chef aim is that a programmer has to write minimal code, and no programming to in order to set-up a new AMNOG-type analyses. For each study, the programmer will need to make, adjust, or check the following four types of code: -1. The definition of each endpoint (or group of endpoints). See - `vignette("endpoint_definitions")` +1. The definition of each endpoint (or group of endpoints). 2. A set of adam functions that makes any modifications to existing ADaM datasets (e.g., new age grouping in ADSL), or makes new ADaM - datasets if none exist for the required output. See - `vignette("mk_adam")` + datasets if none exist for the required output. 3. (If needed) Define a set of criteria for when an endpoint should be included in the results. A library of these criteria are stored in - the companion package {chefCriteria}. See - `vignette("criteria_functions")` + the companion package {chefCriteria} 4. A specification of the statistical functions used to - summarize/analyze the data. (see section) + summarize/analyze the data. -Behind the scenes, {chef} uses the +Behind the scenes chef uses the {[targets](https://books.ropensci.org/targets/)} package to handle the pipelines. -# Setup - -## Install githooks - -This project supports two styles of githooks. - -1. The first style is to use the githooks provided in the `.githooks` - directory. To use these hooks, run the following command in the root - of the project: - -- These hooks are very simple just blocking the commit to protected - branches. - - - - git config --local core.hooksPath .githooks/ - -2. The second is to install the precommit tool (for linux) - [precommit](https://pre-commit.com/). - -- These are much more powerful and can be used to run checks on the code - before it is committed. 
- - - pipx install pre-commit - # Then run in the root of repo: - pre-commit install +For help and guidance on building an analysis pipeline, see the +[ramnog website](https://hta-pharma.github.io/ramnog/) diff --git a/_pkgdown.yml b/_pkgdown.yml index e432f19..d27a13f 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -1,45 +1,3 @@ -url: ~ +url: https://hta-pharma.github.io/chef/ template: bootstrap: 5 - -articles: - -- title: Overview - navbar: Overview - contents: - - ep_overview - -- title: Endpoint Population and Outcome Specification - navbar: Endpoint Population and Outcome Specification - contents: - - ep_spec_adam_data - - ep_spec_treatment_arms - - ep_spec_population_def - - ep_spec_event_def - - ep_spec_strata_def - - ep_spec_label - -- title: Endpoint Statistics - navbar: Endpoint Statistics - contents: - - starts_with('methods_') - -- title: Endpoint Results - navbar: Endpoint Results - contents: - - starts_with("results") - -- title: Endpoint Examples - navbar: Endpoint Examples - contents: - - starts_with("example_") - -- title: Debugging - navbar: Debugging - contents: - - debugging - -- title: Development - navbar: Development - contents: - - starts_with("dev") diff --git a/tests/testthat/helper-custom_expect.R b/tests/testthat/helper-custom_expect.R index f971a04..14d3128 100644 --- a/tests/testthat/helper-custom_expect.R +++ b/tests/testthat/helper-custom_expect.R @@ -1,17 +1,17 @@ -expect_str_contains <- function(object, substring){ +expect_str_contains <- function(object, substring) { # Capture objet and label act <- quasi_label(rlang::enquo(object), arg = "object") # expect act$character <- as.character(object) expect( - grepl(substring, act$character, fixed=T), + grepl(substring, act$character, fixed = T), sprintf("%s Does not contain the substring\n(%s)%s\n(sub)'%s'", act$lab, act$lab, act$character, substring) ) invisible(act$character) } -expect_na_or_null <- function(object){ +expect_na_or_null <- function(object) { # Capture objet and label act <- quasi_label(rlang::enquo(object), arg = "object") @@ -39,7 +39,7 @@ expect_same_items <- function(actual, expected, ...)
{ exp_msg <- paste0( "Expected: (", paste(expected_sorted, collapse = ", "), ")", "\nFound: (", paste(actual_sorted, collapse = ", ") - ) + ) if (length(actual_diff) > 0) { exp_msg <- paste(exp_msg, "\nExtra items in actual:", actual_diff) } diff --git a/tests/testthat/helper-mk_adam.R b/tests/testthat/helper-mk_adam.R index d1a1237..7acf74f 100644 --- a/tests/testthat/helper-mk_adam.R +++ b/tests/testthat/helper-mk_adam.R @@ -1,41 +1,50 @@ mk_adae <- function(study_metadata) { - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] + adsl[, AGEGR2 := data.table::fcase( + AGE < 70, "AGE < 70", + AGE >= 70, "AGE >= 70" + )] adae <- data.table::as.data.table(pharmaverseadam::adae) adae_out <- - merge(adsl, adae[, c(setdiff(names(adae), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) + merge(adsl, adae[, c(setdiff(names(adae), names(adsl)), "USUBJID"), + with = + F + ], by = "USUBJID", all = TRUE) adae_out[] } mk_adex <- function(study_metadata) { - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] + adsl[, AGEGR2 := data.table::fcase( + AGE < 70, "AGE < 70", + AGE >= 70, "AGE >= 70" + )] adex <- pharmaverseadam::adex adex_out <- - merge(adsl, adex[, c(setdiff(names(adex), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) + merge(adsl, adex[, c(setdiff(names(adex), names(adsl)), "USUBJID"), + with = + F + ], by = "USUBJID", all = TRUE) adex_out[] } -mk_adcm <- function(study_metadata){ - +mk_adcm <- function(study_metadata) { adsl <- data.table::as.data.table(pharmaverseadam::adsl) - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] + adsl[, AGEGR2 := data.table::fcase( + AGE < 70, "AGE < 70", + AGE >= 70, "AGE >= 70" + )] adcm <- data.table::as.data.table(pharmaverseadam::adcm) adcm_out <- - merge(adsl, adcm[, c(setdiff(names(adcm), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) + merge(adsl, adcm[, c(setdiff(names(adcm), names(adsl)), "USUBJID"), + with = + F + ], by = "USUBJID", all = TRUE) adcm_out[] } diff --git a/tests/testthat/helper-mk_ep.R b/tests/testthat/helper-mk_ep.R index a12da25..cc34e8c 100644 --- a/tests/testthat/helper-mk_ep.R +++ b/tests/testthat/helper-mk_ep.R @@ -14,13 +14,17 @@ mk_ep_0001_awaiting_data <- purrr::partial( stratify_by = list(c("SEX", "AGEGR2")), endpoint_filter = "AESEV == 'MILD'", stat_by_strata_across_trt = list(c()), - stat_by_strata_by_trt = list("n_sub" = n_sub, - "n_subev" = n_subev) + stat_by_strata_by_trt = list( + "n_sub" = n_sub, + "n_subev" = n_subev + ) ) mk_ep_0001_waiting_grps <- purrr::partial( mk_ep_0001_base, stratify_by = list(c("SEX", "AGEGR2")), - stat_by_strata_by_trt = list("n_subj" = n_sub, - "n_subev" = n_subev) + stat_by_strata_by_trt = list( + "n_subj" = n_sub, + "n_subev" = n_subev + ) ) diff --git a/tests/testthat/helper-stat_methods.R b/tests/testthat/helper-stat_methods.R index a00c05b..d69f741 100644 --- a/tests/testthat/helper-stat_methods.R +++ b/tests/testthat/helper-stat_methods.R @@ -65,7 +65,6 @@ summary_stats <- function(dat, var, var_type = c("cont", "cat"), ...) { - # Check argument var_type <- match.arg(var_type) @@ -138,7 +137,6 @@ contingency2x2_ptest <- function(dat, cell_index, treatment_var, ...) { - # Test a 2x2 contingency table ie. 
is there a link between treatment and total number of events dat_cell <- dat[J(cell_index), ] dat_cell[, is_event := INDEX_ %in% event_index] @@ -171,7 +169,6 @@ contingency2x2_strata_test <- function(dat, treatment_var, subjectid_var, ...) { - # Test a 2x2 contingency table i.e. is there a link between treatment and # patients with events over multiple strata dt_unique_subjects <- dat %>% diff --git a/tests/testthat/test-add_event_index.R b/tests/testthat/test-add_event_index.R index 4392485..9dd3461 100644 --- a/tests/testthat/test-add_event_index.R +++ b/tests/testthat/test-add_event_index.R @@ -13,14 +13,16 @@ test_that("base case: add_event_index works", { custom_pop_filter = NA, key_analysis_data = "a" ) - dat = data.table(dat = list(mk_adae() %>% .[, "INDEX_" := .I]), - key_analysis_data = "a") + dat <- data.table( + dat = list(mk_adae() %>% .[, "INDEX_" := .I]), + key_analysis_data = "a" + ) setkey(ep, key_analysis_data) setkey(dat, key_analysis_data) # ACT --------------------------------------------------------------------- actual <- add_event_index(ep = ep, analysis_data_container = dat) # EXPECT ------------------------------------------------------------------ - expect_equal(actual$event_index[[1]], dat$dat[[1]][SAFFL == "Y" ][["INDEX_"]]) + expect_equal(actual$event_index[[1]], dat$dat[[1]][SAFFL == "Y"][["INDEX_"]]) }) @@ -40,54 +42,56 @@ test_that("add_event_index works with period_var", { custom_pop_filter = NA, key_analysis_data = "a" ) - dat = data.table(dat = list(mk_adcm() %>% .[, "INDEX_" := .I]), - key_analysis_data = "a") + dat <- data.table( + dat = list(mk_adcm() %>% .[, "INDEX_" := .I]), + key_analysis_data = "a" + ) setkey(ep, key_analysis_data) setkey(dat, key_analysis_data) # ACT --------------------------------------------------------------------- actual <- add_event_index(ep = ep, analysis_data_container = dat) # EXPECT ------------------------------------------------------------------ expect_equal(actual$event_index[[1]], dat$dat[[1]][SAFFL == "Y" & - ANL01FL == "Y"][["INDEX_"]]) + ANL01FL == "Y"][["INDEX_"]]) }) -test_that("add_event_index works over multiple rows in ep with custom filter", - { - # SETUP ------------------------------------------------------------------- - ep <- data.table( - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = NA_character_, - period_value = NA_character_, - endpoint_id = 1, - endpoint_filter = NA, - endpoint_group_filter = NA, - custom_pop_filter = "AGE >70", - dat = list(mk_adcm() %>% .[, "INDEX_" := .I]), - key_analysis_data = "a" - ) - dat = data.table(dat = list(mk_adcm() %>% .[, "INDEX_" := .I]), - key_analysis_data = "a") - - setkey(dat, key_analysis_data) - ep <- rbindlist(list(ep, ep)) - setkey(ep, key_analysis_data) - ep[2, `:=` (custom_pop_filter = "AGE <=70", endpoint_id = 2)] - - # ACT --------------------------------------------------------------------- - actual <- add_event_index(ep = ep, dat) - - # EXPECT ------------------------------------------------------------------ - - expect_equal(actual$event_index[[1]], dat$dat[[1]][SAFFL == "Y" & - AGE > 70][["INDEX_"]]) - expect_equal(actual$event_index[[2]], dat$dat[[1]][SAFFL == "Y" & - AGE <= 70][["INDEX_"]]) - - }) +test_that("add_event_index works over multiple rows in ep with custom filter", { + # SETUP ------------------------------------------------------------------- + ep <- data.table( + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = 
"Xanomeline High Dose", + period_var = NA_character_, + period_value = NA_character_, + endpoint_id = 1, + endpoint_filter = NA, + endpoint_group_filter = NA, + custom_pop_filter = "AGE >70", + dat = list(mk_adcm() %>% .[, "INDEX_" := .I]), + key_analysis_data = "a" + ) + dat <- data.table( + dat = list(mk_adcm() %>% .[, "INDEX_" := .I]), + key_analysis_data = "a" + ) + + setkey(dat, key_analysis_data) + ep <- rbindlist(list(ep, ep)) + setkey(ep, key_analysis_data) + ep[2, `:=`(custom_pop_filter = "AGE <=70", endpoint_id = 2)] + + # ACT --------------------------------------------------------------------- + actual <- add_event_index(ep = ep, dat) + + # EXPECT ------------------------------------------------------------------ + + expect_equal(actual$event_index[[1]], dat$dat[[1]][SAFFL == "Y" & + AGE > 70][["INDEX_"]]) + expect_equal(actual$event_index[[2]], dat$dat[[1]][SAFFL == "Y" & + AGE <= 70][["INDEX_"]]) +}) test_that("add_event_index works over expanded endpoints", { @@ -104,8 +108,10 @@ test_that("add_event_index works over expanded endpoints", { fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_expanded <- expand_over_endpoints(ep = ep_and_data$ep, ep_and_data$analysis_data_container) @@ -116,16 +122,19 @@ test_that("add_event_index works over expanded endpoints", { # EXPECT ------------------------------------------------------------------ for (i in 1:nrow(actual)) { - expect_equal(actual$event_index[[i]], - ep_and_data$analysis_data_container$dat[[1]][SAFFL == "Y" & - eval(parse(text = - actual$endpoint_group_filter[[i]]))][["INDEX_"]]) + expect_equal( + actual$event_index[[i]], + ep_and_data$analysis_data_container$dat[[1]][SAFFL == "Y" & + eval(parse( + text = + actual$endpoint_group_filter[[i]] + ))][["INDEX_"]] + ) } }) -test_that("add_event_index works over expanded endpoints with endpoint filter", -{ +test_that("add_event_index works over expanded endpoints with endpoint filter", { # SETUP ------------------------------------------------------------------- testr::skip_on_devops() ep <- mk_ep_0001_base( @@ -140,8 +149,10 @@ test_that("add_event_index works over expanded endpoints with endpoint filter", fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_expanded <- expand_over_endpoints(ep = ep_and_data$ep, ep_and_data$analysis_data_container) @@ -152,10 +163,14 @@ test_that("add_event_index works over expanded endpoints with endpoint filter", # EXPECT ------------------------------------------------------------------ for (i in 1:nrow(actual)) { - expect_equal(actual$event_index[[i]], - ep_and_data$analysis_data_container$dat[[1]][SAFFL == "Y" & - RACE %in% c("BLACK OR AFRICAN AMERICAN", "WHITE") & - eval(parse(text = - actual$endpoint_group_filter[[i]]))][["INDEX_"]]) + expect_equal( + actual$event_index[[i]], + ep_and_data$analysis_data_container$dat[[1]][SAFFL == "Y" & + RACE %in% c("BLACK OR AFRICAN AMERICAN", "WHITE") & + eval(parse( + text = + actual$endpoint_group_filter[[i]] + ))][["INDEX_"]] + ) } }) diff --git a/tests/testthat/test-apply_criterion_by_strata.R 
b/tests/testthat/test-apply_criterion_by_strata.R index 8f40ecd..021f19b 100644 --- a/tests/testthat/test-apply_criterion_by_strata.R +++ b/tests/testthat/test-apply_criterion_by_strata.R @@ -526,9 +526,9 @@ test_that("strata_var remains a character variable when some endpoint have been # ACT --------------------------------------------------------------------- actual <- apply_criterion_by_strata(ep, - analysis_data_container, - fn_map, - type = "by_strata_by_trt" + analysis_data_container, + fn_map, + type = "by_strata_by_trt" ) # EXPECT ------------------------------------------------------------------ @@ -541,5 +541,4 @@ test_that("strata_var remains a character variable when some endpoint have been # Check that the column type is correct expect_equal(typeof(actual[["strata_var"]]), "character") - }) diff --git a/tests/testthat/test-apply_criterion_endpoint.R b/tests/testthat/test-apply_criterion_endpoint.R index f0d996f..6633fa5 100644 --- a/tests/testthat/test-apply_criterion_endpoint.R +++ b/tests/testthat/test-apply_criterion_endpoint.R @@ -1,5 +1,4 @@ -test_that("base endpoint crit works", -{ +test_that("base endpoint crit works", { # SETUP ------------------------------------------------------------------- crit_endpoint <- function(dat, event_index, @@ -26,8 +25,10 @@ test_that("base endpoint crit works", fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_data_key <- ep_and_data$ep @@ -46,8 +47,7 @@ test_that("base endpoint crit works", expect_equal(nrow(actual), nrow(ep)) }) -test_that("base endpoint crit works with multiple endpoints", -{ +test_that("base endpoint crit works with multiple endpoints", { # SETUP ------------------------------------------------------------------- crit_endpoint <- function(dat, event_index, @@ -75,8 +75,10 @@ test_that("base endpoint crit works with multiple endpoints", fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_data_key <- ep_and_data$ep @@ -99,8 +101,7 @@ test_that("base endpoint crit works with multiple endpoints", }) -test_that("base endpoint crit works with naked function", -{ +test_that("base endpoint crit works with naked function", { # SETUP ------------------------------------------------------------------- crit_endpoint <- function(dat, event_index, @@ -127,8 +128,10 @@ test_that("base endpoint crit works with naked function", fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_data_key <- ep_and_data$ep @@ -151,8 +154,7 @@ test_that("base endpoint crit works with naked function", }) -test_that("crit fn has access to correct data from chef", -{ +test_that("crit fn has access to correct data from chef", { # SETUP ------------------------------------------------------------------- @@ -170,9 +172,9 @@ test_that("crit fn has access to correct data from chef", endpoint_group_metadata, stratify_by, subjectid_var) { - out <- all( + out 
<- all( nrow(dat) == 7535, - #Same as nrows in filter adam data, + # Same as nrows in filter adam data, inherits(event_index, "integer"), is.na(endpoint_group_metadata), treatment_var == "TRT01A" @@ -194,8 +196,10 @@ test_that("crit fn has access to correct data from chef", fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_data_key <- ep_and_data$ep analysis_data_container <- ep_and_data$analysis_data_container @@ -215,8 +219,7 @@ test_that("crit fn has access to correct data from chef", expect_true(actual$crit_accept_endpoint, label = "If this fails, check expectations inside crit_endpoint function") }) -test_that("error when crit fn does not return a logical value", -{ +test_that("error when crit fn does not return a logical value", { # SETUP ------------------------------------------------------------------- crit_endpoint <- function(dat, event_index, @@ -245,8 +248,10 @@ test_that("error when crit fn does not return a logical value", fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_data_key <- ep_and_data$ep @@ -261,5 +266,6 @@ test_that("error when crit fn does not return a logical value", # EXPECT ------------------------------------------------------------------ expect_error(apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map), - regexp = "The return value from the endpoint criterion function") + regexp = "The return value from the endpoint criterion function" + ) }) diff --git a/tests/testthat/test-apply_dt_filter.R b/tests/testthat/test-apply_dt_filter.R index 7d39d4a..d222101 100644 --- a/tests/testthat/test-apply_dt_filter.R +++ b/tests/testthat/test-apply_dt_filter.R @@ -1,35 +1,36 @@ -test_that('Applying filter works on low level', - { - # SETUP ------------------------------------------------------------------- - +test_that("Applying filter works on low level", { + # SETUP ------------------------------------------------------------------- - filter_str1 <- glue::glue("k2 == \"b\"") - filter_str2 <- glue::glue("k2 == \"a\" & k1 >= 5") - new_dt <- data.table::data.table(k1 = c(1, 2, 3, 4, 5), - k2 = c("a", "a", "b", "b", "a")) + filter_str1 <- glue::glue("k2 == \"b\"") + filter_str2 <- glue::glue("k2 == \"a\" & k1 >= 5") - # ACT --------------------------------------------------------------------- - out1 <- apply_dt_filter(new_dt, filter_str1) - out2 <- apply_dt_filter(new_dt, filter_str2) + new_dt <- data.table::data.table( + k1 = c(1, 2, 3, 4, 5), + k2 = c("a", "a", "b", "b", "a") + ) + # ACT --------------------------------------------------------------------- + out1 <- apply_dt_filter(new_dt, filter_str1) + out2 <- apply_dt_filter(new_dt, filter_str2) - # EXPECT ------------------------------------------------------------------ - expect_setequal(out1$k1, c(3, 4)) - expect_setequal(out2$k1, c(5)) + # EXPECT ------------------------------------------------------------------ + expect_setequal(out1$k1, c(3, 4)) + expect_setequal(out2$k1, c(5)) }) -test_that('Applying flags works on low level', -{ +test_that("Applying flags works on low level", { # SETUP 
------------------------------------------------------------------- filter_str1 <- glue::glue("k2 == \"b\"") filter_str2 <- glue::glue("k2 == \"a\" & k1 >= 5") - new_dt <- data.table::data.table(k1 = c(1, 2, 3, 4, 5), - k2 = c("a", "a", "b", "b", "a")) + new_dt <- data.table::data.table( + k1 = c(1, 2, 3, 4, 5), + k2 = c("a", "a", "b", "b", "a") + ) # ACT --------------------------------------------------------------------- out1 <- apply_dt_filter(new_dt, filter_str1, type = "flag") @@ -38,22 +39,19 @@ test_that('Applying flags works on low level', # EXPECT ------------------------------------------------------------------ expect_setequal(out1$event_flag, c(FALSE, FALSE, TRUE, TRUE, FALSE)) expect_setequal(out2$event_flag, c(rep(FALSE, 5), TRUE)) - }) -test_that("Applying a simple filter works on adam level", -{ +test_that("Applying a simple filter works on adam level", { # SETUP ------------------------------------------------------------------- adam <- mk_adae() - age_max = min(adam$AGE) + 2 - filter_str = glue::glue("AGE <={age_max}") + age_max <- min(adam$AGE) + 2 + filter_str <- glue::glue("AGE <={age_max}") # ACT --------------------------------------------------------------------- out <- apply_dt_filter(adam, filter_str) # EXPECT ------------------------------------------------------------------ expect_lte(max(out$AGE), expected = age_max) - }) diff --git a/tests/testthat/test-apply_stats.R b/tests/testthat/test-apply_stats.R index 0044ca2..571cbab 100644 --- a/tests/testthat/test-apply_stats.R +++ b/tests/testthat/test-apply_stats.R @@ -1,82 +1,80 @@ -test_that("base: stat_by_strata_by_trt", - { - # SETUP ------------------------------------------------------------------- - - skip_on_devops() - ep <- mk_ep_0001_base( - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - stat_by_strata_by_trt = list(n_sub = n_sub) - ) - - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data( - study_metadata = list(), - fn_dt = user_def_fn - ) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - ep_data_key <- ep_and_data$ep - analysis_data_container <- - ep_and_data$analysis_data_container - ep_expanded <- - expand_over_endpoints(ep_data_key, analysis_data_container) - ep_ev_index <- - add_event_index(ep_expanded, analysis_data_container) - ep_crit_endpoint <- - apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) - crit_accept_by_strata_by_trt <- - apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") - crit_accept_by_strata_across_trt <- - apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") - - ep_crit_by_strata_by_trt <- prepare_for_stats( - crit_accept_by_strata_across_trt, +test_that("base: stat_by_strata_by_trt", { + # SETUP ------------------------------------------------------------------- + + skip_on_devops() + ep <- mk_ep_0001_base( + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + stat_by_strata_by_trt = list(n_sub = n_sub) + ) + + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + 
study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + ep_data_key <- ep_and_data$ep + analysis_data_container <- + ep_and_data$analysis_data_container + ep_expanded <- + expand_over_endpoints(ep_data_key, analysis_data_container) + ep_ev_index <- + add_event_index(ep_expanded, analysis_data_container) + ep_crit_endpoint <- + apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) + crit_accept_by_strata_by_trt <- + apply_criterion_by_strata(ep_crit_endpoint, analysis_data_container, fn_map, - type = "stat_by_strata_by_trt" + type = "by_strata_by_trt" + ) + crit_accept_by_strata_across_trt <- + apply_criterion_by_strata(crit_accept_by_strata_by_trt, + analysis_data_container, + fn_map, + type = "by_strata_across_trt" ) - # ACT --------------------------------------------------------------------- - - actual <- - apply_stats(ep_crit_by_strata_by_trt, - analysis_data_container, - type = "stat_by_strata_by_trt") + ep_crit_by_strata_by_trt <- prepare_for_stats( + crit_accept_by_strata_across_trt, + analysis_data_container, + fn_map, + type = "stat_by_strata_by_trt" + ) - # EXPECT ------------------------------------------------------------------ + # ACT --------------------------------------------------------------------- - expect_equal(nrow(actual), 9) - expect_equal(setdiff(names(actual), names(ep_crit_by_strata_by_trt)), "stat_result") + actual <- + apply_stats(ep_crit_by_strata_by_trt, + analysis_data_container, + type = "stat_by_strata_by_trt" + ) - for (i in 1:nrow(actual)){ - stats <- actual[["stat_result"]][[i]] - expect_true(is.data.table(stats)) - expect_equal(nrow(stats), 1) - expect_same_items(names(stats), c( "label", "description", "qualifiers", "value")) - } + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual), 9) + expect_equal(setdiff(names(actual), names(ep_crit_by_strata_by_trt)), "stat_result") + for (i in 1:nrow(actual)) { + stats <- actual[["stat_result"]][[i]] + expect_true(is.data.table(stats)) + expect_equal(nrow(stats), 1) + expect_same_items(names(stats), c("label", "description", "qualifiers", "value")) + } }) -test_that("validate: by_strata_by_trt returns same value as manual calculation with period flag", -{ - +test_that("validate: by_strata_by_trt returns same value as manual calculation with period flag", { # SETUP ------------------------------------------------------------------- skip_on_devops() ep <- mk_ep_0001_base( @@ -114,14 +112,16 @@ test_that("validate: by_strata_by_trt returns same value as manual calculation w apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) crit_accept_by_strata_by_trt <- apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") + analysis_data_container, + fn_map, + type = "by_strata_by_trt" + ) crit_accept_by_strata_across_trt <- apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") + analysis_data_container, + fn_map, + type = "by_strata_across_trt" + ) ep_crit_by_strata_by_trt <- prepare_for_stats( crit_accept_by_strata_across_trt, @@ -134,12 +134,13 @@ test_that("validate: by_strata_by_trt returns same value as manual calculation w # ACT --------------------------------------------------------------------- actual <- apply_stats(ep_crit_by_strata_by_trt, - analysis_data_container, - type = "stat_by_strata_by_trt") + analysis_data_container, + type = 
"stat_by_strata_by_trt" + ) # EXPECT ------------------------------------------------------------------ expected_counts <- pharmaverseadam::adcm %>% - as.data.table() %>% + as.data.table() %>% .[SAFFL == "Y" & ANL01FL == "Y" & AOCCPFL == "Y"] %>% unique(., by = c("SUBJID")) %>% .[, .N, by = TRT01A] %>% @@ -152,170 +153,169 @@ test_that("validate: by_strata_by_trt returns same value as manual calculation w .[["value"]] expect_equal(actual_counts, expected_counts, label = "Event counts match") - }) -test_that("by_strata_by_trt returns same value as manual calculation without period flag", - { - - # SETUP ------------------------------------------------------------------- - skip_on_devops() - ep <- mk_ep_0001_base( - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - endpoint_filter = "AOCCPFL=='Y'", - stat_by_strata_by_trt = list(n_subev = n_subev), - period_var = "ANL01FL", - period_value = "Y" - ) - - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data( - study_metadata = list(), - fn_dt = user_def_fn - ) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - ep_data_key <- ep_and_data$ep - analysis_data_container <- - ep_and_data$analysis_data_container - ep_expanded <- - expand_over_endpoints(ep_data_key, analysis_data_container) - ep_ev_index <- - add_event_index(ep_expanded, analysis_data_container) - ep_crit_endpoint <- - apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) - crit_accept_by_strata_by_trt <- - apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") - crit_accept_by_strata_across_trt <- - apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") - - ep_crit_by_strata_by_trt <- prepare_for_stats( - crit_accept_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_by_strata_by_trt" - ) - - - # ACT --------------------------------------------------------------------- - actual <- - apply_stats(ep_crit_by_strata_by_trt, - analysis_data_container, - type = "stat_by_strata_by_trt") - - # EXPECT ------------------------------------------------------------------ - expected_counts <- pharmaverseadam::adcm %>% - as.data.table() %>% - .[SAFFL == "Y" & AOCCPFL == "Y"] %>% - unique(., by = c("SUBJID")) %>% - .[, .N, by = TRT01A] %>% - .[["N"]] - - actual_counts <- - actual[strata_var == "TOTAL_" & fn_name == "n_subev"] %>% - .[, stat_result] %>% - rbindlist() %>% - .[["value"]] - - expect_equal(actual_counts, expected_counts, label = "Event counts match") - - }) - - -test_that("validate: n_sub return correct value", - { - - # SETUP ------------------------------------------------------------------- - skip_on_devops() - ep <- mk_ep_0001_base( - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - stat_by_strata_by_trt = list(n_sub = n_sub) - ) - - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data( - study_metadata = list(), - fn_dt = user_def_fn - ) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - ep_data_key <- ep_and_data$ep - analysis_data_container <- - 
ep_and_data$analysis_data_container - ep_expanded <- - expand_over_endpoints(ep_data_key, analysis_data_container) - ep_ev_index <- - add_event_index(ep_expanded, analysis_data_container) - ep_crit_endpoint <- - apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) - crit_accept_by_strata_by_trt <- - apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") - crit_accept_by_strata_across_trt <- - apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") - - ep_crit_by_strata_by_trt <- prepare_for_stats( - crit_accept_by_strata_across_trt, +test_that("by_strata_by_trt returns same value as manual calculation without period flag", { + # SETUP ------------------------------------------------------------------- + skip_on_devops() + ep <- mk_ep_0001_base( + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + endpoint_filter = "AOCCPFL=='Y'", + stat_by_strata_by_trt = list(n_subev = n_subev), + period_var = "ANL01FL", + period_value = "Y" + ) + + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + ep_data_key <- ep_and_data$ep + analysis_data_container <- + ep_and_data$analysis_data_container + ep_expanded <- + expand_over_endpoints(ep_data_key, analysis_data_container) + ep_ev_index <- + add_event_index(ep_expanded, analysis_data_container) + ep_crit_endpoint <- + apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) + crit_accept_by_strata_by_trt <- + apply_criterion_by_strata(ep_crit_endpoint, analysis_data_container, fn_map, + type = "by_strata_by_trt" + ) + crit_accept_by_strata_across_trt <- + apply_criterion_by_strata(crit_accept_by_strata_by_trt, + analysis_data_container, + fn_map, + type = "by_strata_across_trt" + ) + + ep_crit_by_strata_by_trt <- prepare_for_stats( + crit_accept_by_strata_across_trt, + analysis_data_container, + fn_map, + type = "stat_by_strata_by_trt" + ) + + + # ACT --------------------------------------------------------------------- + actual <- + apply_stats(ep_crit_by_strata_by_trt, + analysis_data_container, type = "stat_by_strata_by_trt" ) + # EXPECT ------------------------------------------------------------------ + expected_counts <- pharmaverseadam::adcm %>% + as.data.table() %>% + .[SAFFL == "Y" & AOCCPFL == "Y"] %>% + unique(., by = c("SUBJID")) %>% + .[, .N, by = TRT01A] %>% + .[["N"]] - # ACT --------------------------------------------------------------------- + actual_counts <- + actual[strata_var == "TOTAL_" & fn_name == "n_subev"] %>% + .[, stat_result] %>% + rbindlist() %>% + .[["value"]] - actual <- - apply_stats(ep_crit_by_strata_by_trt, - analysis_data_container, - type = "stat_by_strata_by_trt") + expect_equal(actual_counts, expected_counts, label = "Event counts match") +}) - # EXPECT ------------------------------------------------------------------ - adsl <- pharmaverseadam::adsl |> setDT() - expected_counts <- adsl[TRT01A == "Placebo" & SAFFL == "Y"] |> - unique(by = "USUBJID") |> - nrow() +test_that("validate: n_sub return correct value", { + # SETUP ------------------------------------------------------------------- + skip_on_devops() + ep <- 
mk_ep_0001_base( + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + stat_by_strata_by_trt = list(n_sub = n_sub) + ) - actual_counts <- - actual[strata_var == "TOTAL_" & fn_name == "n_sub"][, stat_result] |> - rbindlist() + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) - expect_equal(actual_counts$value[[1]], expected_counts, label = "Event counts match") + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + ep_data_key <- ep_and_data$ep + analysis_data_container <- + ep_and_data$analysis_data_container + ep_expanded <- + expand_over_endpoints(ep_data_key, analysis_data_container) + ep_ev_index <- + add_event_index(ep_expanded, analysis_data_container) + ep_crit_endpoint <- + apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) + crit_accept_by_strata_by_trt <- + apply_criterion_by_strata(ep_crit_endpoint, + analysis_data_container, + fn_map, + type = "by_strata_by_trt" + ) + crit_accept_by_strata_across_trt <- + apply_criterion_by_strata(crit_accept_by_strata_by_trt, + analysis_data_container, + fn_map, + type = "by_strata_across_trt" + ) + + ep_crit_by_strata_by_trt <- prepare_for_stats( + crit_accept_by_strata_across_trt, + analysis_data_container, + fn_map, + type = "stat_by_strata_by_trt" + ) + + + # ACT --------------------------------------------------------------------- + + actual <- + apply_stats(ep_crit_by_strata_by_trt, + analysis_data_container, + type = "stat_by_strata_by_trt" + ) + + # EXPECT ------------------------------------------------------------------ + + adsl <- pharmaverseadam::adsl |> setDT() + expected_counts <- adsl[TRT01A == "Placebo" & SAFFL == "Y"] |> + unique(by = "USUBJID") |> + nrow() + + actual_counts <- + actual[strata_var == "TOTAL_" & fn_name == "n_sub"][, stat_result] |> + rbindlist() + + + expect_equal(actual_counts$value[[1]], expected_counts, label = "Event counts match") }) -test_that("apply_stats stat_by_strata_across_trt", -{ +test_that("apply_stats stat_by_strata_across_trt", { # SETUP ------------------------------------------------------------------- skip_on_devops() @@ -353,14 +353,16 @@ test_that("apply_stats stat_by_strata_across_trt", apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) crit_accept_by_strata_by_trt <- apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") + analysis_data_container, + fn_map, + type = "by_strata_by_trt" + ) crit_accept_by_strata_across_trt <- apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") + analysis_data_container, + fn_map, + type = "by_strata_across_trt" + ) ep_crit_by_strata_by_trt <- prepare_for_stats( crit_accept_by_strata_across_trt, @@ -373,27 +375,26 @@ test_that("apply_stats stat_by_strata_across_trt", actual <- apply_stats( ep_crit_by_strata_by_trt, - analysis_data_container, type = "stat_by_strata_across_trt") + analysis_data_container, + type = "stat_by_strata_across_trt" + ) # EXPECT ------------------------------------------------------------------ expect_equal(nrow(actual), 3) expect_equal(setdiff(names(actual), names(ep_crit_by_strata_by_trt)), "stat_result") - for (i in 1:nrow(actual)){ + for (i in 1:nrow(actual)) { stats <- 
actual[["stat_result"]][[i]] expect_true(is.data.table(stats)) expect_equal(nrow(stats), 1) - expect_same_items(names(stats), c( "label", "description", "qualifiers", "value")) - + expect_same_items(names(stats), c("label", "description", "qualifiers", "value")) } - }) -test_that("apply_stats stat_across_strata_across_trt when no across_strata_across_trt fn supplied", -{ +test_that("apply_stats stat_across_strata_across_trt when no across_strata_across_trt fn supplied", { # SETUP ------------------------------------------------------------------- skip_on_devops() ep <- mk_endpoint_str( @@ -436,14 +437,16 @@ test_that("apply_stats stat_across_strata_across_trt when no across_strata_acros apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) crit_accept_by_strata_by_trt <- apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") + analysis_data_container, + fn_map, + type = "by_strata_by_trt" + ) crit_accept_by_strata_across_trt <- apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") + analysis_data_container, + fn_map, + type = "by_strata_across_trt" + ) ep_crit_by_strata_by_trt <- prepare_for_stats( crit_accept_by_strata_across_trt, @@ -464,96 +467,95 @@ test_that("apply_stats stat_across_strata_across_trt when no across_strata_acros # EXPECT ------------------------------------------------------------------ expect_equal(actual, data.table(NULL)) - }) -test_that("apply_stats: with all FALSE for criteria", - { - # SETUP ------------------------------------------------------------------- +test_that("apply_stats: with all FALSE for criteria", { + # SETUP ------------------------------------------------------------------- + + skip_on_devops() + + crit_false <- function(...) 
FALSE + + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + period_var = "ANL01FL", + period_value = "Y", + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + endpoint_label = "A", + stat_by_strata_by_trt = list("n_events" = n_subev), + crit_by_strata_by_trt = crit_false + ) - skip_on_devops() + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) - crit_false <- function(...)FALSE + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) - ep <- mk_endpoint_str( + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - endpoint_label = "A", - stat_by_strata_by_trt = list("n_events" = n_subev), - crit_by_strata_by_trt = crit_false - ) - - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data( - study_metadata = list(), - fn_dt = user_def_fn - ) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - ep_data_key <- ep_and_data$ep - analysis_data_container <- - ep_and_data$analysis_data_container - ep_expanded <- - expand_over_endpoints(ep_data_key, analysis_data_container) - ep_ev_index <- - add_event_index(ep_expanded, analysis_data_container) - ep_crit_endpoint <- - apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) - crit_accept_by_strata_by_trt <- - apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") - crit_accept_by_strata_across_trt <- - apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") - - ep_prep_by_strata_by_trt <- prepare_for_stats( - crit_accept_by_strata_across_trt, + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + ep_data_key <- ep_and_data$ep + analysis_data_container <- + ep_and_data$analysis_data_container + ep_expanded <- + expand_over_endpoints(ep_data_key, analysis_data_container) + ep_ev_index <- + add_event_index(ep_expanded, analysis_data_container) + ep_crit_endpoint <- + apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) + crit_accept_by_strata_by_trt <- + apply_criterion_by_strata(ep_crit_endpoint, analysis_data_container, fn_map, - type = "stat_by_strata_by_trt" + type = "by_strata_by_trt" + ) + crit_accept_by_strata_across_trt <- + apply_criterion_by_strata(crit_accept_by_strata_by_trt, + analysis_data_container, + fn_map, + type = "by_strata_across_trt" ) - # ACT --------------------------------------------------------------------- - - actual <- - apply_stats(ep_prep_by_strata_by_trt, - analysis_data_container, - type = "stat_by_strata_by_trt") + ep_prep_by_strata_by_trt <- prepare_for_stats( + crit_accept_by_strata_across_trt, + analysis_data_container, + fn_map, + type = "stat_by_strata_by_trt" + ) - # EXPECT ------------------------------------------------------------------ + # ACT --------------------------------------------------------------------- - expect_equal( - 
nrow(actual), - nrow(ep_prep_by_strata_by_trt) - ) - expect_true( - all(unlist(lapply(actual$stat_results, is.null))) + actual <- + apply_stats(ep_prep_by_strata_by_trt, + analysis_data_container, + type = "stat_by_strata_by_trt" ) - }) + # EXPECT ------------------------------------------------------------------ + + expect_equal( + nrow(actual), + nrow(ep_prep_by_strata_by_trt) + ) + expect_true( + all(unlist(lapply(actual$stat_results, is.null))) + ) +}) -test_that("Complex application of stats functions", -{ +test_that("Complex application of stats functions", { # SETUP ------------------------------------------------------------------- # Statistical function across strata and treatment arms (does not make much @@ -596,8 +598,10 @@ test_that("Complex application of stats functions", ep_fn_map <- suppressWarnings(unnest_endpoint_functions(ep)) user_def_fn <- mk_userdef_fn_dt(ep_fn_map, env = environment()) fn_map <- merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- fetch_db_data(study_metadata = ep$study_metadata[[1]], - fn_dt = user_def_fn) + adam_db <- fetch_db_data( + study_metadata = ep$study_metadata[[1]], + fn_dt = user_def_fn + ) ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) ep_data_key <- ep_and_data$ep analysis_data_container <- @@ -610,14 +614,16 @@ test_that("Complex application of stats functions", apply_criterion_endpoint(ep_ev_index, analysis_data_container, fn_map) crit_accept_by_strata_by_trt <- apply_criterion_by_strata(ep_crit_endpoint, - analysis_data_container, - fn_map, - type = "by_strata_by_trt") + analysis_data_container, + fn_map, + type = "by_strata_by_trt" + ) crit_accept_by_strata_across_trt <- apply_criterion_by_strata(crit_accept_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt") + analysis_data_container, + fn_map, + type = "by_strata_across_trt" + ) ep_prep_by_strata_by_trt <- prepare_for_stats( crit_accept_by_strata_across_trt, @@ -628,38 +634,45 @@ test_that("Complex application of stats functions", ep_prep_by_strata_across_trt <- prepare_for_stats(crit_accept_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_by_strata_across_trt") + analysis_data_container, + fn_map, + type = "stat_by_strata_across_trt" + ) ep_prep_across_strata_across_trt <- prepare_for_stats(crit_accept_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_across_strata_across_trt") + analysis_data_container, + fn_map, + type = "stat_across_strata_across_trt" + ) # ACT --------------------------------------------------------------------- ep_stat_by_strata_by_trt <- apply_stats(ep_prep_by_strata_by_trt, - analysis_data_container, - type = "stat_by_strata_by_trt") + analysis_data_container, + type = "stat_by_strata_by_trt" + ) ep_stat_by_strata_across_trt <- apply_stats(ep_prep_by_strata_across_trt, - analysis_data_container, - type = "stat_by_strata_across_trt") + analysis_data_container, + type = "stat_by_strata_across_trt" + ) ep_stat_across_strata_across_trt <- apply_stats(ep_prep_across_strata_across_trt, - analysis_data_container, - type = "stat_across_strata_across_trt") + analysis_data_container, + type = "stat_across_strata_across_trt" + ) ep_stat <- - rbind(ep_stat_by_strata_by_trt, - ep_stat_by_strata_across_trt, - ep_stat_across_strata_across_trt) %>% + rbind( + ep_stat_by_strata_by_trt, + ep_stat_by_strata_across_trt, + ep_stat_across_strata_across_trt + ) %>% tidyr::unnest(cols = stat_result) %>% as.data.table() @@ -672,22 +685,29 @@ 
test_that("Complex application of stats functions", expect_equal(sum(ep_stat$fn_type == "stat_across_strata_across_trt"), 18) # stat_by_strata_by_trt statistics - expect_equal(ep_stat[ep_stat$fn_type == "stat_by_strata_by_trt" & - ep_stat$endpoint_group_filter == 'CMCLAS == "SYSTEMIC HORMONAL PREPARATIONS, EXCL."'][["value"]], - c(2, 8, 1, 1, 2, 6, 2, 0, 6, 2)) - expect_equal(ep_stat[ep_stat$fn_type == "stat_by_strata_by_trt" & - ep_stat$endpoint_group_filter == 'CMCLAS == "RESPIRATORY SYSTEM"'][["value"]], - c(1, 1, 0, 1, 1, 0, 0, 1, 0, 1)) + expect_equal( + ep_stat[ep_stat$fn_type == "stat_by_strata_by_trt" & + ep_stat$endpoint_group_filter == 'CMCLAS == "SYSTEMIC HORMONAL PREPARATIONS, EXCL."'][["value"]], + c(2, 8, 1, 1, 2, 6, 2, 0, 6, 2) + ) + expect_equal( + ep_stat[ep_stat$fn_type == "stat_by_strata_by_trt" & + ep_stat$endpoint_group_filter == 'CMCLAS == "RESPIRATORY SYSTEM"'][["value"]], + c(1, 1, 0, 1, 1, 0, 0, 1, 0, 1) + ) # stat_by_strata_across_trt statistics - expect_equal(ep_stat[ep_stat$fn_type == "stat_by_strata_across_trt" & - ep_stat$endpoint_group_filter == 'CMCLAS == "SYSTEMIC HORMONAL PREPARATIONS, EXCL."'][["value"]], - c(10, 3, 7, 8, 2)) - expect_equal(ep_stat[ep_stat$fn_type == "stat_by_strata_across_trt" & - ep_stat$endpoint_group_filter == 'CMCLAS == "RESPIRATORY SYSTEM"'][["value"]], - c(2, 1, 1, 0, 2)) + expect_equal( + ep_stat[ep_stat$fn_type == "stat_by_strata_across_trt" & + ep_stat$endpoint_group_filter == 'CMCLAS == "SYSTEMIC HORMONAL PREPARATIONS, EXCL."'][["value"]], + c(10, 3, 7, 8, 2) + ) + expect_equal( + ep_stat[ep_stat$fn_type == "stat_by_strata_across_trt" & + ep_stat$endpoint_group_filter == 'CMCLAS == "RESPIRATORY SYSTEM"'][["value"]], + c(2, 1, 1, 0, 2) + ) # stat_across_strata_across_trt statistics expect_true(all(ep_stat[ep_stat$fn_type == "stat_across_strata_across_trt"][["value"]] == 158)) - }) diff --git a/tests/testthat/test-check_duplicate_functions.R b/tests/testthat/test-check_duplicate_functions.R index 25e66a2..af52495 100644 --- a/tests/testthat/test-check_duplicate_functions.R +++ b/tests/testthat/test-check_duplicate_functions.R @@ -1,8 +1,7 @@ -test_that("check_duplicate_functions handles empty directory correctly", - { - testr::create_local_project() - expect_null(check_duplicate_functions("R/")) - }) +test_that("check_duplicate_functions handles empty directory correctly", { + testr::create_local_project() + expect_null(check_duplicate_functions("R/")) +}) test_that( "check_duplicate_functions handles directory with no duplicate function names correctly", @@ -14,20 +13,21 @@ test_that( } ) -test_that("check_duplicate_functions correctly identifies duplicate function names", - { - testr::create_local_project() - write("f1 <- function(){}", "R/tmp.R") - write("f1 <- function(){}", "R/tmp.R", append = TRUE) - expect_error( - check_duplicate_functions("R/"), "The following functions") - - }) +test_that("check_duplicate_functions correctly identifies duplicate function names", { + testr::create_local_project() + write("f1 <- function(){}", "R/tmp.R") + write("f1 <- function(){}", "R/tmp.R", append = TRUE) + expect_error( + check_duplicate_functions("R/"), "The following functions" + ) +}) test_that("check_duplicate_functions handles non-existent directory correctly", { testr::create_local_project() - expect_error(check_duplicate_functions("R_fun"), - "Directory R_fun does not exist") + expect_error( + check_duplicate_functions("R_fun"), + "Directory R_fun does not exist" + ) }) test_that("check_duplicate_functions handles directory 
with non-R files correctly", { @@ -52,16 +52,19 @@ test_that("check_duplicate_functions correctly identifies all duplicate function write("f2 <- function(){}", "R/tmp.R", append = TRUE) expect_error( - check_duplicate_functions("R"), regexp = "-f2") - + check_duplicate_functions("R"), + regexp = "-f2" + ) }) test_that("check_duplicate_functions handles function definitions with different parameters but same name correctly", { testr::create_local_project() write("f1 <- function(x){x}", "R/tmp.R") write("f1 <- function(y){y}", "R/tmp.R", append = TRUE) - expect_error(check_duplicate_functions("R"), - "f1") + expect_error( + check_duplicate_functions("R"), + "f1" + ) }) diff --git a/tests/testthat/test-construct_filter_logic.R b/tests/testthat/test-construct_filter_logic.R index 009ef62..dc4ca84 100644 --- a/tests/testthat/test-construct_filter_logic.R +++ b/tests/testthat/test-construct_filter_logic.R @@ -42,8 +42,6 @@ test_that("constructing filter logic works with non-paired filters (singletons)" construct_data_filter_logic(list( c(ep$pop_var[[1]], ep$pop_value[[1]]), c(ep$period_var[[1]], ep$period_value[[1]]) - ),singletons = ep$endpoint_filter[[1]] - ) + ), singletons = ep$endpoint_filter[[1]]) expect_equal(actual1, "A==\"TT\" & period==\"F\" & AGE < 50") - }) diff --git a/tests/testthat/test-endpoint_bookkeeping.R b/tests/testthat/test-endpoint_bookkeeping.R index 81b0219..a79dae8 100644 --- a/tests/testthat/test-endpoint_bookkeeping.R +++ b/tests/testthat/test-endpoint_bookkeeping.R @@ -1,6 +1,5 @@ test_that("Bookkeeping of rejected endpoints/strata", { - -# SETUP ------------------------------------------------------------------- + # SETUP ------------------------------------------------------------------- crit_ep <- function(dat, event_index, @@ -21,13 +20,13 @@ test_that("Bookkeeping of rejected endpoints/strata", { endpoint_group_metadata, strata_var, ...) 
{ - if (endpoint_group_metadata[["AESOC"]] == "CARDIAC DISORDERS" | - (endpoint_group_metadata[["AESOC"]] == "INFECTIONS AND INFESTATIONS" & strata_var == "TOTAL_") | - (endpoint_group_metadata[["AESOC"]] %in% c("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", "VASCULAR DISORDERS") & - strata_var %in% c("TOTAL_", "AGEGR2") - )) { + if (endpoint_group_metadata[["AESOC"]] == "CARDIAC DISORDERS" | + (endpoint_group_metadata[["AESOC"]] == "INFECTIONS AND INFESTATIONS" & strata_var == "TOTAL_") | + (endpoint_group_metadata[["AESOC"]] %in% c("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", "VASCULAR DISORDERS") & + strata_var %in% c("TOTAL_", "AGEGR2") + )) { return(TRUE) - } else{ + } else { return(FALSE) } } @@ -42,7 +41,7 @@ test_that("Bookkeeping of rejected endpoints/strata", { "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" ) | (endpoint_group_metadata[["AESOC"]] == "VASCULAR DISORDERS" & strata_var == "TOTAL_")) { return(TRUE) - } else{ + } else { return(FALSE) } } @@ -53,8 +52,10 @@ test_that("Bookkeeping of rejected endpoints/strata", { custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", endpoint_label = "Test: ", group_by = list(list(AESOC = c())), - stat_by_strata_by_trt = list("n_subev" = n_subev, - "p_subev" = p_subev), + stat_by_strata_by_trt = list( + "n_subev" = n_subev, + "p_subev" = p_subev + ), stat_by_strata_across_trt = list("n_subev_trt_diff" = n_subev_trt_diff), stat_across_strata_across_trt = list("P-interaction" = contingency2x2_strata_test), crit_endpoint = list(crit_ep), @@ -92,147 +93,149 @@ test_that("Bookkeeping of rejected endpoints/strata", { apply_criterion_by_strata(ep_crit_endpoint, analysis_data_container, fn_map) ep_crit_by_strata_across_trt <- apply_criterion_by_strata(ep_crit_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt" + analysis_data_container, + fn_map, + type = "by_strata_across_trt" ) -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- ep_prep_by_strata_by_trt <- prepare_for_stats(ep_crit_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_by_strata_by_trt") + analysis_data_container, + fn_map, + type = "stat_by_strata_by_trt" + ) ep_prep_by_strata_across_trt <- prepare_for_stats(ep_crit_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_by_strata_across_trt") + analysis_data_container, + fn_map, + type = "stat_by_strata_across_trt" + ) ep_prep_across_strata_across_trt <- prepare_for_stats(ep_crit_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_across_strata_across_trt") + analysis_data_container, + fn_map, + type = "stat_across_strata_across_trt" + ) ep_rejected <- ep_crit_by_strata_across_trt[!(crit_accept_endpoint) | - !(crit_accept_by_strata_by_trt) | - !(crit_accept_by_strata_across_trt)] + !(crit_accept_by_strata_by_trt) | + !(crit_accept_by_strata_across_trt)] -# EXPECT ------------------------------------------------------------------ + # EXPECT ------------------------------------------------------------------ # by_strata_by_trt: Summary expect_equal(nrow(ep_prep_by_strata_by_trt), 48) - expect_equal(nrow(ep_prep_by_strata_by_trt[fn_name == "n_subev",]), 24) - expect_equal(nrow(ep_prep_by_strata_by_trt[fn_name == "p_subev",]), 24) + expect_equal(nrow(ep_prep_by_strata_by_trt[fn_name == "n_subev", ]), 24) + expect_equal(nrow(ep_prep_by_strata_by_trt[fn_name == "p_subev", ]), 24) # by_strata_by_trt: CARDIAC DISORDERS - ep_soc1 <- 
ep_prep_by_strata_by_trt[grepl("CARDIAC DISORDERS", endpoint_label),] + ep_soc1 <- ep_prep_by_strata_by_trt[grepl("CARDIAC DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc1), 20) - expect_equal(nrow(ep_soc1[strata_var == "TOTAL_",]), 4) - expect_equal(nrow(ep_soc1[strata_var == "SEX",]), 8) - expect_equal(nrow(ep_soc1[strata_var == "AGEGR2",]), 8) + expect_equal(nrow(ep_soc1[strata_var == "TOTAL_", ]), 4) + expect_equal(nrow(ep_soc1[strata_var == "SEX", ]), 8) + expect_equal(nrow(ep_soc1[strata_var == "AGEGR2", ]), 8) # by_strata_by_trt: INFECTIONS AND INFESTATIONS - ep_soc2 <- ep_prep_by_strata_by_trt[grepl("INFECTIONS AND INFESTATIONS", endpoint_label),] + ep_soc2 <- ep_prep_by_strata_by_trt[grepl("INFECTIONS AND INFESTATIONS", endpoint_label), ] expect_equal(nrow(ep_soc2), 4) - expect_equal(nrow(ep_soc2[strata_var == "TOTAL_",]), 4) + expect_equal(nrow(ep_soc2[strata_var == "TOTAL_", ]), 4) # by_strata_by_trt: SKIN AND SUBCUTANEOUS TISSUE DISORDERS - ep_soc3 <- ep_prep_by_strata_by_trt[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label),] + ep_soc3 <- ep_prep_by_strata_by_trt[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc3), 12) - expect_equal(nrow(ep_soc3[strata_var == "TOTAL_",]), 4) - expect_equal(nrow(ep_soc3[strata_var == "AGEGR2",]), 8) + expect_equal(nrow(ep_soc3[strata_var == "TOTAL_", ]), 4) + expect_equal(nrow(ep_soc3[strata_var == "AGEGR2", ]), 8) # by_strata_by_trt: VASCULAR DISORDERS - ep_soc4 <- ep_prep_by_strata_by_trt[grepl("VASCULAR DISORDERS", endpoint_label),] + ep_soc4 <- ep_prep_by_strata_by_trt[grepl("VASCULAR DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc4), 12) - expect_equal(nrow(ep_soc4[strata_var == "TOTAL_",]), 4) - expect_equal(nrow(ep_soc4[strata_var == "AGEGR2",]), 8) + expect_equal(nrow(ep_soc4[strata_var == "TOTAL_", ]), 4) + expect_equal(nrow(ep_soc4[strata_var == "AGEGR2", ]), 8) # by_strata_across_trt: Summary expect_equal(nrow(ep_prep_by_strata_across_trt), 10) - expect_equal(nrow(ep_prep_by_strata_across_trt[fn_name == "n_subev_trt_diff",]), 10) + expect_equal(nrow(ep_prep_by_strata_across_trt[fn_name == "n_subev_trt_diff", ]), 10) # by_strata_across_trt: CARDIAC DISORDERS - ep_soc5 <- ep_prep_by_strata_across_trt[grepl("CARDIAC DISORDERS", endpoint_label),] + ep_soc5 <- ep_prep_by_strata_across_trt[grepl("CARDIAC DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc5), 5) - expect_equal(nrow(ep_soc5[strata_var == "TOTAL_",]), 1) - expect_equal(nrow(ep_soc5[strata_var == "SEX",]), 2) - expect_equal(nrow(ep_soc5[strata_var == "AGEGR2",]), 2) + expect_equal(nrow(ep_soc5[strata_var == "TOTAL_", ]), 1) + expect_equal(nrow(ep_soc5[strata_var == "SEX", ]), 2) + expect_equal(nrow(ep_soc5[strata_var == "AGEGR2", ]), 2) # by_strata_across_trt: INFECTIONS AND INFESTATIONS - ep_soc6 <- ep_prep_by_strata_across_trt[grepl("INFECTIONS AND INFESTATIONS", endpoint_label),] + ep_soc6 <- ep_prep_by_strata_across_trt[grepl("INFECTIONS AND INFESTATIONS", endpoint_label), ] expect_equal(nrow(ep_soc6), 1) - expect_equal(nrow(ep_soc6[strata_var == "TOTAL_",]), 1) + expect_equal(nrow(ep_soc6[strata_var == "TOTAL_", ]), 1) # by_strata_across_trt: SKIN AND SUBCUTANEOUS TISSUE DISORDERS - ep_soc7 <- ep_prep_by_strata_across_trt[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label),] + ep_soc7 <- ep_prep_by_strata_across_trt[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc7), 3) - expect_equal(nrow(ep_soc7[strata_var == "TOTAL_",]), 1) - 
expect_equal(nrow(ep_soc7[strata_var == "AGEGR2",]), 2) + expect_equal(nrow(ep_soc7[strata_var == "TOTAL_", ]), 1) + expect_equal(nrow(ep_soc7[strata_var == "AGEGR2", ]), 2) # by_strata_across_trt: VASCULAR DISORDERS - ep_soc8 <- ep_prep_by_strata_across_trt[grepl("VASCULAR DISORDERS", endpoint_label),] + ep_soc8 <- ep_prep_by_strata_across_trt[grepl("VASCULAR DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc8), 1) - expect_equal(nrow(ep_soc8[strata_var == "TOTAL_",]), 1) + expect_equal(nrow(ep_soc8[strata_var == "TOTAL_", ]), 1) # across_strata_across_trt: Summary expect_equal(nrow(ep_prep_across_strata_across_trt), 3) - expect_equal(nrow(ep_prep_across_strata_across_trt[fn_name == "P-interaction",]), 3) + expect_equal(nrow(ep_prep_across_strata_across_trt[fn_name == "P-interaction", ]), 3) # across_strata_across_trt: CARDIAC DISORDERS - ep_soc9 <- ep_prep_across_strata_across_trt[grepl("CARDIAC DISORDERS", endpoint_label),] + ep_soc9 <- ep_prep_across_strata_across_trt[grepl("CARDIAC DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc9), 2) - expect_equal(nrow(ep_soc9[strata_var == "SEX",]), 1) - expect_equal(nrow(ep_soc9[strata_var == "AGEGR2",]), 1) + expect_equal(nrow(ep_soc9[strata_var == "SEX", ]), 1) + expect_equal(nrow(ep_soc9[strata_var == "AGEGR2", ]), 1) # across_strata_across_trt: INFECTIONS AND INFESTATIONS - ep_soc10 <- ep_prep_across_strata_across_trt[grepl("INFECTIONS AND INFESTATIONS", endpoint_label),] + ep_soc10 <- ep_prep_across_strata_across_trt[grepl("INFECTIONS AND INFESTATIONS", endpoint_label), ] expect_equal(nrow(ep_soc10), 0) # across_strata_across_trt: SKIN AND SUBCUTANEOUS TISSUE DISORDERS - ep_soc11 <- ep_prep_across_strata_across_trt[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label),] + ep_soc11 <- ep_prep_across_strata_across_trt[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc11), 1) - expect_equal(nrow(ep_soc11[strata_var == "AGEGR2",]), 1) + expect_equal(nrow(ep_soc11[strata_var == "AGEGR2", ]), 1) # across_strata_across_trt: VASCULAR DISORDERS - ep_soc12 <- ep_prep_across_strata_across_trt[grepl("VASCULAR DISORDERS", endpoint_label),] + ep_soc12 <- ep_prep_across_strata_across_trt[grepl("VASCULAR DISORDERS", endpoint_label), ] expect_equal(nrow(ep_soc12), 0) # Rejected entities expect_equal(nrow(ep_rejected), 24) # Rejected endpoints - expect_equal(nrow(ep_rejected[crit_accept_endpoint==FALSE,]), 19) + expect_equal(nrow(ep_rejected[crit_accept_endpoint == FALSE, ]), 19) # Rejected by_strata_by_trt: Summary - ep_reject1 <- ep_rejected[crit_accept_endpoint==TRUE & crit_accept_by_strata_by_trt==FALSE,] + ep_reject1 <- ep_rejected[crit_accept_endpoint == TRUE & crit_accept_by_strata_by_trt == FALSE, ] expect_equal(nrow(ep_reject1), 4) # Rejected by_strata_by_trt: INFECTIONS AND INFESTATIONS - ep_reject2 <- ep_reject1[grepl("INFECTIONS AND INFESTATIONS", endpoint_label),] + ep_reject2 <- ep_reject1[grepl("INFECTIONS AND INFESTATIONS", endpoint_label), ] expect_equal(nrow(ep_reject2), 2) expect_equal(nrow(ep_reject2[strata_var == "SEX"]), 1) expect_equal(nrow(ep_reject2[strata_var == "AGEGR2"]), 1) # Rejected by_strata_by_trt: SKIN AND SUBCUTANEOUS TISSUE DISORDERS - ep_reject3 <- ep_reject1[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label),] + ep_reject3 <- ep_reject1[grepl("SKIN AND SUBCUTANEOUS TISSUE DISORDERS", endpoint_label), ] expect_equal(nrow(ep_reject3), 1) expect_equal(nrow(ep_reject3[strata_var == "SEX"]), 1) # Rejected by_strata_by_trt: VASCULAR DISORDERS - 
ep_reject4 <- ep_reject1[grepl("VASCULAR DISORDERS", endpoint_label),] + ep_reject4 <- ep_reject1[grepl("VASCULAR DISORDERS", endpoint_label), ] expect_equal(nrow(ep_reject3), 1) expect_equal(nrow(ep_reject3[strata_var == "SEX"]), 1) # Rejected by_strata_across_trt - ep_reject5 <- ep_rejected[crit_accept_endpoint==TRUE & crit_accept_by_strata_by_trt==TRUE & crit_accept_by_strata_across_trt==FALSE,] + ep_reject5 <- ep_rejected[crit_accept_endpoint == TRUE & crit_accept_by_strata_by_trt == TRUE & crit_accept_by_strata_across_trt == FALSE, ] expect_equal(nrow(ep_reject5), 1) expect_equal(nrow(ep_reject5[grepl("VASCULAR DISORDERS", endpoint_label) & strata_var == "AGEGR2"]), 1) - }) diff --git a/tests/testthat/test-expand_over_endpoints.R b/tests/testthat/test-expand_over_endpoints.R index bf04692..f5d29af 100644 --- a/tests/testthat/test-expand_over_endpoints.R +++ b/tests/testthat/test-expand_over_endpoints.R @@ -1,209 +1,216 @@ -test_that("grp level criterion works", - { - # SETUP ------------------------------------------------------------------- - ep <- rbind( - mk_ep_0001_waiting_grps( - data_prepare = mk_adcm, - group_by = list(list(CMCLAS = c())), - endpoint_label = "a" - ) - ) - - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - - - # ACT --------------------------------------------------------------------- - actual <- - expand_over_endpoints( - ep = ep_and_data$ep, - analysis_data_container = ep_and_data$analysis_data_container - ) - # EXPECT ------------------------------------------------------------------ - expected_values <- - ep_and_data$analysis_data_container$dat[[1]][!is.na(CMCLAS)]$CMCLAS |> unique() - expect_equal(nrow(actual), length(expected_values)) - }) - - -test_that("grp level works when only 1 level available in the data", - { - # SETUP ------------------------------------------------------------------- - ep <- rbind( - mk_ep_0001_waiting_grps( - data_prepare = mk_adcm, - group_by = list(list(CMCLAS = c("UNCODED"))), - endpoint_label = "b" - ) - ) - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - - - # ACT --------------------------------------------------------------------- - actual <- - expand_over_endpoints( - ep = ep_and_data$ep, - analysis_data_container = ep_and_data$analysis_data_container - ) - - # EXPECT ------------------------------------------------------------------ - - expected_valued <- - ep_and_data$analysis_data_container$dat[[1]]$CMCLAS |> unique() - expect_equal(nrow(actual), 1) - expect_true(grepl("UNCODED", actual$endpoint_group_filter)) - - }) - - -test_that("grp level criterion works when group across multiple variables", - { - # SETUP ------------------------------------------------------------------- - ep <- rbind( - mk_ep_0001_waiting_grps( - data_prepare = mk_adcm, - group_by = list(list( - CMCLAS = c("UNCODED"), RACEGR1 = c() - )), - endpoint_label = "c" - ) - ) - ep <- add_id(ep) - ep_fn_map <- 
- suppressWarnings(unnest_endpoint_functions(ep)) - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - - # ACT --------------------------------------------------------------------- - actual <- - expand_over_endpoints( - ep = ep_and_data$ep, - analysis_data_container = ep_and_data$analysis_data_container - ) - - - # EXPECT ------------------------------------------------------------------ - expected <- - ep_and_data$analysis_data_container$dat[[1]]$RACEGR1 |> unique() |> length() - expect_equal(NROW(actual), expected) - }) - - -test_that("grp level criterion works when group_by is empty", - { - # SETUP ------------------------------------------------------------------- - ep <- rbind( - mk_ep_0001_waiting_grps(data_prepare = mk_adcm, - endpoint_label = "e"), - mk_ep_0001_waiting_grps( - data_prepare = mk_adcm, - group_by = list(list(CMCLAS = c())), - endpoint_label = "f" - ) - ) - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - - - # ACT --------------------------------------------------------------------- - - actual <- - expand_over_endpoints( - ep = ep_and_data$ep, - analysis_data_container = ep_and_data$analysis_data_container - ) - - # EXPECT ------------------------------------------------------------------ - - expected <- - ep_and_data$analysis_data_container$dat[[1]]$CMCLAS |> - unique() |> - length() - - expect_equal(nrow(actual), expected) - expect_equal(nrow(actual[is.na(endpoint_group_filter)]), 1) - expect_equal(actual[is.na(endpoint_group_filter), endpoint_id], "1-0001") - }) - - -test_that("dynamic endpoint labels", - { - # SETUP ------------------------------------------------------------------- - ep <- mk_ep_0001_base( - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - group_by = list(list(CMCLAS = c( - "UNCODED", "NERVOUS SYSTEM" - ))), - endpoint_filter = "AGEGR1 == '18-64'", - endpoint_label = " - - - ", - ) - ep <- add_id(ep) - ep_fn_map <- - suppressWarnings(unnest_endpoint_functions(ep)) - user_def_fn <- - mk_userdef_fn_dt(ep_fn_map, env = environment()) - fn_map <- - merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") - adam_db <- - fetch_db_data(study_metadata = list(), - fn_dt = user_def_fn) - ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) - - - # ACT --------------------------------------------------------------------- - actual <- - expand_over_endpoints( - ep = ep_and_data$ep, - analysis_data_container = ep_and_data$analysis_data_container - ) - - - # EXPECT ------------------------------------------------------------------ - expect_equal(nrow(actual), 2) - expect_equal( - actual$endpoint_label, - c( - "SAFFL - TRT01A - UNCODED - AGEGR1 == '18-64'", - "SAFFL - TRT01A - NERVOUS SYSTEM - AGEGR1 == '18-64'" - ) - ) - - }) +test_that("grp level criterion works", { + # SETUP ------------------------------------------------------------------- + ep <- rbind( + mk_ep_0001_waiting_grps( + 
data_prepare = mk_adcm, + group_by = list(list(CMCLAS = c())), + endpoint_label = "a" + ) + ) + + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + + + # ACT --------------------------------------------------------------------- + actual <- + expand_over_endpoints( + ep = ep_and_data$ep, + analysis_data_container = ep_and_data$analysis_data_container + ) + # EXPECT ------------------------------------------------------------------ + expected_values <- + ep_and_data$analysis_data_container$dat[[1]][!is.na(CMCLAS)]$CMCLAS |> unique() + expect_equal(nrow(actual), length(expected_values)) +}) + + +test_that("grp level works when only 1 level available in the data", { + # SETUP ------------------------------------------------------------------- + ep <- rbind( + mk_ep_0001_waiting_grps( + data_prepare = mk_adcm, + group_by = list(list(CMCLAS = c("UNCODED"))), + endpoint_label = "b" + ) + ) + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + + + # ACT --------------------------------------------------------------------- + actual <- + expand_over_endpoints( + ep = ep_and_data$ep, + analysis_data_container = ep_and_data$analysis_data_container + ) + + # EXPECT ------------------------------------------------------------------ + + expected_valued <- + ep_and_data$analysis_data_container$dat[[1]]$CMCLAS |> unique() + expect_equal(nrow(actual), 1) + expect_true(grepl("UNCODED", actual$endpoint_group_filter)) +}) + + +test_that("grp level criterion works when group across multiple variables", { + # SETUP ------------------------------------------------------------------- + ep <- rbind( + mk_ep_0001_waiting_grps( + data_prepare = mk_adcm, + group_by = list(list( + CMCLAS = c("UNCODED"), RACEGR1 = c() + )), + endpoint_label = "c" + ) + ) + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + + # ACT --------------------------------------------------------------------- + actual <- + expand_over_endpoints( + ep = ep_and_data$ep, + analysis_data_container = ep_and_data$analysis_data_container + ) + + + # EXPECT ------------------------------------------------------------------ + expected <- + ep_and_data$analysis_data_container$dat[[1]]$RACEGR1 |> + unique() |> + length() + expect_equal(NROW(actual), expected) +}) + + +test_that("grp level criterion works when group_by is empty", { + # SETUP ------------------------------------------------------------------- + ep <- rbind( + mk_ep_0001_waiting_grps( + data_prepare = mk_adcm, + endpoint_label = "e" + ), + mk_ep_0001_waiting_grps( + data_prepare = mk_adcm, + group_by = 
list(list(CMCLAS = c())), + endpoint_label = "f" + ) + ) + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + + + # ACT --------------------------------------------------------------------- + + actual <- + expand_over_endpoints( + ep = ep_and_data$ep, + analysis_data_container = ep_and_data$analysis_data_container + ) + + # EXPECT ------------------------------------------------------------------ + + expected <- + ep_and_data$analysis_data_container$dat[[1]]$CMCLAS |> + unique() |> + length() + + expect_equal(nrow(actual), expected) + expect_equal(nrow(actual[is.na(endpoint_group_filter)]), 1) + expect_equal(actual[is.na(endpoint_group_filter), endpoint_id], "1-0001") +}) + + +test_that("dynamic endpoint labels", { + # SETUP ------------------------------------------------------------------- + ep <- mk_ep_0001_base( + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + group_by = list(list(CMCLAS = c( + "UNCODED", "NERVOUS SYSTEM" + ))), + endpoint_filter = "AGEGR1 == '18-64'", + endpoint_label = " - - - ", + ) + ep <- add_id(ep) + ep_fn_map <- + suppressWarnings(unnest_endpoint_functions(ep)) + user_def_fn <- + mk_userdef_fn_dt(ep_fn_map, env = environment()) + fn_map <- + merge(ep_fn_map[, .(endpoint_spec_id, fn_hash)], user_def_fn, by = "fn_hash") + adam_db <- + fetch_db_data( + study_metadata = list(), + fn_dt = user_def_fn + ) + ep_and_data <- filter_db_data(ep, ep_fn_map, adam_db) + + + # ACT --------------------------------------------------------------------- + actual <- + expand_over_endpoints( + ep = ep_and_data$ep, + analysis_data_container = ep_and_data$analysis_data_container + ) + + + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual), 2) + expect_equal( + actual$endpoint_label, + c( + "SAFFL - TRT01A - UNCODED - AGEGR1 == '18-64'", + "SAFFL - TRT01A - NERVOUS SYSTEM - AGEGR1 == '18-64'" + ) + ) +}) diff --git a/tests/testthat/test-fetch_db_data.R b/tests/testthat/test-fetch_db_data.R index 40d8f63..6ff444f 100644 --- a/tests/testthat/test-fetch_db_data.R +++ b/tests/testthat/test-fetch_db_data.R @@ -1,5 +1,4 @@ -test_that("Fetching/proccessing adam works", -{ +test_that("Fetching/proccessing adam works", { # SETUP ------------------------------------------------------------------- fn_dt <- suppressWarnings( data.table::data.table( @@ -13,7 +12,8 @@ test_that("Fetching/proccessing adam works", # ACT --------------------------------------------------------------------- adam <- fn_dt[, eval_data_fn( fn = fn_callable, - study_metadata = list()), by = seq_len(nrow(fn_dt))] + study_metadata = list() + ), by = seq_len(nrow(fn_dt))] # EXPECT ------------------------------------------------------------------ @@ -24,8 +24,7 @@ test_that("Fetching/proccessing adam works", }) -test_that("Fetching adam data works when single data_prepare specified", -{ +test_that("Fetching adam data works when single data_prepare specified", { # SETUP ------------------------------------------------------------------- ep <- rbind(suppressWarnings( @@ -54,25 +53,27 @@ test_that("Fetching adam data works when single data_prepare specified", }) 
-test_that("Only unique adam datasets are returned", -{ +test_that("Only unique adam datasets are returned", { # SETUP ------------------------------------------------------------------- ep <- - rbind(suppressWarnings( - mk_ep_0001_base( - data_prepare = mk_adae, - endpoint_label = "A" - ) - ), - suppressWarnings( - mk_ep_0001_base( - data_prepare = mk_adae, - endpoint_label = "B" + rbind( + suppressWarnings( + mk_ep_0001_base( + data_prepare = mk_adae, + endpoint_label = "A" + ) + ), + suppressWarnings( + mk_ep_0001_base( + data_prepare = mk_adae, + endpoint_label = "B" + ) ) - )) + ) ep <- add_id(ep) ep_long <- suppressWarnings( - unnest_endpoint_functions(ep, fn_cols = c("data_prepare"))) + unnest_endpoint_functions(ep, fn_cols = c("data_prepare")) + ) function_dt <- mk_userdef_fn_dt(ep_long) # ACT --------------------------------------------------------------------- @@ -81,17 +82,22 @@ test_that("Only unique adam datasets are returned", # EXPECT ------------------------------------------------------------------ expect_equal(nrow(adam), 1) - expect_equal(adam$fn_name, - c("mk_adae")) - expect_equal(intersect("AGEGR2", names(adam$dat[[1]])), - "AGEGR2") - expect_equal(setdiff("TESTVAR", names(adam$dat[[1]])), - "TESTVAR") + expect_equal( + adam$fn_name, + c("mk_adae") + ) + expect_equal( + intersect("AGEGR2", names(adam$dat[[1]])), + "AGEGR2" + ) + expect_equal( + setdiff("TESTVAR", names(adam$dat[[1]])), + "TESTVAR" + ) }) -test_that("Multiple, but unique adam datasets are returned", -{ +test_that("Multiple, but unique adam datasets are returned", { # SETUP ------------------------------------------------------------------- ep <- rbind( @@ -128,15 +134,17 @@ test_that("Multiple, but unique adam datasets are returned", # EXPECT ------------------------------------------------------------------ expect_equal(nrow(adam), 2) - expect_equal(adam$fn_name, - c("mk_adae", - "mk_adex")) - + expect_equal( + adam$fn_name, + c( + "mk_adae", + "mk_adex" + ) + ) }) -test_that("data_prepare with no specified input datasets error out", -{ +test_that("data_prepare with no specified input datasets error out", { # SETUP ------------------------------------------------------------------- mk_adam_training_error <- function() { @@ -173,13 +181,12 @@ test_that("data_prepare with no specified input datasets error out", expect_error( function_dt <- mk_userdef_fn_dt(ep_long), "Function (mk_adam_training_error) of type (data_prepare) is supplied arguments it does not expect", - fixed=TRUE + fixed = TRUE ) }) -test_that("data_prepare with internal error gives useful error msg", -{ +test_that("data_prepare with internal error gives useful error msg", { # SETUP ------------------------------------------------------------------- error_fn <- function(study_metadata) { @@ -213,12 +220,12 @@ test_that("data_prepare with internal error gives useful error msg", # EXPECT ------------------------------------------------------------------ expect_error(fetch_db_data(study_metadata = study_metadata, fn_dt = function_dt), - regexp = "error_fn: problem in function") + regexp = "error_fn: problem in function" + ) }) -test_that("Fetching/proccessing adsl works", -{ +test_that("Fetching/proccessing adsl works", { mk_adam_error <- function(study_metadata) { nonpackage::test() } @@ -238,7 +245,11 @@ test_that("Fetching/proccessing adsl works", # ACT --------------------------------------------------------------------- # EXPECT ------------------------------------------------------------------ - 
expect_error(fetch_db_data(study_metadata = - study_metadata, fn_dt = function_dt), - regexp = "mk_adam_error: there is no package called") + expect_error( + fetch_db_data( + study_metadata = + study_metadata, fn_dt = function_dt + ), + regexp = "mk_adam_error: there is no package called" + ) }) diff --git a/tests/testthat/test-filter_db_data.R b/tests/testthat/test-filter_db_data.R index 1b607ad..2c05ac2 100644 --- a/tests/testthat/test-filter_db_data.R +++ b/tests/testthat/test-filter_db_data.R @@ -1,73 +1,79 @@ -test_that("base case: filter_db_data works with pop filter and no custom filter", - { - # SETUP ------------------------------------------------------------------- - - adam <- mk_adcm() %>% .[, "INDEX_" := .I] - pop_var <- "SAFFL" - pop_value <- "Y" - period_var = NA_character_ - period_value = NA_character_ - custom_pop_filter <- NA_character_ - - ep <- - data.table( - pop_var = pop_var, - pop_value = pop_value, - period_var = period_var, - period_value = period_value, - custom_pop_filter = custom_pop_filter, - endpoint_spec_id = 1 - ) - ep_fn_map <- - data.table(endpoint_spec_id = 1, - fn_type = "data_prepare", - fn_hash = "a") - adam_db <- - data.table(fn_type = "data_prepare", - fn_hash = "a", - dat = list(adam)) - - # ACT --------------------------------------------------------------------- - - actual <- filter_db_data(ep, ep_fn_map, adam_db)$analysis_data_container - # EXPECT ------------------------------------------------------------------ - expect_equal(actual$dat[[1]], adam[SAFFL == "Y"]) - }) - - -test_that("base case: filter_db_data works with both pop filter and custom filter", - { - # SETUP ------------------------------------------------------------------- - adam <- mk_adcm() %>% .[, "INDEX_" := .I] - pop_var <- "SAFFL" - pop_value <- "Y" - period_var = "ANL01FL" - period_value = "Y" - custom_pop_filter <- "CMSEQ >= 60" - - ep <- - data.table( - pop_var = pop_var, - pop_value = pop_value, - period_var = period_var, - period_value = period_value, - custom_pop_filter = custom_pop_filter, - endpoint_spec_id = 1 - ) - ep_fn_map <- - data.table(endpoint_spec_id = 1, - fn_type = "data_prepare", - fn_hash = "a") - adam_db <- - data.table(fn_type = "data_prepare", - fn_hash = "a", - dat = list(adam)) - # ACT --------------------------------------------------------------------- - actual <- filter_db_data(ep, ep_fn_map, adam_db)$analysis_data_container - - # EXPECT ------------------------------------------------------------------ - expect_equal(actual$dat[[1]], adam[SAFFL == "Y" & CMSEQ >= 60]) - }) +test_that("base case: filter_db_data works with pop filter and no custom filter", { + # SETUP ------------------------------------------------------------------- + + adam <- mk_adcm() %>% .[, "INDEX_" := .I] + pop_var <- "SAFFL" + pop_value <- "Y" + period_var <- NA_character_ + period_value <- NA_character_ + custom_pop_filter <- NA_character_ + + ep <- + data.table( + pop_var = pop_var, + pop_value = pop_value, + period_var = period_var, + period_value = period_value, + custom_pop_filter = custom_pop_filter, + endpoint_spec_id = 1 + ) + ep_fn_map <- + data.table( + endpoint_spec_id = 1, + fn_type = "data_prepare", + fn_hash = "a" + ) + adam_db <- + data.table( + fn_type = "data_prepare", + fn_hash = "a", + dat = list(adam) + ) + + # ACT --------------------------------------------------------------------- + + actual <- filter_db_data(ep, ep_fn_map, adam_db)$analysis_data_container + # EXPECT ------------------------------------------------------------------ + 
expect_equal(actual$dat[[1]], adam[SAFFL == "Y"]) +}) + + +test_that("base case: filter_db_data works with both pop filter and custom filter", { + # SETUP ------------------------------------------------------------------- + adam <- mk_adcm() %>% .[, "INDEX_" := .I] + pop_var <- "SAFFL" + pop_value <- "Y" + period_var <- "ANL01FL" + period_value <- "Y" + custom_pop_filter <- "CMSEQ >= 60" + + ep <- + data.table( + pop_var = pop_var, + pop_value = pop_value, + period_var = period_var, + period_value = period_value, + custom_pop_filter = custom_pop_filter, + endpoint_spec_id = 1 + ) + ep_fn_map <- + data.table( + endpoint_spec_id = 1, + fn_type = "data_prepare", + fn_hash = "a" + ) + adam_db <- + data.table( + fn_type = "data_prepare", + fn_hash = "a", + dat = list(adam) + ) + # ACT --------------------------------------------------------------------- + actual <- filter_db_data(ep, ep_fn_map, adam_db)$analysis_data_container + + # EXPECT ------------------------------------------------------------------ + expect_equal(actual$dat[[1]], adam[SAFFL == "Y" & CMSEQ >= 60]) +}) test_that( "base case: filter_db_data throws error when pop_var or pop_value has not been specified", @@ -77,8 +83,8 @@ test_that( adam <- mk_adcm() %>% .[, "INDEX_" := .I] pop_var <- NULL pop_value <- NULL - period_var = "ANL01FL" - period_value = "Y" + period_var <- "ANL01FL" + period_value <- "Y" custom_pop_filter <- "CMSEQ >= 60" ep <- @@ -91,13 +97,17 @@ test_that( endpoint_spec_id = 1 ) ep_fn_map <- - data.table(endpoint_spec_id = 1, - fn_type = "data_prepare", - fn_hash = "a") + data.table( + endpoint_spec_id = 1, + fn_type = "data_prepare", + fn_hash = "a" + ) adam_db <- - data.table(fn_type = "data_prepare", - fn_hash = "a", - dat = list(adam)) + data.table( + fn_type = "data_prepare", + fn_hash = "a", + dat = list(adam) + ) # ACT --------------------------------------------------------------------- @@ -108,121 +118,129 @@ test_that( ) -test_that("filter_db_data works with >1 row in ep dataset", - { - # SETUP ------------------------------------------------------------------- - adam <- mk_adcm() %>% .[, "INDEX_" := .I] - pop_var <- "SAFFL" - pop_value <- "Y" - period_var = "ANL01FL" - period_value = "Y" - custom_pop_filter <- "CMSEQ >= 60" - - ep <- - data.table( - pop_var = pop_var, - pop_value = pop_value, - period_var = period_var, - period_value = period_value, - custom_pop_filter = custom_pop_filter, - endpoint_spec_id = 1 - ) - - ep <- rbind(ep, ep) - ep[2, custom_pop_filter := "CMSEQ >= 75"] - ep_fn_map <- - data.table(endpoint_spec_id = 1, - fn_type = "data_prepare", - fn_hash = "a") - adam_db <- - data.table(fn_type = "data_prepare", - fn_hash = "a", - dat = list(adam)) - # ACT --------------------------------------------------------------------- - actual <- filter_db_data(ep, ep_fn_map, adam_db) - - # EXPECT ------------------------------------------------------------------ - expect_equal(actual$analysis_data_container$dat[[1]], adam[SAFFL == "Y" & - CMSEQ >= 60]) - expect_equal(actual$analysis_data_container$dat[[2]], adam[SAFFL == "Y" & - CMSEQ >= 75]) - }) - - -test_that("data keys are same for same data, different for different data", - { - # SETUP ------------------------------------------------------------------- - adam <- mk_adcm() %>% .[, "INDEX_" := .I] - pop_var <- "SAFFL" - pop_value <- "Y" - period_var = "ANL01FL" - period_value = "Y" - custom_pop_filter <- "CMSEQ >= 60" - - ep <- - data.table( - pop_var = pop_var, - pop_value = pop_value, - period_var = period_var, - period_value = 
period_value, - custom_pop_filter = custom_pop_filter, - endpoint_spec_id = 1 - ) - - ep <- rbind(ep, ep, ep) - ep[2, custom_pop_filter := "CMSEQ >= 75"] - ep_fn_map <- - data.table(endpoint_spec_id = 1, - fn_type = "data_prepare", - fn_hash = "a") - adam_db <- - data.table(fn_type = "data_prepare", - fn_hash = "a", - dat = list(adam)) - # ACT --------------------------------------------------------------------- - actual <- - filter_db_data(ep, ep_fn_map, adam_db) - # EXPECT ------------------------------------------------------------------ - expect_equal(nrow(actual$analysis_data_container), 2) - expect_equal(nrow(actual$ep), 3) - - }) - - -test_that("output tables are keyed properly", - { - # SETUP ------------------------------------------------------------------- - adam <- mk_adcm() %>% .[, "INDEX_" := .I] - pop_var <- "SAFFL" - pop_value <- "Y" - period_var = "ANL01FL" - period_value = "Y" - custom_pop_filter <- "CMSEQ >= 60" - - ep <- - data.table( - pop_var = pop_var, - pop_value = pop_value, - period_var = period_var, - period_value = period_value, - custom_pop_filter = custom_pop_filter, - endpoint_spec_id = 1 - ) - - ep <- rbind(ep, ep, ep) - ep[2, custom_pop_filter := "CMSEQ >= 75"] - ep_fn_map <- - data.table(endpoint_spec_id = 1, - fn_type = "data_prepare", - fn_hash = "a") - adam_db <- - data.table(fn_type = "data_prepare", - fn_hash = "a", - dat = list(adam)) - # ACT --------------------------------------------------------------------- - actual <- - filter_db_data(ep, ep_fn_map, adam_db) - # EXPECT ------------------------------------------------------------------ - expect_equal(key(actual$analysis_data_container), "key_analysis_data") - expect_equal(key(actual$ep), "key_analysis_data") - }) +test_that("filter_db_data works with >1 row in ep dataset", { + # SETUP ------------------------------------------------------------------- + adam <- mk_adcm() %>% .[, "INDEX_" := .I] + pop_var <- "SAFFL" + pop_value <- "Y" + period_var <- "ANL01FL" + period_value <- "Y" + custom_pop_filter <- "CMSEQ >= 60" + + ep <- + data.table( + pop_var = pop_var, + pop_value = pop_value, + period_var = period_var, + period_value = period_value, + custom_pop_filter = custom_pop_filter, + endpoint_spec_id = 1 + ) + + ep <- rbind(ep, ep) + ep[2, custom_pop_filter := "CMSEQ >= 75"] + ep_fn_map <- + data.table( + endpoint_spec_id = 1, + fn_type = "data_prepare", + fn_hash = "a" + ) + adam_db <- + data.table( + fn_type = "data_prepare", + fn_hash = "a", + dat = list(adam) + ) + # ACT --------------------------------------------------------------------- + actual <- filter_db_data(ep, ep_fn_map, adam_db) + + # EXPECT ------------------------------------------------------------------ + expect_equal(actual$analysis_data_container$dat[[1]], adam[SAFFL == "Y" & + CMSEQ >= 60]) + expect_equal(actual$analysis_data_container$dat[[2]], adam[SAFFL == "Y" & + CMSEQ >= 75]) +}) + + +test_that("data keys are same for same data, different for different data", { + # SETUP ------------------------------------------------------------------- + adam <- mk_adcm() %>% .[, "INDEX_" := .I] + pop_var <- "SAFFL" + pop_value <- "Y" + period_var <- "ANL01FL" + period_value <- "Y" + custom_pop_filter <- "CMSEQ >= 60" + + ep <- + data.table( + pop_var = pop_var, + pop_value = pop_value, + period_var = period_var, + period_value = period_value, + custom_pop_filter = custom_pop_filter, + endpoint_spec_id = 1 + ) + + ep <- rbind(ep, ep, ep) + ep[2, custom_pop_filter := "CMSEQ >= 75"] + ep_fn_map <- + data.table( + endpoint_spec_id = 1, 
+ fn_type = "data_prepare", + fn_hash = "a" + ) + adam_db <- + data.table( + fn_type = "data_prepare", + fn_hash = "a", + dat = list(adam) + ) + # ACT --------------------------------------------------------------------- + actual <- + filter_db_data(ep, ep_fn_map, adam_db) + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual$analysis_data_container), 2) + expect_equal(nrow(actual$ep), 3) +}) + + +test_that("output tables are keyed properly", { + # SETUP ------------------------------------------------------------------- + adam <- mk_adcm() %>% .[, "INDEX_" := .I] + pop_var <- "SAFFL" + pop_value <- "Y" + period_var <- "ANL01FL" + period_value <- "Y" + custom_pop_filter <- "CMSEQ >= 60" + + ep <- + data.table( + pop_var = pop_var, + pop_value = pop_value, + period_var = period_var, + period_value = period_value, + custom_pop_filter = custom_pop_filter, + endpoint_spec_id = 1 + ) + + ep <- rbind(ep, ep, ep) + ep[2, custom_pop_filter := "CMSEQ >= 75"] + ep_fn_map <- + data.table( + endpoint_spec_id = 1, + fn_type = "data_prepare", + fn_hash = "a" + ) + adam_db <- + data.table( + fn_type = "data_prepare", + fn_hash = "a", + dat = list(adam) + ) + # ACT --------------------------------------------------------------------- + actual <- + filter_db_data(ep, ep_fn_map, adam_db) + # EXPECT ------------------------------------------------------------------ + expect_equal(key(actual$analysis_data_container), "key_analysis_data") + expect_equal(key(actual$ep), "key_analysis_data") +}) diff --git a/tests/testthat/test-group_ep_for_targets.R b/tests/testthat/test-group_ep_for_targets.R index 1e95ae3..8e5a832 100644 --- a/tests/testthat/test-group_ep_for_targets.R +++ b/tests/testthat/test-group_ep_for_targets.R @@ -1,7 +1,7 @@ test_that("grouping works for different values", { dt <- data.table(value = 1:50) -actual <- group_ep_for_targets(dt, 10) -expect_equal(unique(actual$targets_group), 0:4) -actual <- group_ep_for_targets(dt, 25) -expect_equal(unique(actual$targets_group), 0:1) + actual <- group_ep_for_targets(dt, 10) + expect_equal(unique(actual$targets_group), 0:4) + actual <- group_ep_for_targets(dt, 25) + expect_equal(unique(actual$targets_group), 0:1) }) diff --git a/tests/testthat/test-mk_endpoint_str.R b/tests/testthat/test-mk_endpoint_str.R index 2ab4588..5049556 100644 --- a/tests/testthat/test-mk_endpoint_str.R +++ b/tests/testthat/test-mk_endpoint_str.R @@ -10,7 +10,8 @@ test_that("No specification of pop_var", { period_var = "ANL01FL", period_value = "Y", data_prepare = mk_adae - )) + ) + ) }) test_that("No specification of pop_value", { @@ -25,7 +26,8 @@ test_that("No specification of pop_value", { period_var = "ANL01FL", period_value = "Y", data_prepare = mk_adae - )) + ) + ) }) test_that("No specification of treatment_var", { @@ -40,7 +42,8 @@ test_that("No specification of treatment_var", { period_var = "ANL01FL", period_value = "Y", data_prepare = mk_adae - )) + ) + ) }) test_that("No specification of treatment_refval", { @@ -55,7 +58,8 @@ test_that("No specification of treatment_refval", { period_var = "ANL01FL", period_value = "Y", data_prepare = mk_adae - )) + ) + ) }) test_that("No specification of period_var", { @@ -69,7 +73,7 @@ test_that("No specification of period_var", { data_prepare = mk_adae ) # EXPECT ------------------------------------------------------------------ - expect_s3_class(actual, "data.table") + expect_s3_class(actual, "data.table") expect_equal(nrow(actual), 1) }) @@ -86,7 +90,8 @@ test_that("No 
specification of data_prepare", { treatment_refval = "Xanomeline High Dose", period_var = "ANL01FL", period_value = "Y" - )) + ) + ) }) test_that("Specification of non-existing data_prepare", { @@ -102,7 +107,8 @@ test_that("Specification of non-existing data_prepare", { period_var = "ANL01FL", period_value = "Y", data_prepare = mk_adae_notexist - )) + ) + ) }) test_that("Specification of non-existing stat_by_strata_by_trt function", { @@ -119,8 +125,10 @@ test_that("Specification of non-existing stat_by_strata_by_trt function", { period_value = "Y", data_prepare = mk_adae, stat_by_strata_by_trt = list( - "N_subjects" = n_subj_notexist) - )) + "N_subjects" = n_subj_notexist + ) + ) + ) }) test_that("Specification of non-existing stat_by_strata_by_trt function", { @@ -137,8 +145,10 @@ test_that("Specification of non-existing stat_by_strata_by_trt function", { period_value = "Y", data_prepare = mk_adae, stat_by_strata_across_trt = list( - "N_subjects" = n_subj_notexist) - )) + "N_subjects" = n_subj_notexist + ) + ) + ) }) test_that("Specification of non-existing stat_across_strata_across_trt function", { @@ -155,14 +165,18 @@ test_that("Specification of non-existing stat_across_strata_across_trt function" period_value = "Y", data_prepare = mk_adae, stat_across_strata_across_trt = list( - "N_subjects" = n_subj_notexist) - )) + "N_subjects" = n_subj_notexist + ) + ) + ) }) test_that("naked functions are correctly stored", { # SETUP ------------------------------------------------------------------- - crit_fn <- function(...){return(F)} + crit_fn <- function(...) { + return(F) + } # ACT --------------------------------------------------------------------- # EXPECT ------------------------------------------------------------------ @@ -178,7 +192,6 @@ test_that("naked functions are correctly stored", { crit_by_strata_by_trt = crit_fn, crit_by_strata_across_trt = crit_fn, stat_by_strata_by_trt = crit_fn, - ) expected <- mk_endpoint_str( @@ -194,10 +207,10 @@ test_that("naked functions are correctly stored", { crit_by_strata_across_trt = list(crit_fn), stat_by_strata_by_trt = list(crit_fn) ) - expect_equal(actual$crit_endpoint,expected$crit_endpoint) - expect_equal(actual$crit_by_strata_across_trt,expected$crit_by_strata_across_trt) - expect_equal(actual$crit_by_strata_by_trt,expected$crit_by_strata_by_trt) - expect_equal(actual$stat_by_strata_by_trt,expected$stat_by_strata_by_trt) + expect_equal(actual$crit_endpoint, expected$crit_endpoint) + expect_equal(actual$crit_by_strata_across_trt, expected$crit_by_strata_across_trt) + expect_equal(actual$crit_by_strata_by_trt, expected$crit_by_strata_by_trt) + expect_equal(actual$stat_by_strata_by_trt, expected$stat_by_strata_by_trt) }) @@ -358,56 +371,63 @@ test_that("naked functions are correctly stored", { # }) test_that("Column types of endpoint specification with complete function specification", { + # SETUP ------------------------------------------------------------------- -# SETUP ------------------------------------------------------------------- - - expected_cols <- c("study_metadata", "pop_var", "pop_value", "treatment_var", - "treatment_refval", "period_var", "period_value", "custom_pop_filter", - "endpoint_filter", "group_by", "stratify_by", "endpoint_label", - "data_prepare", "stat_by_strata_by_trt", "stat_by_strata_across_trt", - "stat_across_strata_across_trt", "crit_endpoint", "crit_by_strata_by_trt", - "crit_by_strata_across_trt", "only_strata_with_events") + expected_cols <- c( + "study_metadata", "pop_var", "pop_value", "treatment_var", 
+ "treatment_refval", "period_var", "period_value", "custom_pop_filter", + "endpoint_filter", "group_by", "stratify_by", "endpoint_label", + "data_prepare", "stat_by_strata_by_trt", "stat_by_strata_across_trt", + "stat_across_strata_across_trt", "crit_endpoint", "crit_by_strata_by_trt", + "crit_by_strata_across_trt", "only_strata_with_events" + ) - chr_cols <- c("pop_var", "pop_value", "treatment_var", "treatment_refval", - "period_var", "period_value", "custom_pop_filter", - "endpoint_filter", "endpoint_label") + chr_cols <- c( + "pop_var", "pop_value", "treatment_var", "treatment_refval", + "period_var", "period_value", "custom_pop_filter", + "endpoint_filter", "endpoint_label" + ) - fn_cols <- c("data_prepare", "stat_by_strata_by_trt", - "stat_by_strata_across_trt", "stat_across_strata_across_trt", "crit_endpoint", - "crit_by_strata_by_trt", "crit_by_strata_across_trt") + fn_cols <- c( + "data_prepare", "stat_by_strata_by_trt", + "stat_by_strata_across_trt", "stat_across_strata_across_trt", "crit_endpoint", + "crit_by_strata_by_trt", "crit_by_strata_across_trt" + ) crit_ep_dummy <- function(...) { return(T) } - crit_sgd_dummy <- function(...){ + crit_sgd_dummy <- function(...) { return(T) } - crit_sga_dummy <- function(...){ + crit_sga_dummy <- function(...) { return(T) } -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- ep <- mk_ep_0001_base( data_prepare = mk_adae, - group_by = list(list(RACE=c())), - stat_by_strata_by_trt = list("n_sub" = n_sub, - "n_subev" = n_subev), + group_by = list(list(RACE = c())), + stat_by_strata_by_trt = list( + "n_sub" = n_sub, + "n_subev" = n_subev + ), stat_by_strata_across_trt = list("n_sub" = n_sub), stat_across_strata_across_trt = list("n_subev" = n_subev), crit_endpoint = list(c(crit_ep_dummy, var1 = "test")), crit_by_strata_by_trt = list(c(crit_sgd_dummy, var1 = "test")), crit_by_strata_across_trt = list(c(crit_sga_dummy, var1 = "test")), endpoint_label = "This is a test" - ) + ) -# EXPECT ------------------------------------------------------------------ + # EXPECT ------------------------------------------------------------------ # Check set of output columns expect_equal(setdiff(names(ep), expected_cols), character(0)) # Check character columns - for (i in chr_cols){ + for (i in chr_cols) { # Check column type is character expect_equal(typeof(ep[[i]]), "character", info = paste("Column:", i)) } @@ -417,14 +437,13 @@ test_that("Column types of endpoint specification with complete function specifi # Check named list columns nlst_cols <- c("study_metadata", "group_by") - for (i in nlst_cols){ - + for (i in nlst_cols) { # Check column type is list # expect_equal(typeof(ep[[i]]), "list", info = paste("Column:", i)) # ** Temporary ** # Check column type is list or NA - expect_equal(typeof(ep[[i]]) %in% c("list","character"), TRUE, info = paste("Column:", i)) + expect_equal(typeof(ep[[i]]) %in% c("list", "character"), TRUE, info = paste("Column:", i)) } # Check that group_by entries are named or the list content is NULL @@ -434,23 +453,35 @@ test_that("Column types of endpoint specification with complete function specifi # ** Temporary ** # Check that group_by entries are named or is a character NA - expect_equal(unlist(lapply(ep[["group_by"]], - function(x){length(names(x))>0 | identical(x, NA_character_)})), TRUE, - info = paste("Column:", i)) + expect_equal( + unlist(lapply( + ep[["group_by"]], + function(x) { + length(names(x)) > 0 | identical(x, 
NA_character_) + } + )), TRUE, + info = paste("Column:", i) + ) # Check unnamed list columns lst_cols <- c("stratify_by") - for (i in lst_cols){ + for (i in lst_cols) { # Check column type is list expect_equal(typeof(ep[[i]]), "list", info = paste("Column:", i)) # Check that list entries are character - expect_equal(all(unlist(lapply(ep[[i]], - function(x){is.character(x)}))), TRUE, - info = paste("Column:", i)) + expect_equal( + all(unlist(lapply( + ep[[i]], + function(x) { + is.character(x) + } + ))), TRUE, + info = paste("Column:", i) + ) } # Check function columns - for (i in fn_cols){ + for (i in fn_cols) { # Check column type is list of length 1 expect_equal(typeof(ep[[i]]), "list", info = paste("Column:", i)) @@ -462,27 +493,31 @@ test_that("Column types of endpoint specification with complete function specifi expect_equal(typeof(names(eval(ep[["stat_by_strata_by_trt"]][[1]]))), "character") expect_equal(typeof(names(eval(ep[["stat_by_strata_across_trt"]][[1]]))), "character") expect_equal(typeof(names(eval(ep[["stat_across_strata_across_trt"]][[1]]))), "character") - }) test_that("Column types of minimal endpoint specification", { - # SETUP ------------------------------------------------------------------- - expected_cols <- c("study_metadata", "pop_var", "pop_value", "treatment_var", - "treatment_refval", "period_var", "period_value", "custom_pop_filter", - "endpoint_filter", "group_by", "stratify_by", "endpoint_label", - "data_prepare", "stat_by_strata_by_trt", "stat_by_strata_across_trt", - "stat_across_strata_across_trt", "crit_endpoint", "crit_by_strata_by_trt", - "crit_by_strata_across_trt", "only_strata_with_events") + expected_cols <- c( + "study_metadata", "pop_var", "pop_value", "treatment_var", + "treatment_refval", "period_var", "period_value", "custom_pop_filter", + "endpoint_filter", "group_by", "stratify_by", "endpoint_label", + "data_prepare", "stat_by_strata_by_trt", "stat_by_strata_across_trt", + "stat_across_strata_across_trt", "crit_endpoint", "crit_by_strata_by_trt", + "crit_by_strata_across_trt", "only_strata_with_events" + ) - chr_cols <- c("pop_var", "pop_value", "treatment_var", "treatment_refval", - "period_var", "period_value", "custom_pop_filter", - "endpoint_filter", "endpoint_label") + chr_cols <- c( + "pop_var", "pop_value", "treatment_var", "treatment_refval", + "period_var", "period_value", "custom_pop_filter", + "endpoint_filter", "endpoint_label" + ) - fn_cols <- c("data_prepare", "stat_by_strata_by_trt", - "stat_by_strata_across_trt", "stat_across_strata_across_trt", "crit_endpoint", - "crit_by_strata_by_trt", "crit_by_strata_across_trt") + fn_cols <- c( + "data_prepare", "stat_by_strata_by_trt", + "stat_by_strata_across_trt", "stat_across_strata_across_trt", "crit_endpoint", + "crit_by_strata_by_trt", "crit_by_strata_across_trt" + ) # ACT --------------------------------------------------------------------- @@ -494,7 +529,7 @@ test_that("Column types of minimal endpoint specification", { expect_equal(setdiff(names(ep), expected_cols), character(0)) # Check character columns - for (i in chr_cols){ + for (i in chr_cols) { # Check column type is character expect_equal(typeof(ep[[i]]), "character", info = paste("Column:", i)) } @@ -504,14 +539,13 @@ test_that("Column types of minimal endpoint specification", { # Check named list columns nlst_cols <- c("study_metadata", "group_by") - for (i in nlst_cols){ - + for (i in nlst_cols) { # Check column type is list # expect_equal(typeof(ep[[i]]), "list", info = paste("Column:", i)) # ** Temporary ** # 
Check column type is list or NA - expect_equal(typeof(ep[[i]]) %in% c("list","character"), TRUE, info = paste("Column:", i)) + expect_equal(typeof(ep[[i]]) %in% c("list", "character"), TRUE, info = paste("Column:", i)) } # Check that group_by entries are named or the list content is NULL @@ -521,32 +555,43 @@ test_that("Column types of minimal endpoint specification", { # ** Temporary ** # Check that group_by entries are named or is a character NA - expect_equal(unlist(lapply(ep[["group_by"]], - function(x){length(names(x))>0 | identical(x, NA_character_)})), TRUE, - info = paste("Column:", i)) + expect_equal( + unlist(lapply( + ep[["group_by"]], + function(x) { + length(names(x)) > 0 | identical(x, NA_character_) + } + )), TRUE, + info = paste("Column:", i) + ) # Check unnamed list columns lst_cols <- c("stratify_by") - for (i in lst_cols){ + for (i in lst_cols) { # Check column type is list expect_equal(typeof(ep[[i]]), "list", info = paste("Column:", i)) # Check that list entries are character - expect_equal(all(unlist(lapply(ep[[i]], - function(x){is.character(x)}))), TRUE, - info = paste("Column:", i)) + expect_equal( + all(unlist(lapply( + ep[[i]], + function(x) { + is.character(x) + } + ))), TRUE, + info = paste("Column:", i) + ) } # Check function columns - for (i in fn_cols){ + for (i in fn_cols) { # Check column type is list of length 1 expect_equal(typeof(ep[[i]]), "list", info = paste("Column:", i)) # Check that content of each list is language - #expect_equal(typeof(ep[[i]][[1]]), "language", info = paste("Column:", i)) + # expect_equal(typeof(ep[[i]][[1]]), "language", info = paste("Column:", i)) # ** Temporary ** # Check that content of each list is language or NULL expect_equal(typeof(ep[[i]][[1]]) %in% c("language", "NULL"), TRUE, info = paste("Column:", i)) } - }) diff --git a/tests/testthat/test-mk_userdef_fn_dt.R b/tests/testthat/test-mk_userdef_fn_dt.R index 3173f70..9a95e51 100644 --- a/tests/testthat/test-mk_userdef_fn_dt.R +++ b/tests/testthat/test-mk_userdef_fn_dt.R @@ -1,7 +1,6 @@ -test_that("Parse_all_user_function works on table with all valid inputs.", - { -# SETUP ------------------------------------------------------------------- - ep <- mk_ep_0001_base( +test_that("Parse_all_user_function works on table with all valid inputs.", { + # SETUP ------------------------------------------------------------------- + ep <- mk_ep_0001_base( endpoint_label = "a", data_prepare = mk_adae, ) @@ -9,21 +8,23 @@ test_that("Parse_all_user_function works on table with all valid inputs.", endpoints_long <- suppressWarnings(unnest_endpoint_functions(ep)) - expected_names = c("fn_type", - "fn_hash", - "fn_name", - "fn_call_char", - "fn_callable") - character_columns = c("fn_type", "fn_hash", "fn_name", "fn_call_char") + expected_names <- c( + "fn_type", + "fn_hash", + "fn_name", + "fn_call_char", + "fn_callable" + ) + character_columns <- c("fn_type", "fn_hash", "fn_name", "fn_call_char") -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- function_table <- mk_userdef_fn_dt(endpoints_long) -# EXPECT ------------------------------------------------------------------ - column_types = sapply(function_table, class) + # EXPECT ------------------------------------------------------------------ + column_types <- sapply(function_table, class) for (col in character_columns) { expect_type(column_types[col], "character") @@ -32,14 +33,12 @@ test_that("Parse_all_user_function works on table 
with all valid inputs.", for (callable in function_table$fn_callable) { expect_type(callable, "closure") } - }) -test_that("Duplicate functions are collapsed", - { -# SETUP ------------------------------------------------------------------- +test_that("Duplicate functions are collapsed", { + # SETUP ------------------------------------------------------------------- ep <- rbind( - ep <- mk_ep_0001_base( + ep <- mk_ep_0001_base( endpoint_label = "A", data_prepare = mk_adae ), @@ -53,20 +52,18 @@ test_that("Duplicate functions are collapsed", suppressWarnings(unnest_endpoint_functions(ep)) -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- function_table <- mk_userdef_fn_dt(endpoints_long) -# EXPECT ------------------------------------------------------------------ - n_unique_fun = length(unique(endpoints_long$fn_hash)) + # EXPECT ------------------------------------------------------------------ + n_unique_fun <- length(unique(endpoints_long$fn_hash)) expect_equal(n_unique_fun, nrow(function_table)) - }) -test_that("Parse_all_user_function works when additonal args passed to stat methods", - { -# SETUP ------------------------------------------------------------------- +test_that("Parse_all_user_function works when additonal args passed to stat methods", { + # SETUP ------------------------------------------------------------------- ep <- mk_ep_0001_base( endpoint_label = "a", @@ -82,21 +79,23 @@ test_that("Parse_all_user_function works when additonal args passed to stat meth ep )) - expected_names = c("fn_type", - "fn_hash", - "fn_name", - "fn_call_char", - "fn_callable") - character_columns = c("fn_type", "fn_hash", "fn_name", "fn_call_char") + expected_names <- c( + "fn_type", + "fn_hash", + "fn_name", + "fn_call_char", + "fn_callable" + ) + character_columns <- c("fn_type", "fn_hash", "fn_name", "fn_call_char") -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- function_table <- mk_userdef_fn_dt(endpoints_long) -# EXPECT ------------------------------------------------------------------ + # EXPECT ------------------------------------------------------------------ - column_types = sapply(function_table, class) + column_types <- sapply(function_table, class) for (col in character_columns) { expect_type(column_types[col], "character") @@ -105,36 +104,37 @@ test_that("Parse_all_user_function works when additonal args passed to stat meth for (callable in function_table$fn_callable) { expect_type(callable, "closure") } - }) -test_that("Parse_all_user_function works when passed an emptly function slot", - { - -# SETUP ------------------------------------------------------------------- - ep <- mk_ep_0001_base(endpoint_label = "a", - data_prepare = mk_adae, - stratify_by = list(c("sex2")),) +test_that("Parse_all_user_function works when passed an emptly function slot", { + # SETUP ------------------------------------------------------------------- + ep <- mk_ep_0001_base( + endpoint_label = "a", + data_prepare = mk_adae, + stratify_by = list(c("sex2")), + ) ep <- add_id(ep) endpoints_long <- suppressWarnings(unnest_endpoint_functions( ep, )) - expected_names = c("fn_type", - "fn_hash", - "fn_name", - "fn_call_char", - "fn_callable") - character_columns = c("fn_type", "fn_hash", "fn_name", "fn_call_char") + expected_names <- c( + "fn_type", + "fn_hash", + "fn_name", + "fn_call_char", + 
"fn_callable" + ) + character_columns <- c("fn_type", "fn_hash", "fn_name", "fn_call_char") -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- function_table <- mk_userdef_fn_dt(endpoints_long) -# EXPECT ------------------------------------------------------------------ - column_types = sapply(function_table, class) + # EXPECT ------------------------------------------------------------------ + column_types <- sapply(function_table, class) for (col in character_columns) { expect_type(column_types[col], "character") diff --git a/tests/testthat/test-parse_function_input.R b/tests/testthat/test-parse_function_input.R index 9470b99..524943a 100644 --- a/tests/testthat/test-parse_function_input.R +++ b/tests/testthat/test-parse_function_input.R @@ -1,10 +1,10 @@ test_that("parse function works with namesspace defined", { - test_data <- rnorm(20, mean=50, sd=100) + test_data <- rnorm(20, mean = 50, sd = 100) # Defining statistical functions of interest: # to be included in endpoints definition. - stat_funcs = list( + stat_funcs <- list( mean, c(base::mean, trim = 0.4), # include arguments. max, @@ -16,21 +16,22 @@ test_that("parse function works with namesspace defined", { }) test_that("functions parsed can be excecuted", { - withr::with_seed(123, {data <- rnbinom(n = 20, size = 2, prob = 0.1)}) + withr::with_seed(123, { + data <- rnbinom(n = 20, size = 2, prob = 0.1) + }) # Defining statistical functions of interest: # to be included in endpoints definition. - stat_funcs = list( + stat_funcs <- list( mean, c(base::mean, trim = 0.4), # include arguments. max ) fn_parsed <- lapply(stat_funcs, parse_function_input) # apply the functions - out <- sapply(fn_parsed, function(fn){ + out <- sapply(fn_parsed, function(fn) { fn(data) }) expect_equal(out, c(14.1, 12.75, 36)) - }) diff --git a/tests/testthat/test-pipeline_manual.R b/tests/testthat/test-pipeline_manual.R index 571318b..66c9d3a 100644 --- a/tests/testthat/test-pipeline_manual.R +++ b/tests/testthat/test-pipeline_manual.R @@ -51,7 +51,7 @@ test_that("Manual pipeline works", { group_by = list(list(RACE = c())), stat_by_strata_by_trt = list( "n_subev" = n_subev, - c("p_subev" = p_subev, a="USUBJID") + c("p_subev" = p_subev, a = "USUBJID") ), stat_by_strata_across_trt = list("n_subev_trt_diff" = n_subev_trt_diff), stat_across_strata_across_trt = list("P-interaction" = contingency2x2_strata_test), @@ -142,7 +142,8 @@ test_that("Manual pipeline works", { as.data.table() ep_stat <- rbind(ep_stat_eval, ep_crit_by_strata_across_trt[!(crit_accept_endpoint)], - fill = TRUE) |> + fill = TRUE + ) |> setorder(endpoint_id, stat_result_id, stat_result_label) expect_equal(nrow(ep_stat), 54) diff --git a/tests/testthat/test-prepare_for_stats.R b/tests/testthat/test-prepare_for_stats.R index 0964d4f..98788b7 100644 --- a/tests/testthat/test-prepare_for_stats.R +++ b/tests/testthat/test-prepare_for_stats.R @@ -6,12 +6,13 @@ test_that("Invalid 'type' errors out ", { # EXPECT ------------------------------------------------------------------ - expect_error(prepare_for_stats( - ep = data.table(), - fn_map = data.table(), - type = "x" - ), - regexp = "'arg' should be one of" + expect_error( + prepare_for_stats( + ep = data.table(), + fn_map = data.table(), + type = "x" + ), + regexp = "'arg' should be one of" ) }) @@ -476,7 +477,6 @@ test_that("base - dataprep", { }) test_that("Check that only strata levels with events are kept", { - # SETUP 
------------------------------------------------------------------- ep <- @@ -527,9 +527,9 @@ test_that("Check that only strata levels with events are kept", { apply_criterion_by_strata(ep_crit_endpoint, analysis_data_container, fn_map) ep_crit_by_strata_across_trt <- apply_criterion_by_strata(ep_crit_by_strata_by_trt, - analysis_data_container, - fn_map, - type = "by_strata_across_trt" + analysis_data_container, + fn_map, + type = "by_strata_across_trt" ) @@ -537,15 +537,17 @@ test_that("Check that only strata levels with events are kept", { ep_prep_by_strata_by_trt <- prepare_for_stats(ep_crit_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_by_strata_by_trt") + analysis_data_container, + fn_map, + type = "stat_by_strata_by_trt" + ) ep_prep_by_strata_across_trt <- prepare_for_stats(ep_crit_by_strata_across_trt, - analysis_data_container, - fn_map, - type = "stat_by_strata_across_trt") + analysis_data_container, + fn_map, + type = "stat_by_strata_across_trt" + ) # INSPECT ----------------------------------------------------------------- @@ -557,9 +559,11 @@ test_that("Check that only strata levels with events are kept", { expected_n_combinations <- nrow(unique(dat[, c("AESOC", "RACE", "TRT01A")])) actual_n_combinations <- - nrow(unique(ep_prep_by_strata_by_trt[stat_event_exist == TRUE & - grepl("total", stat_filter) == 0, - c("endpoint_group_filter", "stat_filter")])) + nrow(unique(ep_prep_by_strata_by_trt[ + stat_event_exist == TRUE & + grepl("total", stat_filter) == 0, + c("endpoint_group_filter", "stat_filter") + ])) expect_equal(expected_n_combinations, actual_n_combinations) # Check specific SOC @@ -569,8 +573,8 @@ test_that("Check that only strata levels with events are kept", { expect_equal(nrow(ep_prep_bb_sub[strata_var == "RACE"]), 8) event_index <- ep_prep_bb_sub$event_index[[1]] - expected_stat_event_exist <- unlist(lapply(ep_prep_bb_sub$stat_filter, function(x){ - nrow(dat[list(event_index)][eval(parse(text=x))])>0 + expected_stat_event_exist <- unlist(lapply(ep_prep_bb_sub$stat_filter, function(x) { + nrow(dat[list(event_index)][eval(parse(text = x))]) > 0 })) actual_stat_event_exists <- ep_prep_bb_sub$stat_event_exist expect_equal(expected_stat_event_exist, actual_stat_event_exists) @@ -578,5 +582,4 @@ test_that("Check that only strata levels with events are kept", { # by_strata_across_trt expect_equal(nrow(ep_prep_by_strata_across_trt), 64) expect_equal(all(ep_prep_by_strata_across_trt$stat_event_exist), TRUE) - }) diff --git a/tests/testthat/test-targets.R b/tests/testthat/test-targets.R index ffb1173..aecbee1 100644 --- a/tests/testthat/test-targets.R +++ b/tests/testthat/test-targets.R @@ -1,479 +1,470 @@ -test_that("Base case: targets pipeline works", - { - - # SETUP ------------------------------------------------------------------- - testr::create_local_project() - crit_endpoint <- function(...) { - return(T) - } - crit_sga <- function(...) { - return(T) - } - crit_sgd <- function(...) 
{ - return(T) - } - - mk_ep_def <- function() { - ep <- mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - endpoint_label = "A", - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - group_by = list(list(RACE = c())), - stat_by_strata_by_trt = list("n_subev" = c(n_subev)), - stat_by_strata_across_trt = list("n_subev_trt_diff" = c(n_subev_trt_diff)), - stat_across_strata_across_trt = list("P-interaction" = c(contingency2x2_strata_test)), - crit_endpoint = list(crit_endpoint), - crit_by_strata_by_trt = list(crit_sgd), - crit_by_strata_across_trt = list(crit_sga) - ) - } - - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - n_subev <- n_subev - n_subev_trt_diff <- n_subev_trt_diff - contingency2x2_ptest <- contingency2x2_ptest - - use_chef( - pipeline_dir = "pipeline", - r_functions_dir = "R/", - pipeline_id = "01", - mk_endpoint_def_fn = mk_ep_def, - mk_adam_fn = list(mk_adcm), - mk_criteria_fn = list(crit_endpoint, crit_sga, crit_sgd) - ) - - dump("n_subev", file = "R/custom_functions.R") - dump("n_subev_trt_diff", file = "R/custom_functions.R", append = TRUE) - dump("contingency2x2_strata_test", - file = "R/custom_functions.R", - append = TRUE) - # ACT --------------------------------------------------------------------- - - tar_make() - # EXPECT ------------------------------------------------------------------ - x <- tar_meta() %>% as.data.table() - expect_true(all(is.na(x$error))) - tar_load(ep_stat) - expect_equal(NROW(ep_stat), 36) - expect_equal(NCOL(ep_stat), 37) - expect_snapshot(ep_stat$stat_result_value) - }) - -test_that("targets pipeline works no criteria fn and missing by_* functions", - { - - # SETUP ------------------------------------------------------------------- - testr::create_local_project() - - mk_ep_def <- function() { - ep <- mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - endpoint_label = "A", - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - group_by = list(list(RACE = c())), - stat_by_strata_by_trt = list("n_subev" = c(n_subev)) - ) - } - - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - n_subev <- n_subev - n_subev_trt_diff <- n_subev_trt_diff - contingency2x2_ptest <- contingency2x2_ptest - - use_chef( - pipeline_dir = "pipeline", - r_functions_dir = "R/", - pipeline_id = "01", - mk_endpoint_def_fn = mk_ep_def, - mk_adam_fn = list(mk_adcm) - ) - dump("n_subev", file = "R/custom_functions.R") - dump("n_subev_trt_diff", file = "R/custom_functions.R", append = TRUE) - dump("contingency2x2_ptest", file = "R/custom_functions.R", append = TRUE) - - # ACT --------------------------------------------------------------------- - tar_make() - - # EXPECT ------------------------------------------------------------------ - x <- tar_meta() %>% as.data.table() - expect_true(all(is.na(x$error))) - tar_load(ep_stat) - expect_equal(NROW(ep_stat), 18) - expect_equal(NCOL(ep_stat), 37) - expect_snapshot(ep_stat$stat_result_value) - }) - 
-test_that("branching after prepare for stats step works", - { - # SETUP ------------------------------------------------------------------- - testr::create_local_project() - - mk_ep_def <- function() { - ep <- mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - stratify_by = list(c("SEX")), - data_prepare = mk_adae, - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list("fn_1" = c(n_subev), - "fn_2" = c(n_sub)) - ) - } - - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - n_subev <- n_subev - n_subev_trt_diff <- n_subev_trt_diff - contingency2x2_ptest <- contingency2x2_ptest - - use_chef( - pipeline_dir = "pipeline", - r_functions_dir = "R/", - pipeline_id = "01", - mk_endpoint_def_fn = mk_ep_def, - mk_adam_fn = list(mk_adae), - branch_group_size = 1 - - ) - dump("n_subev", file = "R/custom_functions.R") - dump("n_sub", file = "R/custom_functions.R", append = TRUE) - - # ACT --------------------------------------------------------------------- - tar_make() - +test_that("Base case: targets pipeline works", { + # SETUP ------------------------------------------------------------------- + testr::create_local_project() + crit_endpoint <- function(...) { + return(T) + } + crit_sga <- function(...) { + return(T) + } + crit_sgd <- function(...) { + return(T) + } + + mk_ep_def <- function() { + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + period_var = "ANL01FL", + period_value = "Y", + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + endpoint_label = "A", + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + group_by = list(list(RACE = c())), + stat_by_strata_by_trt = list("n_subev" = c(n_subev)), + stat_by_strata_across_trt = list("n_subev_trt_diff" = c(n_subev_trt_diff)), + stat_across_strata_across_trt = list("P-interaction" = c(contingency2x2_strata_test)), + crit_endpoint = list(crit_endpoint), + crit_by_strata_by_trt = list(crit_sgd), + crit_by_strata_across_trt = list(crit_sga) + ) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + n_subev <- n_subev + n_subev_trt_diff <- n_subev_trt_diff + contingency2x2_ptest <- contingency2x2_ptest + + use_chef( + pipeline_dir = "pipeline", + r_functions_dir = "R/", + pipeline_id = "01", + mk_endpoint_def_fn = mk_ep_def, + mk_adam_fn = list(mk_adcm), + mk_criteria_fn = list(crit_endpoint, crit_sga, crit_sgd) + ) + + dump("n_subev", file = "R/custom_functions.R") + dump("n_subev_trt_diff", file = "R/custom_functions.R", append = TRUE) + dump("contingency2x2_strata_test", + file = "R/custom_functions.R", + append = TRUE + ) + # ACT --------------------------------------------------------------------- + + tar_make() # EXPECT ------------------------------------------------------------------ - x <- tar_meta() %>% as.data.table() - expect_true(all(is.na(x$error))) - tar_load(ep_stat) - expect_equal(NROW(ep_stat), 12) - expect_equal(NCOL(ep_stat), 37) - expect_snapshot(ep_stat$stat_result_value) + x <- tar_meta() %>% as.data.table() + expect_true(all(is.na(x$error))) + tar_load(ep_stat) + expect_equal(NROW(ep_stat), 36) + expect_equal(NCOL(ep_stat), 37) + 
expect_snapshot(ep_stat$stat_result_value) }) -test_that("ep_fn_map is always outdated", - { - # SETUP ------------------------------------------------------------------- - testr::create_local_project() - - mk_ep_def <- function() { - ep <- mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - endpoint_label = "A", - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list("n_subev" = c(n_subev)), - ) - } - - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - n_subev <- n_subev - - use_chef( - pipeline_dir = "pipeline", - r_functions_dir = "R/", - pipeline_id = "01", - mk_endpoint_def_fn = mk_ep_def, - mk_adam_fn = list(mk_adcm) - ) - - dump("n_subev", file = "R/custom_functions.R") - - # ACT --------------------------------------------------------------------- - tar_make(ep_fn_map) - # EXPECT ------------------------------------------------------------------ - expect_equal(tar_outdated(names = c(ep_fn_map, ep, ep_id)), "ep_fn_map") - x <- tar_meta() %>% as.data.table() - expect_true(all(is.na(x$error))) - }) - - -test_that("study_data responds to changes in source data", - { - # SETUP ------------------------------------------------------------------- - testr::create_local_project() - saveRDS(data.table(runif(10)), file = "tmp_data_obj.rds") - mk_test_fn <- function(study_metadata) { - readRDS("tmp_data_obj.rds") - } - mk_ep_def <- function() { - ep <- mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("SEX")), - data_prepare = mk_test_fn, - endpoint_label = "A", - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list("n_subev" = c(n_subev)), - ) - } - - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - n_subev <- n_subev - - use_chef( - pipeline_dir = "pipeline", - r_functions_dir = "R/", - pipeline_id = "01", - mk_endpoint_def_fn = mk_ep_def, - mk_adam_fn = list(mk_test_fn) - ) - - dump("n_subev", file = "R/custom_functions.R") - tar_make(study_data) - tar_load(study_data) - x <- tar_meta() %>% as.data.table() - expect_true(all(is.na(x$error))) - before <- study_data$dat - # ACT --------------------------------------------------------------------- - saveRDS(data.table(runif(10)), file = "tmp_data_obj.rds") - tar_make(study_data) - - # EXPECT ------------------------------------------------------------------ - tar_load(study_data) - after <- study_data$dat - expect_equal(intersect(c("study_data"), tar_outdated(names = study_data)), "study_data") - expect_failure(expect_equal(before, after)) - x <- tar_meta() %>% as.data.table() - expect_true(all(is.na(x$error))) - }) - - -test_that("Only affected branches outdated when new strata added", - { - # SETUP ------------------------------------------------------------------- - mk_endpoint_def <- function() { - mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - stratify_by = list(c("SEX")), - group_by 
= list(list(AESEV = c())), - data_prepare = mk_adae, - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list( - "fn_1" = c(n_sub), - "fn_2_adae" = c(p_subev) - ) - ) - - } - - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - path <- - system.file("templates", package = "chef") |> file.path("template-pipeline.R") - tmp <- readLines(path) - tar_dir({ - dir.create("R") - dump("p_subev", file = "R/custom_functions.R") - dump("n_sub", file = "R/custom_functions.R", append = TRUE) - dump("mk_adae", file = "R/mk_adae.R") - dump("mk_adcm", file = "R/mk_adcm.R") - dump("mk_endpoint_def", file = "R/mk_endpoint_def.R") - - x <- - whisker::whisker.render(tmp, data = list(r_script_dir = "R/")) - writeLines(whisker::whisker.render(tmp, data = list(r_script_dir = - "R/")), con = "_targets.R") - tar_make() - - mk_endpoint_def <- function() { - list( - mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - stratify_by = list(c("SEX")), - group_by = list(list(AESEV = c())), - data_prepare = mk_adae, - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list( - "fn_1" = c(n_sub), - "fn_2_adae" = c(p_subev) - ) - ), - mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - stratify_by = list(c("SEX")), - data_prepare = mk_adcm, - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list("fn_2_adcm" = c(n_sub)) - ) - ) |> data.table::rbindlist() - } - dump("mk_endpoint_def", file = "R/mk_endpoint_def.R") - tar_make() - x <- tar_meta() |> data.table::setDT() - - - expect_outdated_patterns <- - c( - "study_data", - "ep_prep_by_strata_by_trt_", - "ep_stat_by_strata_by_trt_", - "ep_crit_by_strata_by_trt_", - "ep_crit_endpoint_" - - ) - - timestamp_re_run_target <- - x[grepl("ep_fn_map", name), time][2] - - # Check that the targets we expected to be skipped were actually - # skipped - actual <- - vapply(expect_outdated_patterns, function(i) { - rgx <- paste0(i, collapse = "|") - compar_dt <- x[grepl(rgx, name), .(name, time)] - NROW(compar_dt[time < timestamp_re_run_target]) == 1 - }, FUN.VALUE = logical(1L)) - - - # We expect a FALSE for study_data, as this target should NOT run - # before ep_fn_map - expect_equal(actual, c(FALSE, TRUE, TRUE, TRUE, TRUE), ignore_attr = TRUE) - - }) - - }) +test_that("targets pipeline works no criteria fn and missing by_* functions", { + # SETUP ------------------------------------------------------------------- + testr::create_local_project() + + mk_ep_def <- function() { + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + period_var = "ANL01FL", + period_value = "Y", + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + endpoint_label = "A", + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + group_by = list(list(RACE = c())), + stat_by_strata_by_trt = list("n_subev" = c(n_subev)) + ) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + n_subev <- n_subev + n_subev_trt_diff <- n_subev_trt_diff + contingency2x2_ptest <- 
contingency2x2_ptest + + use_chef( + pipeline_dir = "pipeline", + r_functions_dir = "R/", + pipeline_id = "01", + mk_endpoint_def_fn = mk_ep_def, + mk_adam_fn = list(mk_adcm) + ) + dump("n_subev", file = "R/custom_functions.R") + dump("n_subev_trt_diff", file = "R/custom_functions.R", append = TRUE) + dump("contingency2x2_ptest", file = "R/custom_functions.R", append = TRUE) + + # ACT --------------------------------------------------------------------- + tar_make() + # EXPECT ------------------------------------------------------------------ + x <- tar_meta() %>% as.data.table() + expect_true(all(is.na(x$error))) + tar_load(ep_stat) + expect_equal(NROW(ep_stat), 18) + expect_equal(NCOL(ep_stat), 37) + expect_snapshot(ep_stat$stat_result_value) +}) -test_that("Check for discordant columns in result data model when having one endpoint spec without grouping and one endpoint spec with grouping", { - - # SETUP ------------------------------------------------------------------- - mk_endpoint_def <- function() { - ep <- mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - stratify_by = list(c("SEX", "AGEGR1")), - data_prepare = mk_adae, - endpoint_label = "A", - custom_pop_filter = - "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - group_by = list(list( - AESOC = c(), AESEV = c() - )), - stat_by_strata_by_trt = list(c(n_sub)) - ) - - ep2 <- mk_endpoint_str( - data_prepare = mk_advs, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - stratify_by = list(c("AGEGR1", "SEX")), - stat_by_strata_by_trt = list(c(n_sub)), - endpoint_label = "Demographics endpoint (categorical measures)" - ) +test_that("branching after prepare for stats step works", { + # SETUP ------------------------------------------------------------------- + testr::create_local_project() + + mk_ep_def <- function() { + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + stratify_by = list(c("SEX")), + data_prepare = mk_adae, + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + stat_by_strata_by_trt = list( + "fn_1" = c(n_subev), + "fn_2" = c(n_sub) + ) + ) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + n_subev <- n_subev + n_subev_trt_diff <- n_subev_trt_diff + contingency2x2_ptest <- contingency2x2_ptest + + use_chef( + pipeline_dir = "pipeline", + r_functions_dir = "R/", + pipeline_id = "01", + mk_endpoint_def_fn = mk_ep_def, + mk_adam_fn = list(mk_adae), + branch_group_size = 1 + ) + dump("n_subev", file = "R/custom_functions.R") + dump("n_sub", file = "R/custom_functions.R", append = TRUE) + + # ACT --------------------------------------------------------------------- + tar_make() - data.table::rbindlist(list(ep, ep2)) - } + # EXPECT ------------------------------------------------------------------ + x <- tar_meta() %>% as.data.table() + expect_true(all(is.na(x$error))) + tar_load(ep_stat) + expect_equal(NROW(ep_stat), 12) + expect_equal(NCOL(ep_stat), 37) + expect_snapshot(ep_stat$stat_result_value) +}) - mk_advs <- function(study_metadata) { - # Read ADSL - adsl <- data.table::as.data.table(pharmaverseadam::adsl) +test_that("ep_fn_map is always outdated", { + # SETUP 
------------------------------------------------------------------- + testr::create_local_project() + + mk_ep_def <- function() { + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + period_var = "ANL01FL", + period_value = "Y", + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + endpoint_label = "A", + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + stat_by_strata_by_trt = list("n_subev" = c(n_subev)), + ) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + n_subev <- n_subev + + use_chef( + pipeline_dir = "pipeline", + r_functions_dir = "R/", + pipeline_id = "01", + mk_endpoint_def_fn = mk_ep_def, + mk_adam_fn = list(mk_adcm) + ) + + dump("n_subev", file = "R/custom_functions.R") + + # ACT --------------------------------------------------------------------- + tar_make(ep_fn_map) + # EXPECT ------------------------------------------------------------------ + expect_equal(tar_outdated(names = c(ep_fn_map, ep, ep_id)), "ep_fn_map") + x <- tar_meta() %>% as.data.table() + expect_true(all(is.na(x$error))) +}) - # Filter treatment arms - adsl <- adsl[adsl$TRT01A %in% c("Placebo", "Xanomeline High Dose")] - adsl[1, AGEGR1 := NA_character_] - adsl[2:10, SEX := NA_character_] - # Read ADVS - advs <- data.table::as.data.table(pharmaverseadam::advs) +test_that("study_data responds to changes in source data", { + # SETUP ------------------------------------------------------------------- + testr::create_local_project() + saveRDS(data.table(runif(10)), file = "tmp_data_obj.rds") + mk_test_fn <- function(study_metadata) { + readRDS("tmp_data_obj.rds") + } + mk_ep_def <- function() { + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + period_var = "ANL01FL", + period_value = "Y", + stratify_by = list(c("SEX")), + data_prepare = mk_test_fn, + endpoint_label = "A", + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + stat_by_strata_by_trt = list("n_subev" = c(n_subev)), + ) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + n_subev <- n_subev + + use_chef( + pipeline_dir = "pipeline", + r_functions_dir = "R/", + pipeline_id = "01", + mk_endpoint_def_fn = mk_ep_def, + mk_adam_fn = list(mk_test_fn) + ) + + dump("n_subev", file = "R/custom_functions.R") + tar_make(study_data) + tar_load(study_data) + x <- tar_meta() %>% as.data.table() + expect_true(all(is.na(x$error))) + before <- study_data$dat + # ACT --------------------------------------------------------------------- + saveRDS(data.table(runif(10)), file = "tmp_data_obj.rds") + tar_make(study_data) - # Identify baseline body weight - advs_bw <- advs[advs$PARAMCD == "WEIGHT" & advs$VISIT == "BASELINE"] + # EXPECT ------------------------------------------------------------------ + tar_load(study_data) + after <- study_data$dat + expect_equal(intersect(c("study_data"), tar_outdated(names = study_data)), "study_data") + expect_failure(expect_equal(before, after)) + x <- tar_meta() %>% as.data.table() + expect_true(all(is.na(x$error))) +}) - # Create new variable BW_BASELINE - advs_bw[["BW_BASELINE"]] <- advs_bw[["AVAL"]] - # Merge ADSL, ADAE and baseline body weight from ADVS - adam_out <- - 
merge(adsl, advs_bw[, c("BW_BASELINE", "USUBJID")], by = "USUBJID", all.x = TRUE) +test_that("Only affected branches outdated when new strata added", { + # SETUP ------------------------------------------------------------------- + mk_endpoint_def <- function() { + mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + stratify_by = list(c("SEX")), + group_by = list(list(AESEV = c())), + data_prepare = mk_adae, + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + stat_by_strata_by_trt = list( + "fn_1" = c(n_sub), + "fn_2_adae" = c(p_subev) + ) + ) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + path <- + system.file("templates", package = "chef") |> file.path("template-pipeline.R") + tmp <- readLines(path) + tar_dir({ + dir.create("R") + dump("p_subev", file = "R/custom_functions.R") + dump("n_sub", file = "R/custom_functions.R", append = TRUE) + dump("mk_adae", file = "R/mk_adae.R") + dump("mk_adcm", file = "R/mk_adcm.R") + dump("mk_endpoint_def", file = "R/mk_endpoint_def.R") + + x <- + whisker::whisker.render(tmp, data = list(r_script_dir = "R/")) + writeLines(whisker::whisker.render(tmp, data = list( + r_script_dir = + "R/" + )), con = "_targets.R") + tar_make() - return(adam_out) + mk_endpoint_def <- function() { + list( + mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + stratify_by = list(c("SEX")), + group_by = list(list(AESEV = c())), + data_prepare = mk_adae, + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + stat_by_strata_by_trt = list( + "fn_1" = c(n_sub), + "fn_2_adae" = c(p_subev) + ) + ), + mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + stratify_by = list(c("SEX")), + data_prepare = mk_adcm, + custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + stat_by_strata_by_trt = list("fn_2_adcm" = c(n_sub)) + ) + ) |> data.table::rbindlist() } + dump("mk_endpoint_def", file = "R/mk_endpoint_def.R") + tar_make() + x <- tar_meta() |> data.table::setDT() + + + expect_outdated_patterns <- + c( + "study_data", + "ep_prep_by_strata_by_trt_", + "ep_stat_by_strata_by_trt_", + "ep_crit_by_strata_by_trt_", + "ep_crit_endpoint_" + ) + + timestamp_re_run_target <- + x[grepl("ep_fn_map", name), time][2] + + # Check that the targets we expected to be skipped were actually + # skipped + actual <- + vapply(expect_outdated_patterns, function(i) { + rgx <- paste0(i, collapse = "|") + compar_dt <- x[grepl(rgx, name), .(name, time)] + NROW(compar_dt[time < timestamp_re_run_target]) == 1 + }, FUN.VALUE = logical(1L)) + + + # We expect a FALSE for study_data, as this target should NOT run + # before ep_fn_map + expect_equal(actual, c(FALSE, TRUE, TRUE, TRUE, TRUE), ignore_attr = TRUE) + }) +}) - # This is needed because mk_adcm it is calling from a new R session, it - # doesn't have access to the helper-* functions from chef - path <- - system.file("templates", package = "chef") |> - file.path("template-pipeline.R") - tmp <- readLines(path) - -# ACT --------------------------------------------------------------------- - - tar_dir({ - dir.create("R") - dump("n_sub", file = "R/custom_functions.R") - dump("mk_adae", file = 
"R/mk_adae.R") - dump("mk_advs", file = "R/mk_advs.R") - dump("mk_endpoint_def", file = "R/mk_endpoint_def.R") - - x <- whisker::whisker.render(tmp, data = list(r_script_dir = "R/")) - writeLines(whisker::whisker.render(tmp, data = list( - r_script_dir = "R/")), con = "_targets.R") - - tar_make() - -# EXPECT ------------------------------------------------------------------ - - - targets::tar_load(ep_stat) - expect_equal(nrow(ep_stat), 700) - expect_equal(ncol(ep_stat), 37) - expect_equal(sum(ep_stat$endpoint_spec_id == 1), 690) - expect_equal(sum(ep_stat$endpoint_spec_id == 2), 10) - - x <- tar_meta() |> data.table::setDT() - expect_false(any(!is.na(x$error))) - - }) + +test_that("Check for discordant columns in result data model when having one endpoint spec without grouping and one endpoint spec with grouping", { + # SETUP ------------------------------------------------------------------- + mk_endpoint_def <- function() { + ep <- mk_endpoint_str( + study_metadata = list(), + pop_var = "SAFFL", + pop_value = "Y", + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + stratify_by = list(c("SEX", "AGEGR1")), + data_prepare = mk_adae, + endpoint_label = "A", + custom_pop_filter = + "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", + group_by = list(list( + AESOC = c(), AESEV = c() + )), + stat_by_strata_by_trt = list(c(n_sub)) + ) + + ep2 <- mk_endpoint_str( + data_prepare = mk_advs, + treatment_var = "TRT01A", + treatment_refval = "Xanomeline High Dose", + pop_var = "SAFFL", + pop_value = "Y", + stratify_by = list(c("AGEGR1", "SEX")), + stat_by_strata_by_trt = list(c(n_sub)), + endpoint_label = "Demographics endpoint (categorical measures)" + ) + + data.table::rbindlist(list(ep, ep2)) + } + + mk_advs <- function(study_metadata) { + # Read ADSL + adsl <- data.table::as.data.table(pharmaverseadam::adsl) + + # Filter treatment arms + adsl <- adsl[adsl$TRT01A %in% c("Placebo", "Xanomeline High Dose")] + adsl[1, AGEGR1 := NA_character_] + adsl[2:10, SEX := NA_character_] + + # Read ADVS + advs <- data.table::as.data.table(pharmaverseadam::advs) + + # Identify baseline body weight + advs_bw <- advs[advs$PARAMCD == "WEIGHT" & advs$VISIT == "BASELINE"] + + # Create new variable BW_BASELINE + advs_bw[["BW_BASELINE"]] <- advs_bw[["AVAL"]] + + # Merge ADSL, ADAE and baseline body weight from ADVS + adam_out <- + merge(adsl, advs_bw[, c("BW_BASELINE", "USUBJID")], by = "USUBJID", all.x = TRUE) + + return(adam_out) + } + + # This is needed because mk_adcm it is calling from a new R session, it + # doesn't have access to the helper-* functions from chef + path <- + system.file("templates", package = "chef") |> + file.path("template-pipeline.R") + tmp <- readLines(path) + + # ACT --------------------------------------------------------------------- + + tar_dir({ + dir.create("R") + dump("n_sub", file = "R/custom_functions.R") + dump("mk_adae", file = "R/mk_adae.R") + dump("mk_advs", file = "R/mk_advs.R") + dump("mk_endpoint_def", file = "R/mk_endpoint_def.R") + + x <- whisker::whisker.render(tmp, data = list(r_script_dir = "R/")) + writeLines(whisker::whisker.render(tmp, data = list( + r_script_dir = "R/" + )), con = "_targets.R") + + tar_make() + + # EXPECT ------------------------------------------------------------------ + + + targets::tar_load(ep_stat) + expect_equal(nrow(ep_stat), 700) + expect_equal(ncol(ep_stat), 37) + expect_equal(sum(ep_stat$endpoint_spec_id == 1), 690) + expect_equal(sum(ep_stat$endpoint_spec_id == 2), 10) + + x <- tar_meta() |> data.table::setDT() + 
expect_false(any(!is.na(x$error))) + }) }) diff --git a/tests/testthat/test-try_and_validate.R b/tests/testthat/test-try_and_validate.R index 6ff4e55..2ae5a11 100644 --- a/tests/testthat/test-try_and_validate.R +++ b/tests/testthat/test-try_and_validate.R @@ -56,8 +56,7 @@ test_that("validate_stat_output in simple cases", { test_that( "with_error_to_debug creates a debugging session if and only if evaluation fails - no output validation.", { - -# SETUP ------------------------------------------------------------------- + # SETUP ------------------------------------------------------------------- my_fun <- function(x) { @@ -73,7 +72,7 @@ test_that( expect_false(file.exists(filename)) -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- # Check debug is created when a valid call is wrapped @@ -83,7 +82,7 @@ test_that( fixed = TRUE ) -# EXPECT ------------------------------------------------------------------ + # EXPECT ------------------------------------------------------------------ expect_true(file.exists(filename)) @@ -97,9 +96,7 @@ test_that( test_that( "with_error_to_debug creates a debugging session if and only if validation fails - valid calls", { - - -# SETUP ------------------------------------------------------------------- + # SETUP ------------------------------------------------------------------- fn_invalid <- function(x) { @@ -120,13 +117,13 @@ test_that( # Ensure no error and that debug is not create without validation problems. -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- -# EXPECT ------------------------------------------------------------------ + # EXPECT ------------------------------------------------------------------ - expect_equal( + expect_equal( try_and_validate(fn_valid(10), debug_dir = tmp, validator = validate_stat_output diff --git a/tests/testthat/test-unnest_by_fns.R b/tests/testthat/test-unnest_by_fns.R index f77b6af..c6491ad 100644 --- a/tests/testthat/test-unnest_by_fns.R +++ b/tests/testthat/test-unnest_by_fns.R @@ -6,13 +6,14 @@ test_that("Errors on empty data table", { test_that("Errors when provided cols do not exist in data.table", { dt <- data.table::data.table(a = rnorm(10), b = rnorm(10)) - expect_error(unnest_by_fns(dt, cols = c("c", "d")), - "The following columns are not found") + expect_error( + unnest_by_fns(dt, cols = c("c", "d")), + "The following columns are not found" + ) }) -test_that("Expload data model based on fn's works", -{ +test_that("Expload data model based on fn's works", { # SETUP ------------------------------------------------------------------- ep <- mk_ep_0001_base(data_prepare = mk_adae) @@ -25,11 +26,12 @@ test_that("Expload data model based on fn's works", expect_equal(actual$fn_name, "mk_adae") }) -test_that("Duplicate fn's get their own row", -{ +test_that("Duplicate fn's get their own row", { # SETUP ------------------------------------------------------------------- - ep <- rbind(mk_ep_0001_base(data_prepare = mk_adae), - mk_ep_0001_base(data_prepare = mk_adae)) + ep <- rbind( + mk_ep_0001_base(data_prepare = mk_adae), + mk_ep_0001_base(data_prepare = mk_adae) + ) # ACT --------------------------------------------------------------------- actual <- unnest_by_fns(ep, cols = c("data_prepare")) @@ -40,97 +42,91 @@ test_that("Duplicate fn's get their own row", }) -test_that("Unnamed naked fns get nammed", - { - # 
SETUP ------------------------------------------------------------------- - ep <- mk_ep_0001_base(data_prepare = mk_adae,stat_by_strata_by_trt=n_sub) - # ACT --------------------------------------------------------------------- - actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) - - # EXPECT ------------------------------------------------------------------ - expect_equal(nrow(actual), 2) - expect_equal(actual$fn[[1]], substitute(mk_adae)) - expect_equal(actual$fn[[2]], substitute(n_sub)) - }) - -test_that("Unnamed fns enclosed in list() get nammed", - { - # SETUP ------------------------------------------------------------------- - ep <- mk_ep_0001_base(data_prepare = mk_adae,stat_by_strata_by_trt=list(n_sub)) - # ACT --------------------------------------------------------------------- - actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) - - # EXPECT ------------------------------------------------------------------ - expect_equal(nrow(actual), 2) - expect_equal(actual$fn[[1]], substitute(mk_adae)) - expect_equal(actual$fn[[2]], substitute(n_sub)) - }) - -test_that("Unnamed fns in style pkg::fn enclosed in list() get nammed", - { - # SETUP ------------------------------------------------------------------- - ep <- - - mk_ep_0001_base( - data_prepare = mk_adae, - stat_by_strata_by_trt = list( - n_sub, - stats::AIC, - c(stats::BIC, x = "1"), - c("rst" = n_sub), - c("gtgsr" = stats::acf) - ) - ) - - # ACT --------------------------------------------------------------------- - actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) - - # EXPECT ------------------------------------------------------------------ - expect_equal(nrow(actual), 6) - expect_equal( - actual$fn_name, - c( - "mk_adae", - "n_sub", - "stats::AIC", - "stats::BIC", - "n_sub", - "stats::acf" - ) - ) - - }) - -test_that("Multiple unnamed fns enclosed in list() get nammed", - { - # SETUP ------------------------------------------------------------------- - ep <- mk_ep_0001_base(data_prepare = mk_adae,stat_by_strata_by_trt=list(n_sub, n_subev)) - # ACT --------------------------------------------------------------------- - actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) - - # EXPECT ------------------------------------------------------------------ - expect_equal(nrow(actual), 3) - expect_equal(actual$fn[[1]], substitute(mk_adae)) - expect_equal(actual$fn[[2]], substitute(n_sub)) - expect_equal(actual$fn[[3]], substitute(n_subev)) - }) - -test_that("Unnamed fns supplied in following style: list(c(fn, arg), fn) get nammed", - { - # SETUP ------------------------------------------------------------------- - ep <- - mk_ep_0001_base(data_prepare = mk_adae, - stat_by_strata_by_trt = list( - c(n_sub, subject_var = "USUBJID"), - c("rst" = n_subev), - c("rst" = n_subev, subject_var = "gta") - )) - # ACT --------------------------------------------------------------------- - actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) - - # EXPECT ------------------------------------------------------------------ - - expect_equal(nrow(actual), 4) - expect_equal(actual$fn_name, c("mk_adae", "n_sub", "n_subev", "n_subev")) - - }) +test_that("Unnamed naked fns get nammed", { + # SETUP ------------------------------------------------------------------- + ep <- mk_ep_0001_base(data_prepare = mk_adae, stat_by_strata_by_trt = n_sub) + # ACT --------------------------------------------------------------------- + 
actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) + + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual), 2) + expect_equal(actual$fn[[1]], substitute(mk_adae)) + expect_equal(actual$fn[[2]], substitute(n_sub)) +}) + +test_that("Unnamed fns enclosed in list() get nammed", { + # SETUP ------------------------------------------------------------------- + ep <- mk_ep_0001_base(data_prepare = mk_adae, stat_by_strata_by_trt = list(n_sub)) + # ACT --------------------------------------------------------------------- + actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) + + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual), 2) + expect_equal(actual$fn[[1]], substitute(mk_adae)) + expect_equal(actual$fn[[2]], substitute(n_sub)) +}) + +test_that("Unnamed fns in style pkg::fn enclosed in list() get nammed", { + # SETUP ------------------------------------------------------------------- + ep <- + mk_ep_0001_base( + data_prepare = mk_adae, + stat_by_strata_by_trt = list( + n_sub, + stats::AIC, + c(stats::BIC, x = "1"), + c("rst" = n_sub), + c("gtgsr" = stats::acf) + ) + ) + + # ACT --------------------------------------------------------------------- + actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) + + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual), 6) + expect_equal( + actual$fn_name, + c( + "mk_adae", + "n_sub", + "stats::AIC", + "stats::BIC", + "n_sub", + "stats::acf" + ) + ) +}) + +test_that("Multiple unnamed fns enclosed in list() get nammed", { + # SETUP ------------------------------------------------------------------- + ep <- mk_ep_0001_base(data_prepare = mk_adae, stat_by_strata_by_trt = list(n_sub, n_subev)) + # ACT --------------------------------------------------------------------- + actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) + + # EXPECT ------------------------------------------------------------------ + expect_equal(nrow(actual), 3) + expect_equal(actual$fn[[1]], substitute(mk_adae)) + expect_equal(actual$fn[[2]], substitute(n_sub)) + expect_equal(actual$fn[[3]], substitute(n_subev)) +}) + +test_that("Unnamed fns supplied in following style: list(c(fn, arg), fn) get nammed", { + # SETUP ------------------------------------------------------------------- + ep <- + mk_ep_0001_base( + data_prepare = mk_adae, + stat_by_strata_by_trt = list( + c(n_sub, subject_var = "USUBJID"), + c("rst" = n_subev), + c("rst" = n_subev, subject_var = "gta") + ) + ) + # ACT --------------------------------------------------------------------- + actual <- unnest_by_fns(ep, cols = c("data_prepare", "stat_by_strata_by_trt")) + + # EXPECT ------------------------------------------------------------------ + + expect_equal(nrow(actual), 4) + expect_equal(actual$fn_name, c("mk_adae", "n_sub", "n_subev", "n_subev")) +}) diff --git a/tests/testthat/test-unnest_endpoint_functions.R b/tests/testthat/test-unnest_endpoint_functions.R index 1f502a6..df043ed 100644 --- a/tests/testthat/test-unnest_endpoint_functions.R +++ b/tests/testthat/test-unnest_endpoint_functions.R @@ -4,17 +4,19 @@ test_that("Unnest all functions", { crit_ep_dummy <- function(...) { return(T) } - crit_sgd_dummy <- function(...){ + crit_sgd_dummy <- function(...) { return(T) } - crit_sga_dummy <- function(...){ + crit_sga_dummy <- function(...) 
{ return(T) } ep <- mk_ep_0001_base( data_prepare = mk_adae, - stat_by_strata_by_trt = list("n_sub" = n_sub, - "n_subev" = n_subev), + stat_by_strata_by_trt = list( + "n_sub" = n_sub, + "n_subev" = n_subev + ), stat_by_strata_across_trt = list("n_subev_trt_diff" = n_subev_trt_diff), stat_across_strata_across_trt = list("P-interaction" = contingency2x2_strata_test), crit_endpoint = list(crit_ep_dummy), @@ -45,7 +47,9 @@ test_that("Unnest all functions", { # expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) == "language"}))), TRUE) # ** Temporary ** # Check that content of each fn is language or symbol - expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) %in% c("language", "symbol")}))), TRUE) + expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x) { + typeof(x) %in% c("language", "symbol") + }))), TRUE) # Check uniqueness of fn_hash expect_equal(anyDuplicated(ep_fn[["fn_hash"]]), 0) @@ -63,10 +67,12 @@ test_that("Unnest all functions", { "crit_sgd_dummy" = "crit_by_strata_by_trt", "crit_sga_dummy" = "crit_by_strata_across_trt" ) - expect_equal(all(apply(ep_fn, 1, - function(x) { - return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) - })), TRUE) + expect_equal(all(apply( + ep_fn, 1, + function(x) { + return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) + } + )), TRUE) }) @@ -76,10 +82,10 @@ test_that("Unnest criterion functions", { crit_ep_dummy <- function(...) { return(T) } - crit_sgd_dummy <- function(...){ + crit_sgd_dummy <- function(...) { return(T) } - crit_sga_dummy <- function(...){ + crit_sga_dummy <- function(...) { return(T) } @@ -87,7 +93,8 @@ test_that("Unnest criterion functions", { data_prepare = mk_adae, stat_by_strata_by_trt = list( "n_sub" = n_sub, - "n_subev" = n_subev), + "n_subev" = n_subev + ), stat_by_strata_across_trt = list("n_subev_trt_diff" = n_subev_trt_diff), stat_across_strata_across_trt = list("P-interaction" = contingency2x2_ptest), crit_endpoint = list(crit_ep_dummy), @@ -97,13 +104,15 @@ test_that("Unnest criterion functions", { ep <- add_id(ep) - fn_cols <- c("crit_endpoint", "crit_by_strata_by_trt", - "crit_by_strata_across_trt") + fn_cols <- c( + "crit_endpoint", "crit_by_strata_by_trt", + "crit_by_strata_across_trt" + ) # ACT --------------------------------------------------------------------- - ep_fn <- suppressWarnings(unnest_endpoint_functions(ep, fn_cols=fn_cols)) + ep_fn <- suppressWarnings(unnest_endpoint_functions(ep, fn_cols = fn_cols)) # EXPECT ------------------------------------------------------------------ @@ -121,7 +130,9 @@ test_that("Unnest criterion functions", { # expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) == "language"}))), TRUE) # ** Temporary ** # Check that content of each fn is language or symbol - expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) %in% c("language", "symbol")}))), TRUE) + expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x) { + typeof(x) %in% c("language", "symbol") + }))), TRUE) # Check uniqueness of fn_hash expect_equal(anyDuplicated(ep_fn[["fn_hash"]]), 0) @@ -134,10 +145,12 @@ test_that("Unnest criterion functions", { "crit_sgd_dummy" = "crit_by_strata_by_trt", "crit_sga_dummy" = "crit_by_strata_across_trt" ) - expect_equal(all(apply(ep_fn, 1, - function(x) { - return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) - })), TRUE) + expect_equal(all(apply( + ep_fn, 1, + function(x) { + return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) + } + )), TRUE) }) @@ -146,8 +159,10 @@ test_that("Unnest statistical functions", { ep <- 
mk_ep_0001_base( data_prepare = mk_adae, - stat_by_strata_by_trt = list("n_sub" = n_sub, - "n_subev" = n_subev), + stat_by_strata_by_trt = list( + "n_sub" = n_sub, + "n_subev" = n_subev + ), stat_by_strata_across_trt = list("n_subev_trt_diff" = n_subev_trt_diff), stat_across_strata_across_trt = list("P-interaction" = contingency2x2_strata_test) ) @@ -159,7 +174,7 @@ test_that("Unnest statistical functions", { # ACT --------------------------------------------------------------------- - ep_fn <- suppressWarnings(unnest_endpoint_functions(ep, fn_cols=fn_cols)) + ep_fn <- suppressWarnings(unnest_endpoint_functions(ep, fn_cols = fn_cols)) # EXPECT ------------------------------------------------------------------ @@ -177,7 +192,9 @@ test_that("Unnest statistical functions", { # expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) == "language"}))), TRUE) # ** Temporary ** # Check that content of each fn is language or symbol - expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) %in% c("language", "symbol")}))), TRUE) + expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x) { + typeof(x) %in% c("language", "symbol") + }))), TRUE) # Check uniqueness of fn_hash expect_equal(anyDuplicated(ep_fn[["fn_hash"]]), 0) @@ -191,10 +208,12 @@ test_that("Unnest statistical functions", { "n_subev_trt_diff" = "stat_by_strata_across_trt", "P-interaction" = "stat_across_strata_across_trt" ) - expect_equal(all(apply(ep_fn, 1, - function(x) { - return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) - })), TRUE) + expect_equal(all(apply( + ep_fn, 1, + function(x) { + return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) + } + )), TRUE) }) test_that("Unnest adam and adsl functions", { @@ -211,7 +230,7 @@ test_that("Unnest adam and adsl functions", { # ACT --------------------------------------------------------------------- - ep_fn <- suppressWarnings(unnest_endpoint_functions(ep, fn_cols=fn_cols)) + ep_fn <- suppressWarnings(unnest_endpoint_functions(ep, fn_cols = fn_cols)) # EXPECT ------------------------------------------------------------------ @@ -229,7 +248,9 @@ test_that("Unnest adam and adsl functions", { # expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) == "language"}))), TRUE) # ** Temporary ** # Check that content of each fn is language or symbol - expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x){typeof(x) %in% c("language", "symbol")}))), TRUE) + expect_equal(all(unlist(lapply(ep_fn[["fn"]], function(x) { + typeof(x) %in% c("language", "symbol") + }))), TRUE) # Check uniqueness of fn_hash expect_equal(anyDuplicated(ep_fn[["fn_hash"]]), 0) @@ -240,8 +261,10 @@ test_that("Unnest adam and adsl functions", { lookup <- c( "mk_adae" = "data_prepare" ) - expect_equal(all(apply(ep_fn, 1, - function(x) { - return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) - })), TRUE) + expect_equal(all(apply( + ep_fn, 1, + function(x) { + return(x[["fn_type"]] == lookup[[x[["fn_name"]]]]) + } + )), TRUE) }) diff --git a/tests/testthat/test-use_chef.R b/tests/testthat/test-use_chef.R index e110df7..9049c66 100644 --- a/tests/testthat/test-use_chef.R +++ b/tests/testthat/test-use_chef.R @@ -1,156 +1,156 @@ -test_that("use_chef makes top-level dirs and files", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project() - - # ACT --------------------------------------------------------------------- - use_chef(pipeline_id = "001") - - # EXPECT 
------------------------------------------------------------------ - actual <- list.dirs() - expect_equal(actual, c(".", "./R", "./pipeline")) - expect_equal(list.files(), sort(c("R", "_targets.yaml", "pipeline"))) - }) - -test_that("use_chef makes top-level dirs and fils when in Rproj", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project(rstudio = TRUE) - # ACT --------------------------------------------------------------------- - use_chef(pipeline_id = "001") - - # EXPECT ------------------------------------------------------------------ - actual <- list.dirs() - expect_equal(actual, c(".", "./R", "./pipeline")) - proj_files <- list.files( pattern = "\\.Rproj$") - actual <- setdiff(list.files(), proj_files) - expect_equal(actual, sort(c("R", "_targets.yaml", "pipeline"))) - }) - - -test_that("use_chef writes default R files", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project() - - # ACT --------------------------------------------------------------------- - use_chef(pipeline_id = "001") - - # EXPECT ------------------------------------------------------------------ - expect_equal(list.files("R/"), - sort(c("mk_adam_scaffold.R", "mk_endpoint_def.R", "packages.R"))) - expect_equal(list.files("pipeline/"), - c("pipeline_001.R")) - - }) - -test_that("use_chef writes ammnog crit functions", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project() - crit_endpoint <- function() { - "check" - } - # ACT --------------------------------------------------------------------- - use_chef(pipeline_id = "001", mk_criteria_fn = crit_endpoint) - - # EXPECT ------------------------------------------------------------------ - actual <- list.files("R/") - expect_equal( - actual, - c( - "crit_endpoint.R", - "mk_adam_scaffold.R", - "mk_endpoint_def.R", - "packages.R" - ) - ) - x <- readLines("R/crit_endpoint.R") - expect_true(any(grepl("\"check\"", x = x))) - }) - -test_that("use_chef writes custom mk_endpoint_def fn, and uses standard name", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project() - mk_endpoint_custom <- function() { - "check" - } - # ACT --------------------------------------------------------------------- - use_chef(pipeline_id = "001", mk_endpoint_def_fn = mk_endpoint_custom) - - # EXPECT ------------------------------------------------------------------ - - actual <- list.files("R/") - expect_equal(actual, - c("mk_adam_scaffold.R", "mk_endpoint_def.R", "packages.R")) - x <- readLines("R/mk_endpoint_def.R") - expect_true(any(grepl("\"check\"", x = x))) - }) - - -test_that("use_chef writes custom mk_adam fn", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project() - mk_adam_custom <- function() { - "check" - } - # ACT --------------------------------------------------------------------- - use_chef(pipeline_id = "001", mk_adam_fn = mk_adam_custom) - - # EXPECT ------------------------------------------------------------------ - actual <- list.files("R/") - expect_equal(actual, - c( - "mk_adam_custom.R", - "mk_endpoint_def.R", - "packages.R" - )) - x <- readLines("R/mk_adam_custom.R") - expect_true(any(grepl("\"check\"", x = x))) - }) - -test_that("use_chef writes multiple 
mk_adam fn's", - { - # SETUP ------------------------------------------------------------------- - testr::skip_on_devops() - testr::create_local_project() - mk_adam_custom <- function() { - "check" - } - mk_adam_custom_2 <- function() { - "check_2" - } - # ACT --------------------------------------------------------------------- - use_chef( - pipeline_id = "001", - mk_adam_fn = list(mk_adam_custom, mk_adam_custom_2) - ) - - # EXPECT ------------------------------------------------------------------ - actual <- list.files("R/") - expect_equal( - actual, - c( - "mk_adam_custom.R", - "mk_adam_custom_2.R", - "mk_endpoint_def.R", - "packages.R" - ) - ) - x <- readLines("R/mk_adam_custom.R") - expect_true(any(grepl("\"check\"", x = x))) - - x <- readLines("R/mk_adam_custom_2.R") - expect_true(any(grepl("\"check_2\"", x = x))) - }) +test_that("use_chef makes top-level dirs and files", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project() + + # ACT --------------------------------------------------------------------- + use_chef(pipeline_id = "001") + + # EXPECT ------------------------------------------------------------------ + actual <- list.dirs() + expect_equal(actual, c(".", "./R", "./pipeline")) + expect_equal(list.files(), sort(c("R", "_targets.yaml", "pipeline"))) +}) + +test_that("use_chef makes top-level dirs and fils when in Rproj", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project(rstudio = TRUE) + # ACT --------------------------------------------------------------------- + use_chef(pipeline_id = "001") + + # EXPECT ------------------------------------------------------------------ + actual <- list.dirs() + expect_equal(actual, c(".", "./R", "./pipeline")) + proj_files <- list.files(pattern = "\\.Rproj$") + actual <- setdiff(list.files(), proj_files) + expect_equal(actual, sort(c("R", "_targets.yaml", "pipeline"))) +}) + + +test_that("use_chef writes default R files", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project() + + # ACT --------------------------------------------------------------------- + use_chef(pipeline_id = "001") + + # EXPECT ------------------------------------------------------------------ + expect_equal( + list.files("R/"), + sort(c("mk_adam_scaffold.R", "mk_endpoint_def.R", "packages.R")) + ) + expect_equal( + list.files("pipeline/"), + c("pipeline_001.R") + ) +}) + +test_that("use_chef writes ammnog crit functions", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project() + crit_endpoint <- function() { + "check" + } + # ACT --------------------------------------------------------------------- + use_chef(pipeline_id = "001", mk_criteria_fn = crit_endpoint) + + # EXPECT ------------------------------------------------------------------ + actual <- list.files("R/") + expect_equal( + actual, + c( + "crit_endpoint.R", + "mk_adam_scaffold.R", + "mk_endpoint_def.R", + "packages.R" + ) + ) + x <- readLines("R/crit_endpoint.R") + expect_true(any(grepl("\"check\"", x = x))) +}) + +test_that("use_chef writes custom mk_endpoint_def fn, and uses standard name", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project() + mk_endpoint_custom <- function() { + "check" + 
} + # ACT --------------------------------------------------------------------- + use_chef(pipeline_id = "001", mk_endpoint_def_fn = mk_endpoint_custom) + + # EXPECT ------------------------------------------------------------------ + + actual <- list.files("R/") + expect_equal( + actual, + c("mk_adam_scaffold.R", "mk_endpoint_def.R", "packages.R") + ) + x <- readLines("R/mk_endpoint_def.R") + expect_true(any(grepl("\"check\"", x = x))) +}) + + +test_that("use_chef writes custom mk_adam fn", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project() + mk_adam_custom <- function() { + "check" + } + # ACT --------------------------------------------------------------------- + use_chef(pipeline_id = "001", mk_adam_fn = mk_adam_custom) + + # EXPECT ------------------------------------------------------------------ + actual <- list.files("R/") + expect_equal( + actual, + c( + "mk_adam_custom.R", + "mk_endpoint_def.R", + "packages.R" + ) + ) + x <- readLines("R/mk_adam_custom.R") + expect_true(any(grepl("\"check\"", x = x))) +}) + +test_that("use_chef writes multiple mk_adam fn's", { + # SETUP ------------------------------------------------------------------- + testr::skip_on_devops() + testr::create_local_project() + mk_adam_custom <- function() { + "check" + } + mk_adam_custom_2 <- function() { + "check_2" + } + # ACT --------------------------------------------------------------------- + use_chef( + pipeline_id = "001", + mk_adam_fn = list(mk_adam_custom, mk_adam_custom_2) + ) + + # EXPECT ------------------------------------------------------------------ + actual <- list.files("R/") + expect_equal( + actual, + c( + "mk_adam_custom.R", + "mk_adam_custom_2.R", + "mk_endpoint_def.R", + "packages.R" + ) + ) + x <- readLines("R/mk_adam_custom.R") + expect_true(any(grepl("\"check\"", x = x))) + + x <- readLines("R/mk_adam_custom_2.R") + expect_true(any(grepl("\"check_2\"", x = x))) +}) test_that("use_chef set-up in README works", { @@ -219,5 +219,4 @@ test_that("use_chef with custom pipeline_dir names works", { ) # EXPECT ------------------------------------------------------------------ expect_equal(list.files(path = "./pipeline"), "pipeline_01.R") - }) diff --git a/tests/testthat/test-validate_usr_fn_args.R b/tests/testthat/test-validate_usr_fn_args.R index 8fbef06..ba245ad 100644 --- a/tests/testthat/test-validate_usr_fn_args.R +++ b/tests/testthat/test-validate_usr_fn_args.R @@ -1,6 +1,5 @@ test_that("error if expecting more variables", { - - my_data_prepare <- function(study_metadata, some_specific_var){ + my_data_prepare <- function(study_metadata, some_specific_var) { a <- study_metadata b <- some_specific_var } @@ -12,15 +11,14 @@ test_that("error if expecting more variables", { ), sprintf( "Function (%s) of type (%s) expects argument(s) which is not supplied", - "my_data_prepare", "data_prepare"), - fixed=TRUE + "my_data_prepare", "data_prepare" + ), + fixed = TRUE ) - - }) -test_that("error is not thrown for partialized functions",{ - my_data_prepare <- function(study_metadata, some_specific_var){ +test_that("error is not thrown for partialized functions", { + my_data_prepare <- function(study_metadata, some_specific_var) { a <- study_metadata b <- some_specific_var @@ -39,26 +37,25 @@ test_that("error is not thrown for partialized functions",{ ), sprintf( "Function (%s) of type (%s) expects argument(s) which is not supplied", - "my_data_prepare", "data_prepare" ), - fixed=TRUE + "my_data_prepare", 
"data_prepare" + ), + fixed = TRUE ) expect_na_or_null( validate_usr_fn_args( fn = my_data_partial, fn_type = "data_prepare" - ) + ) ) -} -) - -test_that("Under defined functions fail, but is rescued by dots.",{ +}) - my_fun <- function(){ - 1+1 - } - my_fun_dots <- function(...){ - 1+1 +test_that("Under defined functions fail, but is rescued by dots.", { + my_fun <- function() { + 1 + 1 + } + my_fun_dots <- function(...) { + 1 + 1 } expect_error( @@ -67,7 +64,7 @@ test_that("Under defined functions fail, but is rescued by dots.",{ fn_type = "data_prepare" ), "is supplied arguments it does not expect", - fixed=TRUE + fixed = TRUE ) expect_na_or_null( @@ -76,39 +73,36 @@ test_that("Under defined functions fail, but is rescued by dots.",{ fn_type = "data_prepare" ) ) - }) -test_that("Ekstra args but with default args are allowed",{ - my_data_prep <- function(study_metadata, arg_no_default, ...){ +test_that("Ekstra args but with default args are allowed", { + my_data_prep <- function(study_metadata, arg_no_default, ...) { message(study_metadata, arg_no_default, ...) } expect_error( validate_usr_fn_args( fn = my_data_prep, - fn_type = "data_prepare"), + fn_type = "data_prepare" + ), "expects argument(s) which is not supplied", - fixed=TRUE - ) + fixed = TRUE + ) - my_data_prep <- function(study_metadata, arg_with_default=1, ...){ + my_data_prep <- function(study_metadata, arg_with_default = 1, ...) { message(study_metadata, arg_with_default) } expect_na_or_null( validate_usr_fn_args( - fn=my_data_prep, + fn = my_data_prep, fn_type = "data_prepare" ) ) - - }) -test_that("Test implementation in mk_userdef_fn_dt",{ - +test_that("Test implementation in mk_userdef_fn_dt", { crit_endpoint <- function(...) { return(T) } @@ -119,8 +113,12 @@ test_that("Test implementation in mk_userdef_fn_dt",{ return(T) } - stat_bad_input <- function(dat, missing_arg){"woooh"} - stat_good_input <- function(dat, cell_index, defaulted_arg=1, ...){"wububu"} + stat_bad_input <- function(dat, missing_arg) { + "woooh" + } + stat_good_input <- function(dat, cell_index, defaulted_arg = 1, ...) { + "wububu" + } ep_good <- mk_endpoint_str( study_metadata = list(), @@ -174,19 +172,17 @@ test_that("Test implementation in mk_userdef_fn_dt",{ ep_fn_map_good <- suppressWarnings(unnest_endpoint_functions(ep_good)) -# ACT --------------------------------------------------------------------- + # ACT --------------------------------------------------------------------- expect_true( inherits( mk_userdef_fn_dt(ep_fn_map_good, env = environment()), "data.table" - ) + ) ) expect_error( mk_userdef_fn_dt(ep_fn_map_err, env = environment()), "Function (stat_bad_input) of type (stat_by_strata_by_trt) expects argument(s) which is not supplied", - fixed=TRUE + fixed = TRUE ) - - }) diff --git a/vignettes/.gitignore b/vignettes/.gitignore deleted file mode 100644 index 097b241..0000000 --- a/vignettes/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.html -*.R diff --git a/vignettes/chef.Rmd b/vignettes/chef.Rmd deleted file mode 100644 index 1e52a36..0000000 --- a/vignettes/chef.Rmd +++ /dev/null @@ -1,272 +0,0 @@ ---- -title: "Getting started" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Getting started} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -Here we start with an empty R project and walk through an example analysis. 
Our goal for this analysis is to report the number of subjects experiencing a mild adverse event in each treatment arm stratified by a custom age grouping. For this example we use the ADCM dataset provided in the {pharmaverseadam} package. - -The outline of the workflow will be: - - 1. Set up our project infrastructure by running `chef::use_chef` - 2. Specify our endpoint - 3. Define function to produce our ADaM data - 4. Define function to calculate the statistics we want as results (i.e. number of events) - 5. Run the pipeline and inspect the results - - -# 1. Set up project infrastructure - - -```{r, include=FALSE} -# Set up temporary project for vignette -library(chef) -wd_old <- getwd() -prj_old <- suppressMessages(usethis::proj_get()) -suppressMessages(testr::create_local_project()) -tmp_dir <- getwd() -mk_endpoint_def <- function() { - mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("AGEGR2")), - data_prepare = mk_adcm, - endpoint_label = "A", - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list("n_subev" = c(n_subev)) - ) -} -mk_adcm <- function(study_metadata){ - adcm <- data.table::as.data.table(pharmaverseadam::adcm) - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] - adcm_out <- - merge(adsl, adcm[, c(setdiff(names(adcm), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) - adcm_out[] -} -# Number of subjects with events -n_subev <- function(dat, - event_index, - cell_index, - subjectid_var, - ...) { - - - stat <- dat[J(intersect(cell_index, event_index))] %>% - unique(., by = c(subjectid_var)) %>% - nrow() - - return(data.table( - description = "Number of subjects with events", - qualifiers = NA_character_, - label = "n", - value = stat - )) -} -chef::use_chef(pipeline_id = "01", mk_endpoint_def_fn = mk_endpoint_def, mk_adam_fn = list(mk_adcm, n_subev)) -``` - -**This assumes you have set up an RStudio project (or equivalent).** If you have not done so, do that first. - -To setup a chef project you need: - - - A `R/` directory where all project-specific R code will be stored. This will include: - - Any functions used to make the `ADaM` data ingested by the chef pipeline - - The R function that produces the endpoint specification object - - Any analysis/statistical functions that are not sourced from other R packages - - A script containing `library()` calls to any package needed for the pipeline to run - - A `pipeline/` directory where the `targets` pipeline(s) is/are defined - - A `targets.yml` file tracking the different pipelines - -The file file structure should look like this: -``` -/ - |-- R/ - |--- mk_endpoint_definition.R - |--- mk_adam.R - |--- packages.R - |-- pipeline/ - |--- pipeline_01.R - |-- _targets.yaml - -``` - -{chef} has a convenience function to set up this infrastructure for you: - -```{r eval=FALSE} - library(chef) - chef::use_chef( - pipeline_id = "01" - ) - -``` - - This sets up the following file structure: - - - - For now we need to know what the file in `R/` do. For the `_targets.yml` and `pipeline_01.R` explanation, see `vignette("pipeline")` - -# 1. 
Specify an endpoint - -Endpoint specifications need to be created inside a function, in this case the function defined in the `mk_endpoint_definition.R` - -An endpoint is created by using the `mk_endpoint_str()` function. For an explanation of how to specify endpoints, see `vignette("endpoint_definitions")`. - -Here we specify a minimal working endpoint based on the `ADCM` dataset supplied by {pharmaverseadam}. We do this by modifying the `R/mk_endpoint_definition.R` file so that is looks like this: - -```{r, eval=F} -mk_endpoint_def <- function() { - mk_endpoint_str( - study_metadata = list(), - pop_var = "SAFFL", - pop_value = "Y", - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("AGEGR2")), - data_prepare = mk_adcm, - endpoint_label = "A", - custom_pop_filter = "TRT01A %in% c('Placebo', 'Xanomeline High Dose')", - stat_by_strata_by_trt = list("n_subev" = c(n_subev)) - ) -} -``` - -You might notice a couple things with this specification: - -- Even though we are using the ADCM dataset from {pharmavreseadam}, there is no reference to this in the endpoint specification This is because the input clinical data is created via the `adam_fn` field, so in this case the reference to the `ADCM` data set will be inside the `mk_adcm` function (see next section). - - In the `stratify_by` field we refer to a variable called `AGEGR2`, however the ADCM dataset from {pharmaverseadam} does not contain any such variable. This is because we will derive this variable inside `mk_adcm` (see next section). - - -# 2. Define the input dataset - -We also need to provide chef with the `ADCM` input data set that that corresponds to the endpoint specified above. To read more about make these data sets, see `vignette("mk_adam")`. We can see that we have strata based on a `AGEGR2`, which can be derived from the `AGE` variable in `ADSL`. -For now, we write a simple `ADaM` function `mk_adcm` that merges the `ADSL` data set (enriched with `AGEGR2`) onto the `ADCM` data set, thereby creating the input data set. - -```{r, eval=F} -mk_adcm <- function(study_metadata){ - adcm <- data.table::as.data.table(pharmaverseadam::adcm) - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] - adcm_out <- - merge(adsl, adcm[, c(setdiff(names(adcm), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) - adcm_out[] -} - -``` - -# 3. Define the analysis methods - -Now that we have specified the endpoint to be analyzed, and defined the analysis data set for {chef}, we need to define the analysis itself. - -Our goal for this analysis is to count the number of events experiencing an event. We need to define a function that makes those calculations, and give that function to chef. Because we want a result per treatment arm - strata combination, we must provide the function in the `stat_by_strata_by_trt` argument in the endpoint specification. We have already this argument set to `n_events` in the example endpoint specification above, so now need to define the `n_events` function. To see more about how to define analysis functions, see. For now we use this simple function that simply counts the number of rows: - -```{r, eval=F} -n_subev <- function(dat, - event_index, - cell_index, - treatment_var, - treatment_val, - strata_var, - strata_val, - subjectid_var, - ...) 
{ - stat <- dat[J(cell_index)] %>% - .[, event_match := INDEX_ %in% event_index] %>% - unique(., by = c(subjectid_var, "event_match")) %>% - .[["event_match"]] %>% - sum() - - return(data.table( - decription = "Number of subjects with events", - qualifiers = NA_character_, - label = "n", - value = stat - )) -} -``` - -# 4. Run the analysis pipeline - -Now that all the inputs are defined, we can run the pipeline. This is achieved by a call to `tar_make()` from the {targets} package. -```{r,eval=FALSE} -tar_make() -``` - -Targets will show you which steps in the pipeline are executed and how long each step took: - -```{r, echo=FALSE} -setwd(tmp_dir) # Do not run, only needed to get the markdown file to run -tar_make() - - -``` - -Then, to see the results, you load the cached step of the pipeline corresponding to the results. In our case it will be `ep_stat`, so to load it into the sessions as an object we call - -```{r, eval=FALSE} -tar_load(ep_stat) - -``` - -Now `ep_stat` is an R object like any other. Thus we can look at our results simply by running - -``` -ep_stat -``` - -However, there is a lot of extra data included in the object, so lets look at a column subsection of the first 5 rows: - -```{r, eval=FALSE} -ep_stat[, .( - treatment_var, - treatment_refval, - period_var, - period_value, - strata_var, - stat_result_value -)] |> head() - -``` - - -```{r, echo=FALSE} -setwd(tmp_dir) # Do not run, only needed to get the markdown file to run -tar_load(ep_stat) -ep_stat[, .( - stat_result_id, - treatment_var, - treatment_refval, - period_var, - period_value, - strata_var, - stat_result_value -)] |> head() -``` - - - -# 5. Pass the data on to TFL formatting - -Now that the data is produced, you can pass it on for TFL formatting (outside the scope of {chef}). diff --git a/vignettes/debugging.Rmd b/vignettes/debugging.Rmd deleted file mode 100644 index 25f9352..0000000 --- a/vignettes/debugging.Rmd +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: "Debugging" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Debugging} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -Debugging a chef pipeline is different than debugging normal R scripts or functions. This is because under the hood, chef makes heavy use of the {[targets](https://books.ropensci.org/targets/)} package. - -We present here two main approaches to debugging: - - - Use the chef helper functions - - Use the {[targets](https://books.ropensci.org/targets/)} helper functions - -The chef debugging helper functions are best for normal debugging situations, while using the {targets} helper functions may be needed for more in-depth debugging sessions. - - -## Context - why debugging is different - -In a chef pipeline, the execution environment is managed by {[targets](https://books.ropensci.org/targets/)}, and does **not** take place in your working environment. As a side-effect, it is not be as straightforward to reproduce errors or trace them back to their source. Specifically: - - - Each target in a {targets} pipeline is run in an new R session that closes upon completion or hitting an error. This means that the working environment is clean for every run, which is beneficial for reproducibility but can make it harder to debug because the state that led to an error might not be readily accessible. - - - Targets uses a caching mechanism to avoid re-running successful tasks. 
While this feature enhances efficiency, it can make debugging tricky. Understanding whether a bug is due to current code or cached results might not be straightforward - - - Sometimes, the error messages from a targets pipeline can be cryptic or not very informative, making it harder to figure out what's going wrong. - - - Debugging often requires interactively running code to inspect objects and their states. However, {targets} is designed for non-interactive batch (and sometimes parallel) execution, which can make interactive debugging less straightforward. For example, you cannot just insert a `browser()` into the function that errored out like you would in interactive debugging. - -## Chef style debugging - -The most common errors will be due to errors stemming from improper user inputs (e.g. the user-supplied functions that that generate the input ADaM datasets, or contain the statistical methods). To debug these, it is easiest if the programmer has access to the state of the program at the time it errored-out. - - -Errors stemming from user function are split into two types: - -- Firstly, those relating to the function formals or more specifically whether the function inputs will match with those supplied by {chef}. - -- Secondly, errors which arise during the evaluation of the function and validation of its output. These errors can come from bug in the functions causing crashing at runtime, or functions which return an invalid output. - -### Input errors. - -Input errors stems from mismatch between the expected arguments of a function and those arguments which {chef} supplies. Errors in this domain could stem from improperly defined statistical functions or for instance statistical functions applied to wrong statistical types in the endpoint specification. - -For each function that the user can define {chef} supplies a predefined set of arguments ([data](ep_spec_adam_data.html#overview), [statistics](methods_stat.html#input-specifications-and-examples), [criteria](methods_criteria.html#input-specifications)). These arguments are always supplied, why function must either have an exhaustive function definition, or as *recommended* include dots (...) in the function definition to allow the functions to ignore those arguments, which are not required by the specific function. - -##### Examples: - -An **over-defined function** is a function which require arguments which are not supplied. - -``` -# Given a data_prepare function: -my_data <- function(study_metadata, specific_arg){ - #... -} - -# Would give rise to the following error: - -Function (my_data) of type (data_prepare) expects argument(s) which is not supplied: - specific_arg -Supplied arguments: - study_metadata -Expected arguments: (required, [optional]) - specific_arg, study_metadata [] -``` - -In the above example, the expect-but-no-supplied argument `specific_arg` is clearly described as well as those arguments chef supplies (`study_metadata`) and the full function argument specification. - ---- - -An **under-defined function** is a function which does not explicitly require all the supplied arguments and also doesn't have dots (...) included to catch surplus arguments. - -``` -# Given a data_prepare function: -my_data <- function(arg_w_default=2){ - #... 
-} - -# Would give rise to the following error: - -Function (my_data) of type (data_prepare) is supplied arguments it does not expect: - study_metadata -Supplied arguments: - study_metadata -Expected arguments: (required, [optional]) - [my_def_arg] -Either state all supplied args explicitely or use dots (...) as a passthrough (recommended). - -``` - -In the under-defined example the function does not expect the supplied `study_metadata` argument. This example also shows how optional arguments (with default values) are displayed. - -### Evaluation and validation errors. - -Chef has built-in helpers that provide the user the state of the pipeline when a user-supplied function errors-out. By using these helpers, the programmer can access the workspace (i.e., all objects and functions) at the point of the error. Then they can debug interactively like normal R debugging. - -In broad terms Chef will supply the user with the function and input parameters that lead to an erroneous evaluation. Chef will supply debugging sessions if the function crashes or if the function output is not compliant.([Statistical output](methods_stat.html#output-specification-shared-for-all-types), [Criterion output](methods_criteria.html#output-specifications)) - -For failures during the evaluation the error message will contain the keyword `EVALUATE` while non-conforming outputs will result in errors with the `VALIDATE` keyword. Sample error messages can be seen in the following. - -``` -# Error message wrt. validation - -Error during evaluation of: log(1) -Failed to VALIDATE function output with error: -Expected (data.table::data.table) Found: numeric - -# Error message wrt. evaluation. - -Error during evaluation of: P-interaction -Failed to EVALUATE function with error: - Error : argument "cell_index" is missing, with no default -``` - -##### Debugging session: - -When an evaluation fails or the output is non-compliant {chef} will collect the inputs to the function and the function and write it to file as a new environment object. - -The debugging environment will be written to a debugging subfolder of the current working directory as an .RDS file. -The problem can then be investigated using the built-in function `chef::load_debug_session()` or by loading it directly into your session using `readRDS()`. - - -##### Debugging example: - -In this example we will review the debugging flow for a faulty stat method. - -1. We define an endpoint with a single statistic. -```{r, eval=F} -mk_ep_def <- function() { - ep <- mk_endpoint_str( - ..., # The rest of the input parameters. - stat_by_strata_by_trt = list("n_sub" = n_sub), - ) - } -``` - -2. We then setup the pipeline as described in [End-to-End examples](example_end2end.html). - -3. We execute the pipeline with `tar_make()` - -We can then check the output messages from the target run: -The `▶` indicates a starting target evaluation and `●`, `✖` indicates a successful and failed evaluation respectably. - -__Console output__ -``` -tar_make() -Loading required package: targets -▶ start target ep -● built target ep [0.038 seconds] -▶ start target ep_id -● built target ep_id [0.001 seconds] -(...) # We ignore some other upstream target evaluations. 
-▶ start target ep_stat_by_strata_by_trt -✖ error target ep_stat_by_strata_by_trt -▶ end pipeline [3.556 seconds] -Error: Error running targets::tar_make() - Error messages: targets::tar_meta(fields = error, complete_only = TRUE) - Debugging guide: https://books.ropensci.org/targets/debugging.html - How to ask for help: https://books.ropensci.org/targets/help.html - Last error: -Error during evaluation of: n_sub -Failed to EVALUATE function with error: - Error in fn_callable[[1]](dat = dat[[1]], treatment_var = treatment_var, : - Calculation impossible! - ---- -Debugging session created: Launch with: - chef::load_debug_session('/tmp/RtmpCiXPPf/testproj275bcd76287e8d/debug/n_sub.Rdata') ---- -``` -You can see that the pipeline failed during the evaluation of the target-step `ep_stat_by_strata_by_trt`. Furthermore, you can view the `Last error` to get a meaningful error message. -Here it shows that it failed to evaluate the function aliased `n_sub` and an error with the message `Calculation impossible` - -We can inspect the problem further running the suggested command: -- `chef::load_debug_session('/tmp/RtmpCiXPPf/testproj275bcd76287e8d/debug/n_sub.Rdata')` - -```{r criteria1, echo=FALSE, out.width="100%"} -knitr::include_graphics("./figures/debug_example.png") -``` - -We can then play around with the function, given the supplied inputs. - -In this toy example we simply have a function which throws an error - a simple error to fix. However, this tool provides you with the possiblity to explore and tinker with your functions in the context which they fail in the pipeline. -NB. as mentioned in the console message - remember to update you changes to the source code! - - - - -## Targets style debugging - -There are several approaches to debugging a pipeline that is erroring out. Find much more details in the [targets manual](https://books.ropensci.org/targets/debugging.html): - - - In a clean R session, load the inputs to the target that is producing the error via `tar_load(names = c( - %\VignetteIndexEntry{Git Workflow} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r setup, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - - -# Workflow - -We work with three top branches - -* `main` Latest stable release. -* `test` Semi stable testing environment. -* `dev` For development purposes. - -The workflow is as follows: -#### 1) Branch of the latest version of `dev`: -``` -git checkout dev -git pull -git checkout -b "feature/cool_new_feature" -``` -#### 2) Implement and push to feature branch continously. -``` -# The below creates the remote link. -git push --set-upstream origin feature/cool_new_feature -``` - -#### 3) Create a PR to merge into the dev branch. -##### 3.5) Handle merge conflicts locally if there are any. -``` -git checkout feature/cool_new_feature -git merge dev - -/// solve the conflicts - -git commit -a -m "Solved merge conflicts" -git push -``` - -*The next steps are required when a feature or set of features should move from dev into testing.* - -#### 4) Create a pull request and move dev into testing -* Perform various tests to ensure the new features are ready for production. - -#### 5) If releasing a new version: Create a pull request from testing into main. -##### 5.5) Celebrate that a new stable version have been released. 
diff --git a/vignettes/ep_overview.Rmd b/vignettes/ep_overview.Rmd deleted file mode 100644 index 76e35d6..0000000 --- a/vignettes/ep_overview.Rmd +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: "Endpoint Specification" -output: - rmarkdown::html_vignette: - toc: true -vignette: > - %\VignetteIndexEntry{Endpoint Specification} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{knitr::rmarkdown} -editor_options: - markdown: - wrap: sentence ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Overview - -The endpoint specifications comprise a central input to {chef} as it contains the instructions to produce the statistics for endpoints. - -The specifications contain instructions on *what* data to use for the endpoint, *what* statistics to include, *how* the statistics are calculated, *how* to group the population, and *what* criteria must be met to include the endpoint in the final dossier. - -The specifications must be organized in a data table where each row contains an endpoint specification -These rows are processed independently and may represent one of more endpoints. -One single specification may contain instructions to produce multiple endpoints for e.g. different severity levels or system organ classes. - -The result of processing the endpoint specifications through {chef} is a set of statistics, which are structured in a long-formatted data table that can be further processed by downstream modules that e.g. format the raw results to endpoint tables. This is out of scope of {chef}. - -Many of the components in the endpoint specifications consist of references to custom functions outside {chef}, which provide high flexibility in defining the endpoints. -Both the endpoint specifications and associated custom functions must be supplied by the user as inputs to {chef} and is not a part of the package. - -The endpoint definition is created by calling the function `mk_endpoint_str`. Some endpoint parameters are required while others are optional. Optional parameters are by default set to empty when calling `mk_endpoint_str`, so that the user does not have to type all endpoint parameters every time but only those that are required and relevant to each endpoint specification. - -In summary, the endpoint specifications can be considered a cooking recipe that along with a set of ingredients (the trial data) and the cooking tools (the custom functions) are handed over to the chef, {chef}, that prepares the endpoints. - -For more details on the internal steps of {chef} see [Getting Started with Pipelines](targets_gettingstarted.html). - -# Components - -The parameters of each endpoint specification can be grouped in to the sets below, which are explained in their respective sections: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Section | Endpoint parameter |
|------|---------|---------------------|
| Population and outcome specifications | [ADaM data](ep_spec_adam_data.html). What data to use and how to consolidate it into a single data table for the endpoint that is used to calculate statistics. | `study_metadata`, `data_prepare` |
| | [Treatment arms](ep_spec_treatment_arms.html). Which variable contains marking of the treatment arms and what is the reference/intervention treatment arm. | `treatment_var`, `treatment_refval` |
| | [Analysis population](ep_spec_population_def.html). How to filter the data to the analysis population for the endpoint. | `pop_var`, `pop_value`, `custom_pop_filter` |
| | [Endpoint events](ep_spec_event_def.html). How to define events for the endpoint. | `period_var`, `period_value`, `endpoint_filter`, `group_by` |
| | [Strata](ep_spec_strata_def.html). How to slice the data within the endpoint and if only slices with events should be kept. | `stratify_by`, `only_strata_with_events` |
| | [Endpoint label](ep_spec_label.html). Description of what events the endpoint presents. | `endpoint_label` |
| Methods | [Criteria methods](methods_criteria.html). Requirements that must be met to include different types of statistics in the endpoint results. | `crit_endpoint`, `crit_by_strata_by_trt`, `crit_by_strata_across_trt` |
| | [Statistical methods](methods_stat.html). Statistical methods to apply in the endpoint. | `stat_by_strata_by_trt`, `stat_by_strata_across_trt`, `stat_across_strata_across_trt` |
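
To make the grouping above concrete, the sketch below shows a minimal endpoint specification that touches each parameter group. It is an illustration only: `mk_adcm()` and `n_subev()` stand in for user-supplied data and statistical functions like the ones defined in the other vignettes, and only a subset of the optional parameters is shown.

```{r, eval=FALSE}
# Minimal sketch of an endpoint specification touching each parameter group.
# mk_adcm() and n_subev() are assumed to be user-defined helper functions.
ep_spec_minimal <- mk_endpoint_str(
  # Population and outcome specifications
  study_metadata   = list(),                             # ADaM data
  data_prepare     = mk_adcm,
  treatment_var    = "TRT01A",                           # Treatment arms
  treatment_refval = "Xanomeline High Dose",
  pop_var          = "SAFFL",                            # Analysis population
  pop_value        = "Y",
  period_var       = "ANL01FL",                          # Endpoint events
  period_value     = "Y",
  stratify_by      = list(c("AGEGR2")),                  # Strata
  endpoint_label   = "Minimal example endpoint",         # Endpoint label
  # Methods
  stat_by_strata_by_trt = list("n_subev" = c(n_subev))   # Statistical methods
)
```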
diff --git a/vignettes/ep_spec_adam_data.Rmd b/vignettes/ep_spec_adam_data.Rmd deleted file mode 100644 index 4fc687b..0000000 --- a/vignettes/ep_spec_adam_data.Rmd +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "ADaM Data" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{ADaM Data} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Overview - -Each endpoint needs to be generated from a single analysis data set containing information from one or more ADaM data sets. Each analysis data set is created by a custom ADaM function written by the trial programmer that is referenced in the endpoint specification via the `data_prepare` parameter. The ADaM function contains instructions to what ADaM data sets to join, how to join, and what custom variables e.g., new groupings, to create (if any). - -There are three requirements for the ADaM function: - -1. It must have exactly one input argument `study_metadata` which is an endpoint specification parameter, preferably a 1-element list, that contains any relevant information about the study that may be used to read the ADaM data sets. If no such information is needed, then leave `study_metadata` unused in the function and set it as an empty `list()` in the endpoint specification. -2. It must each return a `data.table` object. If you do not work in `data.table`, you can do this by converting your `tibble` or `data.frame` object to a `data.table` object at the end of the function via `data.table::as.data.table(my_data_frame)`. -3. The returned table must contain all the ADAM variables (and derived variables) that are mentioned in the endpoint specification and also the subject ID variable `USUBJID`. - -In summary, the analysis data set generation is controlled with the following two parameters in the endpoint specification: - -* **data_prepare**: Reference to a custom function that returns a single consolidated analysis table that contains all ADaM data needed in the endpoint. -* **study_metadata**: Object containing study specifics that are relevant for reading the ADAM data. Must be parsed as the input to the ADaM and ADSL functions. May be empty if not needed. - -# Examples - -##### Ex 1.1 - -Here is an example of an ADAM function that first reads the `ADSL` data table from {pharmaverseadam}, filters it down to two treatment arms, and enriches it with a derived variable. Then it merges the enriched ADSL with ADAE from {pharmaverseadam} and return a single table with all the ADaM information from `ADSL` and `ADAE`: - -```{r, eval = FALSE} -# Example of ADaM function that merges information from ADSL and ADAE -mk_adam_ex1_1 <- function(study_metadata) { - - # Read ADSL from {pharmaverseadam} - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - - # Filter treatment arms - adsl <- adsl[adsl$TRT01A %in% c('Placebo', 'Xanomeline High Dose')] - - # New derived ADSL variable - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] - - # Read ADAE from {pharmaverseadam} - adae <- data.table::as.data.table(pharmaverseadam::adae) - - # Merge ADSL and ADAE - adae_out <- - merge(adsl, adae[, c(setdiff(names(adae), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) - return(adae_out) -} -``` - -##### Ex 1.2 - -You might also have cases where information from several ADAM tables are required in your endpoint. 
Here is an example that returns the same ADaM information as the example above with the addition of the baseline body weight extracted from `ADVS`. So information from three ADaM tables are merged and returned in a single analysis data set: - -```{r, eval = FALSE} -# Example of ADaM function that merges information from three ADaM tables -mk_adam_ex1_2 <- function(study_metadata) { - - # Read ADSL from {pharmaverseadam} - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - - # Filter treatment arms - adsl <- adsl[adsl$TRT01A %in% c('Placebo', 'Xanomeline High Dose')] - - # New derived ADSL variable - adsl[, AGEGR2 := data.table::fcase(AGE < 70, "AGE < 70", - AGE >= 70, "AGE >= 70")] - - # Read ADAE from {pharmaverseadam} - adae <- data.table::as.data.table(pharmaverseadam::adae) - - # Read ADVS from {pharmaverseadam} - advs <- data.table::as.data.table(pharmaverseadam::advs) - - # Identify baseline body weight - advs_bw <- advs[advs$PARAMCD == "WEIGHT" & advs$VISIT == "BASELINE"] - - # Create new variable bw_baseline - advs_bw[["bw_baseline"]] <- advs_bw[["AVAL"]] - - # Merge ADSL, ADAE and baseline body weight from ADVS - ax_out <- - merge(adsl, adae[, c(setdiff(names(adae), names(adsl)), "USUBJID"), with = F], - by = "USUBJID", all.x = TRUE) %>% - merge(., advs_bw[, c("bw_baseline", "USUBJID")], by = "USUBJID", all.x = TRUE) - - return(ax_out) -} -``` - -#### Ex 1.3 - -The ADaM functions, once defined, need to be linked to the corresponding endpoint specifications via the data_prepare parameter. The creation of each endpoint specification is facilitated by the mk_endpoint_str function wherein this data_prepare parameter is established. In the following example, the `mk_adam_ex1_2` function is utilized to produce the analysis data intended for an endpoint. -```{r, eval=FALSE} -# Example of endpoint specification of ADaM function. -# The dots must be replaced with other required parameters. -ep_spec_ex1_3 <- mk_endpoint_str(data_prepare = mk_adam_ex1_2, - ...) -``` diff --git a/vignettes/ep_spec_event_def.Rmd b/vignettes/ep_spec_event_def.Rmd deleted file mode 100644 index 4b1f466..0000000 --- a/vignettes/ep_spec_event_def.Rmd +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: "Endpoint Events" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Endpoint Events} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - - - - -# Overview - -Events are the records in the analysis data that are defined as 'positives' in the context of the endpoint. -For example, adverse events of a certain type may be events for one endpoint, while for a demographic endpoint all subjects in the analysis data (that comply to the period filter) are events. - -Since the events are bounded to the rows of the analysis data, the entity of an event is not fixed. It depends on the analysis data. It may e.g. be a set of visits, or measurements, or subjects. - -For each endpoint the definition of an event is controlled via the following endpoint specification parameters: - -* **period_var**: Name of the flag variable in the analysis data set that indicates if the record is contained the required time slot for being an event. -* **period_value**: The value of `period_var` that indicates if the record is in the required time slot. -* **endpoint_filter**: An optional parameter that contains a free text endpoint filter that adds to the period filter defined by `period_var` and `period_value`. 
-* **group_by**: An optional parameter that splits the analysis data in exclusive groups that each is injected to their own endpoint. This adds to narrowing down the endpoint events as it disregards any events outside the group specific to the endpoint. - -Let us investigate how each of these parameters affect the set of events that are exposed to the endpoint. - -Initially, the input data returned from `data_prepare` is filtered to create the analysis data using the population filter c.f. [Analysis population](ep_spec_population_def.html). -Next, the endpoint **events** in the analysis data are identified according to the filters above. - -Within the endpoint itself, each **cell** in the statistics also slices the analysis data by e.g. strata and treatment arm combinations. -The strata work independently of the event definition, so within each strata there may be both event and non-events. See [Strata](ep_spec_strata_def.html) for further details. - -The illustrations below depict the four possible cases of combining the event specification parameters. - -```{r fig_ep_event, echo=FALSE, out.width="80%", fig.align = "left"} -knitr::include_graphics("./figures/endpoint_event.png") -``` - -  - -* **Case A**: Only the period filter (`period_value` and `period_var`) is applied in which case all analysis data records that meet the period filter are events. -* **Case B**: The period filter and the endpoint filter are active. -* **Case C**: Same setup as in case A with the addition of endpoint grouping so that the events are further restricted within each group element. The case illustrates the setup for one of the group levels (group 1). -* **Case D**: Same setup as in case B with the addition of endpoint grouping so that the events are further restricted within each group element. The case illustrates the setup for one of the group levels (group 1). - -# Examples - -##### Ex 4.1 - -Here are three examples of the syntax for supplying the period filter, the endpoint filter and the endpoint grouping, respectively, in the endpoint specification. - -```{r, eval = FALSE} -# Example of partial endpoint specification with period filter -ep_spec_ex4_1_1 <- mk_endpoint_str(period_var = "ANL01FL" - period_value = "Y", - ...) - -# Example of partial endpoint specification with endpoint filtering -ep_spec_ex4_1_2 <- mk_endpoint_str(endpoint_filter = 'ASEV == "MILD"', - ...) - -# Example of partial endpoint specification with endpoint grouping -ep_spec_ex4_1_3 <- mk_endpoint_str(group_by = - list(list(AESEV = c( - "MILD", "MODERATE", "SERVERE" - ))), - ...) -``` - -##### Ex 4.2 - -You may also state the group levels in a lazy manner in which case all group levels in the analysis data set is applied. This is useful if you group by a variable with many levels e.g., system organ class (SOC), or if you which to leave out certain group levels. Here is an example of how to supply a lazy grouping of SOC. - -```{r, eval = FALSE} -# Example of partial endpoint specification with lazy endpoint grouping -ep_spec_ex4_2 <- mk_endpoint_str(group_by = list(list(AESOC = c())), - ...) -``` - -##### Ex 4.3 - -In case you want to combine groups on multiple variables you can state each dimension of the grouping in the `group_by` parameter. 
This is useful for e.g., generating endpoint for each combination of adverse event severity and SOC as exemplified below with lazy grouping: - -```{r, eval = FALSE} -# Example of partial endpoint specification with lazy endpoint grouping on multiple variables -ep_spec_ex4_3 <- mk_endpoint_str(group_by = list(list(AESEV = c(), AESOC = c())), - ...) -``` - -##### Ex 4.4 - -As a continuation of the previews example, if you are only interested in mild adverse events you can replace the grouping by `AESEV` by an endpoint filter that filters by `AESEV`. - -```{r, eval = FALSE} -# Example of partial endpoint specification with endpoint filter and lazy grouping -ep_spec_ex4_4 <- mk_endpoint_str(endpoint_filter = 'ASEV == "MILD"', - group_by = list(list(AESOC = c())), - ...) -``` diff --git a/vignettes/ep_spec_label.Rmd b/vignettes/ep_spec_label.Rmd deleted file mode 100644 index e73bda8..0000000 --- a/vignettes/ep_spec_label.Rmd +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "Endpoint Label" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Endpoint Label} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Overview - -For each endpoint the user has the option to specify a label that describes the content of the endpoint. -This is parsed as a string via the `endpoint_label` parameter in the endpoint specification. -The string may contain references to other parameters in the endpoint specification encapsulated by `<>`. -These references will be dynamically evaluated to the respective values of the parameters. -The endpoint label does not have to be unique for each endpoint, and references are optional. - -Label referencing is particularly useful when endpoint grouping is applied, which resolves in an endpoint for each group level. -In this case you may dynamically refer to the group level present in each of these endpoints. - -The dynamic labeling may also be a convenient way to refer to other important parameters that identifies each endpoint e.g., `pop_var`, `period_var` and `endpoint_filter`. - - -# Examples - - -##### Ex 6.1 - -Suppose you define an endpoint specification with an endpoint on the safety analysis population (`SAFFL = "Y"`) grouped on adverse event severity `AESEV` which has three levels: `MILD`, `MODERATE`, and `SERVERE`. -Note that this assumes that `SAFFL` and `AESEV` are variables in the analysis data set. For this setup you may specify a dynamic endpoint label that refer to both the population and the severity: - -```{r, eval=FALSE} -# Example of partial endpoint specification of analysis population -ep_spec_ex6_1 <- mk_endpoint_str(pop_var = "SAFFL", - pop_value = "Y", - group_by = list(list(AESEV = c())), - endpoint_label = "Example: - adverse events", - ...) -``` - -This will resolve in three endpoints with the following labels: - -```{r, eval=FALSE} -"Example: SAFFL - MILD adverse events" -"Example: SAFFL - MODERATE adverse events" -"Example: SAFFL - SERVERE adverse events" -``` - -##### Ex 6.2 - -In extension of the previous example, suppose you only want to consider subjects of ages between 18-64 years via the age group variable `AGEGR1` in the analysis data set. -In addition, you want to update the endpoint label to refer to this filter. 
This could look like this: - -```{r, eval=FALSE} -# Example of partial endpoint specification of analysis population -ep_spec_ex6_2 <- mk_endpoint_str( - pop_var = "SAFFL", - pop_value = "Y", - endpoint_filter = "AGEGR1 == '18-64'", - group_by = list(list(AESEV = c())), - endpoint_label = - "Example: - adverse events / ", - ... -) -``` - -Again, this resolve in three endpoints and they will now have these endpoint labels: - -```{r, eval=FALSE} -"Example: SAFFL - MILD adverse events / AGEGR1 == '18-64'" -"Example: SAFFL - MODERATE adverse events / AGEGR1 == '18-64'" -"Example: SAFFL - SERVERE adverse events / AGEGR1 == '18-64'" -``` - -##### Ex 6.3 - -If you do not provide any references in the endpoint label, it is simply returned as a static string. -Hence, if you modify the label in the previous example as follows: - -```{r, eval=FALSE} -# Example of partial endpoint specification of analysis population -ep_spec_ex6_3 <- mk_endpoint_str( - pop_var = "SAFFL", - pop_value = "Y", - endpoint_filter = "AGEGR1 == '18-64'", - group_by = list(list(AESEV = c())), - endpoint_label = "Example of a fixed endpoint label", - ... -) -``` - -You will get the following endpoint labels: - -```{r, eval=FALSE} -"Example of a fixed endpoint label" -"Example of a fixed endpoint label" -"Example of a fixed endpoint label" -``` diff --git a/vignettes/ep_spec_population_def.Rmd b/vignettes/ep_spec_population_def.Rmd deleted file mode 100644 index 7421698..0000000 --- a/vignettes/ep_spec_population_def.Rmd +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Analysis Population" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Analysis Population} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Overview - -The analysis data constitutes all the records in the input data set that are relevant to the statistics in the endpoint. -This can be controlled via the following endpoint specification parameters that filters the analysis data to the analysis population: - -* **pop_var**: Name of the flag variable in the input data set that indicates if the record is eligible to the analysis in the endpoint. -* **pop_value**: The value of `pop_var` that indicates if the record eligible. -* **custom_pop_filter**: An optional parameter that contains a free text population filter that adds to the filter formed by `pop_var` and `pop_value`. Recommended for ad-hoc testing of different sub populations. - -The approach is that all records that are irrelevant for the endpoint are removed at an early stage, so that only the relevant analysis population is exposed to the statistical functions and criteria functions. - -# Example - -##### Ex 3.1 - -Here is an example of a specification of the analysis data using the safety population flag (assumed to be present in the input data returned from `data_prepare`: - -```{r, eval=FALSE} -# Example of partial endpoint specification of population filter -ep_spec_ex3_1 <- mk_endpoint_str(pop_var = "SAFFL", - pop_value = "Y", - ...) -``` - -##### Ex 3.2 - -The population may be further filtered by adding a custom filter. -For example, in addition to the population filter from the example above you may add a custom filter so that only subjects aged at least 55 years are inlcuded in the population. 
- -```{r, eval=FALSE} -# Example of partial endpoint specification of analysis population -ep_spec_ex3_2 <- mk_endpoint_str(pop_var = "SAFFL", - pop_value = "Y", - custom_pop_filter = "AGE >= 55", - ...) -``` diff --git a/vignettes/ep_spec_strata_def.Rmd b/vignettes/ep_spec_strata_def.Rmd deleted file mode 100644 index 2ff5fd3..0000000 --- a/vignettes/ep_spec_strata_def.Rmd +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "Strata" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Strata} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Overview - -The `stratify_by` endpoint parameter specifies the variables in the analysis data set on which the population will be sliced in the endpoint for more granular comparison on treatment arm effects. - -These are the key properties of the `stratify_by` parameter: - -* `stratify_by` is a `list()` object -* The list may be empty if no stratification is needed. -* Regardless of whether any strata are specified (and meet the strata criteria c.f. [Criteria methods](methods_criteria.html)) or not, the endpoint statistics will always be derived for the *total* analysis population in the endpoint. The *total* is a special stratification with only one level. -* The stratum values are not required in `stratify_by`. All relevant statistics will be applied to each stratum. -* The strata variables are applied independently, i.e. strata will not be combined. So if e.g. you stratify by age group and gender, you will not have cross combination such as `SEX="F" & AGEGR="<65"`. - -If cross combinations of two stratum variables `A` and `B` are required, either: - -1. Create a derived variable in `data_prepare` that combines `A` and `B` and stratify on this new variable. -2. Use `endpoint_filter` or `group_by` to group or filter the data on `A` and then stratify on `B` within each group level. - -The `only_strata_with_events` endpoint parameter enables the user to trim the set of presented stratification levels to those that only contain events in either of the treatment arms. -Activating this trimming may save a considerable amount of computation time if the combination of `group_by` and `stratify_by` forms many hundreds of strata levels of which only a handful of strata levels have events and are of interest. - -# Examples - -##### Ex 5.1 - - -```{r, eval = FALSE} -# Example of partial endpoint specification with a strata definition -ep_spec_ex5_2 <- mk_endpoint_str(stratify_by = list(c("SEX")), - ...) -``` - -##### Ex 5.2 - -Two strata set: - -```{r, eval = FALSE} -# Example of partial endpoint specification with a strata definition -ep_spec_ex5_2 <- mk_endpoint_str(stratify_by = list(c("SEX", "AGEGGR1")), - ...) -``` - -##### Ex 5.3 - -No strata, in which case only the *total* will be present in the endpoint: - -```{r, eval = FALSE} -# Example of partial endpoint specification with a strata definition -ep_spec_ex5_3 <- mk_endpoint_str(stratify_by = list(), - ...) 
-``` diff --git a/vignettes/ep_spec_treatment_arms.Rmd b/vignettes/ep_spec_treatment_arms.Rmd deleted file mode 100644 index 8879f19..0000000 --- a/vignettes/ep_spec_treatment_arms.Rmd +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "Treatment Arms" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Treatment Arms} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Overview - -The treatment arm specifications are used to split the analysis data in separate treatment arm that are assessed and compared in the endpoint. - -In the endpoint specification the following two parameters must be set: - -* **treatment_var**: Name of the variable in the analysis data set that contains the treatment arms. -* **treatment_refval**: The value of `treatment_var` that corresponds to the reference/intervention. This may be used for asymmetric statistics that compare the treatment effects. - -# Example - -##### Ex 2.1 - -Here is an example of a partial endpoint specification with treatment arm specifications: - -```{r, eval=FALSE} -# Example of endpoint specification of treatment arms. -ep_spec_ex2_1 <- mk_endpoint_str(treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - ... -) -``` diff --git a/vignettes/example_end2end.Rmd b/vignettes/example_end2end.Rmd deleted file mode 100644 index 3d3ed70..0000000 --- a/vignettes/example_end2end.Rmd +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "End-to-end Example" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{End-to-end Example} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -# Introduction diff --git a/vignettes/example_ep_spec.Rmd b/vignettes/example_ep_spec.Rmd deleted file mode 100644 index 175e157..0000000 --- a/vignettes/example_ep_spec.Rmd +++ /dev/null @@ -1,693 +0,0 @@ ---- -title: "Library of Endpoint Specifications" -resource_files: - - vignettes/figures -output: - rmarkdown::html_vignette: - toc: true -vignette: > - %\VignetteIndexEntry{Library of Endpoint Specifications} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{knitr::rmarkdown} -editor_options: - markdown: - wrap: sentence ---- - -```{r, include = FALSE} -library(magrittr) -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - - - - - - - - - - - - -## Introduction -This vignette provides a library of endpoint examples, derived endpoint specifications and associated ADaM functions and statistical functions. 
- - -## ADaM functions {.tabset .tabset-fade .tabset-pills } - -#### adam_01 -```{r eval=FALSE} -# Merge ADSL and ADAE from {pharmaverseadam} -mk_adam_01 <- function(study_metadata) { - - # Read ADSL - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - - # Filter treatment arms - adsl <- adsl[adsl$TRT01A %in% c('Placebo', 'Xanomeline High Dose')] - - # New derived ADSL age group variable - adsl[, AGEGR2 := data.table::fcase(AGE < 65, "AGE < 65", - AGE >= 65, "AGE >= 65", - default = NA)] - - # New derived ADSL variable for SEX - adsl[, SEX2 := fcase(SEX == "F", "Female", - SEX == "M", "Male", - default = NA)] - - # Read ADAE - adae <- data.table::as.data.table(pharmaverseadam::adae) - - # Merge ADSL and ADAE - adam_out <- - merge(adsl, adae[, c(setdiff(names(adae), names(adsl)), "USUBJID"), with = - F], by = "USUBJID", all = TRUE) - - # Create synthetic period flag var (for demo purpose) - set.seed(123) # set seed for reproducibility - adam_out[["ANL01FL"]] <- sample(c("Y", ""), size = nrow(adam_out), replace = TRUE, prob = c(0.5, 0.5)) - - return(adam_out) -} -``` - -#### adam_02 -```{r eval=FALSE} -# Merge ADSL and ADVS from {pharmaverseadam} -mk_adam_02 <- function(study_metadata) { - - # Read ADSL - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - - # Filter treatment arms - adsl <- adsl[adsl$TRT01A %in% c('Placebo', 'Xanomeline High Dose')] - - # New derived ADSL age group variable - adsl[, AGEGR2 := data.table::fcase(AGE < 65, "AGE < 65", - AGE >= 65, "AGE >= 65", - default = NA)] - - # New derived ADSL variable for SEX - adsl[, SEX2 := fcase(SEX == "F", "Female", - SEX == "M", "Male", - default = NA)] - - # Read ADVS - advs <- data.table::as.data.table(pharmaverseadam::advs) - - # Identify baseline body weight - advs_bw <- advs[advs$PARAMCD == "WEIGHT" & advs$VISIT == "BASELINE"] - - # Create new variable BW_BASELINE - advs_bw[["BW_BASELINE"]] <- advs_bw[["AVAL"]] - - # Merge ADSL, ADAE and baseline body weight from ADVS - adam_out <- - merge(adsl, advs_bw[, c("BW_BASELINE", "USUBJID")], by = "USUBJID", all.x = TRUE) - - # Create synthetic period flag var (for demo purpose) - set.seed(123) # set seed for reproducibility - adam_out[["ANL01FL"]] <- sample(c("Y", ""), size = nrow(adam_out), replace = TRUE, prob = c(0.5, 0.5)) - - return(adam_out) -} -``` - -#### adam_03 -```{r eval=FALSE} -# Merge ADSL and ADLB from {pharmaverseadam} -mk_adam_03 <- function(study_metadata) { - - # Read ADSL - adsl <- data.table::as.data.table(pharmaverseadam::adsl) - - # Filter treatment arms - adsl <- adsl[adsl$TRT01A %in% c('Placebo', 'Xanomeline High Dose')] - - # New derived ADSL age group variable - adsl[, AGEGR2 := data.table::fcase(AGE < 65, "AGE < 65", - AGE >= 65, "AGE >= 65", - default = NA)] - - # New derived ADSL variable for SEX - adsl[, SEX2 := fcase(SEX == "F", "Female", - SEX == "M", "Male", - default = NA)] - - # Read ADLB - adlb <- data.table::as.data.table(pharmaverseadam::adlb) %>% - .[.[["PARAMCD"]] == 'SODIUM' & .[["AVISIT"]] %in% c("Baseline", "Week 8", "Week 16"),] - - adlb2 <- - merge(adlb, - adlb[adlb$AVISIT == "Baseline", c("USUBJID", "AVAL")], - by = "USUBJID", all.x = TRUE) - - adlb2[["VALUE_BASELINE"]] <- adlb2[["AVAL.y"]] - adlb2[["VALUE_CHANGE"]] <- adlb2[["AVAL.x"]] - adlb2[["AVAL.y"]] - - # Merge ADSL and ADLB - adam_out <- - merge(adsl, adlb2[,c("USUBJID","PARAMCD","AVISIT","VALUE_BASELINE","VALUE_CHANGE", "ANL01FL")], by = "USUBJID", all.x = TRUE) - - return(adam_out) -} -``` - -## Criteria functions {.tabset .tabset-fade .tabset-pills } - -#### 
ep_crit_min_subev_by_trt -```{r eval=FALSE} -# Endpoint criteria function: Accept endpoint if a minimum number of subjects -# with events is present in one or both treatment arms -ep_crit_min_subev_by_trt <- function(dat, - event_index , - treatment_var, - subjectid_var, - min_n_subev, - requirement_type = c("any", "all"), - ...) { - requirement_type <- match.arg(requirement_type) - - # Evaluate if each treatment arm has enough subjects with events - n_subev_eval <- dat[J(event_index)] %>% - unique(., by = c(subjectid_var, treatment_var)) %>% - .[, .(n_subev = .N), by = eval(treatment_var)] %>% - .[["n_subev"]] >= min_n_subev - - if (requirement_type == "any") { - return(any(n_subev_eval)) - } - return(all(n_subev_eval)) -} -``` - - -## Statistical functions - -### By treatment arm and strata {.tabset .tabset-fade .tabset-pills} - -#### N -```{r eval=FALSE} -# Number of subjects -n_sub <- function(dat, - cell_index, - subjectid_var, - ...) { - - stat <- dat[J(cell_index)] %>% - unique(., by = c(subjectid_var)) %>% - nrow() - - return(data.table( - description = "Number of subjects", - label = "N", - value = stat - )) -} -``` - -#### n -```{r eval=FALSE} -# Number of subjects with events -n_subev <- function(dat, - event_index, - cell_index, - subjectid_var, - ...) { - - stat <- dat[J(intersect(cell_index, event_index))] %>% - unique(., by = c(subjectid_var)) %>% - nrow() - - return(data.table( - description = "Number of subjects with events", - label = "n", - value = stat - )) -} -``` - -#### % -```{r eval=FALSE} -# Proportion of subjects with events -p_subev <- function(dat, - event_index, - cell_index, - subjectid_var, - ...) { - - n_sub <- dat[J(cell_index)] %>% - unique(., by = c(subjectid_var)) %>% - nrow() - - n_subev <- dat[J(intersect(cell_index, event_index))] %>% - unique(., by = c(subjectid_var)) %>% - nrow() - - out <- - data.table(description = "Proportion of subjects with events", - label = "(%)", - value = n_subev / n_sub * 100) - - return(out) -} -``` - -#### mean -```{r eval=FALSE} -# Mean value -mean_value <- function(dat, - event_index, - cell_index, - subjectid_var, - var, - ...) { - stat <- dat[J(intersect(cell_index, event_index))] %>% - unique(., by = c(subjectid_var)) %>% - .[[var]] %>% - mean() - - - return(data.table( - description = "Summary statistics", - label = "mean", - value = stat - )) -} -``` - -#### SD -```{r eval=FALSE} -# Standard deviation -sd_value <- function(dat, - event_index, - cell_index, - subjectid_var, - var, - ...) { - stat <- dat[J(intersect(cell_index, event_index))] %>% - unique(., by = c(subjectid_var)) %>% - .[[var]] %>% - sd() - - return(data.table( - description = "Summary statistics", - label = "sd", - value = stat - )) -} -``` - -#### summary stats -```{r eval=FALSE} -# Summary statistics -summary_stats <- function(dat, - event_index, - cell_index, - subjectid_var, - var, - var_type = c("cont", "cat"), - ...) 
{ - - # Check argument - var_type <- match.arg(var_type) - - # Filter analysis data to cell specific content - dat_cell <- dat[J(intersect(cell_index, event_index))] %>% - unique(., by = c(subjectid_var)) - - # Return statistics depending on the type of variable (continuous or categorical) - if (var_type == "cont") { - stat <- dat_cell %>% - dplyr::summarize( - mean = mean(get(var)), - median = median(get(var)), - sd = sd(get(var)), - min = min(get(var)), - max = max(get(var)), - n_nonmiss = sum(!is.na(get(var))), - n_miss = sum(is.na(get(var))) - ) - } else { - stat <- dat_cell %>% - dplyr::summarize(n_nonmiss = sum(!is.na(get(var))), - n_miss = sum(is.na(get(var)))) - } - - return(data.table( - description = "Summary statistics", - label = names(stat), - value = as.list(stat) - )) -} -``` - -### By strata and across treatment arms {.tabset .tabset-fade .tabset-pills} - -#### RR -```{r eval=FALSE} -# Relative Risk -rr <- function(dat, - event_index, - cell_index, - strata_var, - strata_val, - treatment_var, - treatment_refval, - subjectid_var, - ...) { - - # - - return(data.table( - description = "Relative Risk", - label = "RR", - value = NA - )) -} - -``` - -#### OR -```{r eval=FALSE} -# Odds Ratio -or <- function(dat, - event_index, - cell_index, - strata_var, - strata_val, - treatment_var, - treatment_refval, - subjectid_var, - ...) { - - # - - return(data.table( - description = "Odds Ratio", - label = "OR", - value = NA - )) -} -``` - -#### p-value -```{r eval=FALSE} -pval <- function(dat, - event_index, - cell_index, - strata_var, - strata_val, - treatment_var, - treatment_refval, - subjectid_var, - ...) { - - # - - return(data.table( - description = "p-value ", - label = "p-value", - value = NA - )) -} -``` - - -#### Hedge's G -```{r eval=FALSE} -hedgesg <- function(dat, - event_index, - cell_index, - strata_var, - strata_val, - treatment_var, - treatment_refval, - subjectid_var, - ...) { - - # - - return(data.table( - description = "Hedges G", - label = "Hedge's G", - value = NA - )) -} -``` - - -#### Helper functions -```{r eval=FALSE} -# -``` - -### Across strata and treatment arms {.tabset .tabset-fade .tabset-pills} - -#### p-value interaction -```{r eval=FALSE} -pval_i <- function(dat, - event_index, - strata_var, - treatment_var, - treatment_refval, - subjectid_var, - ...) { - - # - - return(data.table( - description = "p-value interaction test", - label = "Interaction p-value", - value = NA - )) -} -``` - -#### Helper functions -```{r eval=FALSE} -# -``` - -## Endpoint specification: Binary outcomes - -Suppose we want to produce statistics for the following type of endpoint: - -```{r fig_ep_ex1, echo=FALSE, out.width="100%", fig.align = "left"} -knitr::include_graphics("./figures/ep_spec_mockup_binary.png") -``` - -We note that the endpoint specification must contain statistics both by strata and treatment arm (`N`, `n`, `%`), by strata across treatment arms (`RR`, `OR`, `P-value`) and across strata and treatment arms (`P-value interaction`) which must be calculated on two strata (`Age`, `Sex`). - -We assume the endpoint concerns adverse events and must be applied to the full safety population set (`SAFFL="Y"`). - -Let us consider different variations of this endpoint specification in the cases below. -In the examples we demonstrate how to apply the statistics either on the full population, by grouping on system organ class (SOCs) and severity, and filtered on severity. 
- -Note that the set of SOCs is trimmed according to an endpoint criterion function (`ep_crit_min_subev_by_trt`), so that only SOCs with at least 5 subjects with events in at least one treatment arm are included. - -##### Case 1: Adverse events (base form) - -```{r eval=FALSE} -ep_spec <- mk_endpoint_str( - data_prepare = mk_adam_01, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("AGEGR2", "SEX2")), - stat_by_strata_by_trt = list(n_sub, n_subev, p_subev), - stat_by_strata_across_trt = list(rr, or, pval), - stat_across_strata_across_trt = list(pval_i), - endpoint_label = "Adverse events - " -) -``` - - -##### Case 2: Adverse events grouped by system organ class - -```{r eval=FALSE} -ep_spec <- mk_endpoint_str( - data_prepare = mk_adam_01, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - group_by = list(list(AESOC = c())), - stratify_by = list(c("AGEGR2", "SEX2")), - stat_by_strata_by_trt = list(n_sub, n_subev, p_subev), - stat_by_strata_across_trt = list(rr, or, pval), - stat_across_strata_across_trt = list(pval_i), - crit_endpoint = list( - c( - ep_crit_min_subev_by_trt, - min_n_subev = 5, - requirement_type = "any" - ) - ), - endpoint_label = "Adverse events - - " -) -``` - -##### Case 3: Mild adverse events grouped by system organ class - -```{r eval=FALSE} -ep_spec <- mk_endpoint_str( - data_prepare = mk_adam_01, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - endpoint_filter = 'ASEV == "MILD"', - group_by = list(list(AESOC = c())), - stratify_by = list(c("AGEGR2", "SEX2")), - stat_by_strata_by_trt = list(n_sub, n_subev, p_subev), - stat_by_strata_across_trt = list(rr, or, pval), - stat_across_strata_across_trt = list(pval_i), - crit_endpoint = list( - c( - ep_crit_min_subev_by_trt, - min_n_subev = 5, - requirement_type = "any" - ) - ), - endpoint_label = "Mild adverse events - " -) -``` - -##### Case 4: Adverse events grouped by severity and system organ class - -```{r eval=FALSE} -ep_spec <- mk_endpoint_str( - data_prepare = mk_adam_01, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - group_by = list(list(ASEV = c(), AESOC = c())), - stratify_by = list(c("AGEGR2", "SEX2")), - stat_by_strata_by_trt = list(n_sub, n_subev, p_subev), - stat_by_strata_across_trt = list(rr, or, pval), - stat_across_strata_across_trt = list(pval_i), - crit_endpoint = list( - c( - ep_crit_min_subev_by_trt, - min_n_subev = 5, - requirement_type = "any" - ) - ), - endpoint_label = " adverse events - - " -) -``` - -## Endpoint specification: Demographics - -Suppose we want to produce a set of baseline summary statistics as follows: - -```{r fig_ep_ex2, echo=FALSE, out.width="100%", fig.align = "left"} -knitr::include_graphics("./figures/ep_spec_mockup_demographics.png") -``` - -This endpoint contains both continuous variables (`Age`, `Body Weight at Baseline`) and categorical variables (`Age Group`, `Sex`) for which different statistics are applied. So the endpoint will be split up in two endpoint specifications, one for the continuous variables (case 5), and one for the categorical variables (case 6). 
- -In both cases a single statistical function is applied, (`summary_stats`) which compactly supplies all required statistics in one function call. Alternatively, we can define separate functions for each statistics and apply the individually in the endpoint specification. - -##### Case 5: Baseline characteristics of analysis population (continuous variables) - -```{r eval=FALSE} -ep_spec_pt1 <- mk_endpoint_str( - data_prepare = mk_adam_02, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - stat_by_strata_by_trt = list( - "summary_stats: AGE" = c(summary_stats, var = "AGE", var_type = "cont"), - "summary_stats: BW_BASELINE" = c(summary_stats, var = "BW_BASELINE", var_type = "cont") - ), - endpoint_label = "Demographics endpoint (continuous measures)" -) -``` - -##### Case 6: Baseline characteristics of analysis population (categorical variables) -```{r eval=FALSE} -ep_spec_pt2 <- mk_endpoint_str( - data_prepare = mk_adam_02, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_var = "SAFFL", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - stratify_by = list(c("AGEGR2", "SEX2")), - stat_by_strata_by_trt = list( - "summary_stats: AGEGR2" = c(summary_stats, var = "AGEGR2", var_type = "cat"), - "summary_stats: SEX2" = c(summary_stats, var = "SEX2", var_type = "cat") - ), - endpoint_label = "Demographics endpoint (categorical measures)" -) -``` - -We then collect the two endpoint specifications to cover the complete endpoint: - -```{r eval=FALSE} -ep_spec <- rbind(ep_spec_pt1, ep_spec_pt2) - -``` - -## Endpoint specification: Continuous outcomes - -Suppose we want to produce the following set of baseline and change from baseline summary statistics on a given measure: - -```{r fig_ep_ex3, echo=FALSE, out.width="100%", fig.align = "left"} -knitr::include_graphics("./figures/ep_spec_mockup_continuous.png") -``` - -We observe that mean and SD are calculated on each combination of treatment arm, strata (total and age), time (baseline, week 8 and 16). -By defining time (`AVISIT`) as the endpoint grouping we can obtain all the statistics in one endpoint specification. This implies that: - -* The mean and SD are calculated by treatment and strata levels (`stat_by_strata_by_trt`) on both the analysis data columns containing the baseline (`VALUE_BASELINE`) and change from baseline column (`VALUE_CHANGE`). -* Number of subjects (`N`) is also calculated by treatment and strata level (`stat_by_strata_by_trt`). -* For each endpoint grouping (`AVISIT`) Hedge's G is calculated by each strata level and across treatment arms (`stat_by_strata_across_trt`). - -The endpoint specification is shown in case 7. 
- -##### Case 7: Summary statistics on baseline and change from baseline - -```{r eval=FALSE} -ep_spec <- mk_endpoint_str( - data_prepare = mk_adam_03, - treatment_var = "TRT01A", - treatment_refval = "Xanomeline High Dose", - pop_value = "Y", - period_var = "ANL01FL", - period_value = "Y", - group_by = list(list(AVISIT = c())), - stratify_by = list(c("AGEGR2")), - stat_by_strata_by_trt = list( - "n_sub" = n_sub, - "mean: VALUE_BASELINE" = c(mean_value, var = "VALUE_BASELINE"), - "mean: VALUE_CHANGE" = c(mean_value, var = "VALUE_CHANGE"), - "sd: VALUE_BASELINE" = c(sd_value, var = "VALUE_BASELINE"), - "sd: VALUE_CHANGE" = c(sd_value, var = "VALUE_CHANGE") - ), - stat_by_strata_across_trt = list(hedgesg), - endpoint_label = "Baseline and change from baseline on SODIUM - " -) -``` diff --git a/vignettes/figures/StatisticsDescription.png b/vignettes/figures/StatisticsDescription.png deleted file mode 100644 index 4839356..0000000 Binary files a/vignettes/figures/StatisticsDescription.png and /dev/null differ diff --git a/vignettes/figures/criteria_eval.png b/vignettes/figures/criteria_eval.png deleted file mode 100644 index 3cdfbce..0000000 Binary files a/vignettes/figures/criteria_eval.png and /dev/null differ diff --git a/vignettes/figures/criteria_levels.png b/vignettes/figures/criteria_levels.png deleted file mode 100644 index 2d6d816..0000000 Binary files a/vignettes/figures/criteria_levels.png and /dev/null differ diff --git a/vignettes/figures/debug_example.png b/vignettes/figures/debug_example.png deleted file mode 100644 index 32ca774..0000000 Binary files a/vignettes/figures/debug_example.png and /dev/null differ diff --git a/vignettes/figures/endpoint_event.png b/vignettes/figures/endpoint_event.png deleted file mode 100644 index e98c47b..0000000 Binary files a/vignettes/figures/endpoint_event.png and /dev/null differ diff --git a/vignettes/figures/endpoint_event_1.png b/vignettes/figures/endpoint_event_1.png deleted file mode 100644 index 06357d6..0000000 Binary files a/vignettes/figures/endpoint_event_1.png and /dev/null differ diff --git a/vignettes/figures/endpoint_event_2.png b/vignettes/figures/endpoint_event_2.png deleted file mode 100644 index 0900e9b..0000000 Binary files a/vignettes/figures/endpoint_event_2.png and /dev/null differ diff --git a/vignettes/figures/endpoint_event_3.png b/vignettes/figures/endpoint_event_3.png deleted file mode 100644 index c569fcf..0000000 Binary files a/vignettes/figures/endpoint_event_3.png and /dev/null differ diff --git a/vignettes/figures/ep_spec_mockup_binary.png b/vignettes/figures/ep_spec_mockup_binary.png deleted file mode 100644 index a7490bf..0000000 Binary files a/vignettes/figures/ep_spec_mockup_binary.png and /dev/null differ diff --git a/vignettes/figures/ep_spec_mockup_continuous.png b/vignettes/figures/ep_spec_mockup_continuous.png deleted file mode 100644 index 6db1b25..0000000 Binary files a/vignettes/figures/ep_spec_mockup_continuous.png and /dev/null differ diff --git a/vignettes/figures/ep_spec_mockup_demographics.png b/vignettes/figures/ep_spec_mockup_demographics.png deleted file mode 100644 index 4a98686..0000000 Binary files a/vignettes/figures/ep_spec_mockup_demographics.png and /dev/null differ diff --git a/vignettes/figures/mockup_binary.png b/vignettes/figures/mockup_binary.png deleted file mode 100644 index dbccc96..0000000 Binary files a/vignettes/figures/mockup_binary.png and /dev/null differ diff --git a/vignettes/figures/mockup_continuous.png b/vignettes/figures/mockup_continuous.png deleted file mode 
100644 index a4357c2..0000000 Binary files a/vignettes/figures/mockup_continuous.png and /dev/null differ diff --git a/vignettes/figures/mockup_continuous_decomposed.png b/vignettes/figures/mockup_continuous_decomposed.png deleted file mode 100644 index 27b31e4..0000000 Binary files a/vignettes/figures/mockup_continuous_decomposed.png and /dev/null differ diff --git a/vignettes/figures/mockup_demographics.png b/vignettes/figures/mockup_demographics.png deleted file mode 100644 index de3dfea..0000000 Binary files a/vignettes/figures/mockup_demographics.png and /dev/null differ diff --git a/vignettes/figures/mockup_demographics_decomposed.png b/vignettes/figures/mockup_demographics_decomposed.png deleted file mode 100644 index 61931da..0000000 Binary files a/vignettes/figures/mockup_demographics_decomposed.png and /dev/null differ diff --git a/vignettes/figures/pipeline.png b/vignettes/figures/pipeline.png deleted file mode 100644 index 51cc7f8..0000000 Binary files a/vignettes/figures/pipeline.png and /dev/null differ diff --git a/vignettes/methods_criteria.Rmd b/vignettes/methods_criteria.Rmd deleted file mode 100644 index 0c22c98..0000000 --- a/vignettes/methods_criteria.Rmd +++ /dev/null @@ -1,387 +0,0 @@ ---- -title: "Criteria Methods" -output: - rmarkdown::html_vignette: - toc: true -vignette: > - %\VignetteIndexEntry{Criteria Methods} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{knitr::rmarkdown} -editor_options: - markdown: - wrap: sentence ---- - -```{r, include = FALSE} -library(magrittr) -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -## Introduction -This vignette provides a short introduction to the idea of analysis criteria. - - - -The analysis request from the authorities will detail an overall set of endpoints, irrespective of the underlying study data. For example, they might request an analysis of serious adverse events by system organ class, by gender. Such an analyses may contain many cells with zeros, or very low counts. To ensure only analyses that contain meaningfully amounts of data are included, the request may additionally include certain criteria for the inclusion of an analysis, such as a threshold for the minimum number of records, events, or subjects. - -The logic for these analysis inclusion criteria are handled by the functions supplied to the `crit_endpoint`, `crit_by_strata_by_trt`, and `crit_by_strata_across_trt` arguments when defining an endpoint in `mk_endpoints_str()`. - -## Criteria levels -Currently, {chef} supports analysis inclusion criteria for three parts of the analysis output, with colors corresponding to the figure below: - -
1. The entire endpoint
2. The `stat_by_strata_by_trt`
3. The `stat_by_strata_across_trt` and `stat_across_strata_across_trt`
-```{r criteria1, echo=FALSE, out.width="100%"} -knitr::include_graphics("./figures/criteria_levels.png") -``` - -The criteria functions are hierarchical; failure to meet criteria at Level A (Entire Endpoint) implies automatic failure at Levels B and C. Similarly, passing Level A but failing Level B results in failure at Level C. - -If an endpoint does not satisfy a certain criterion, the associated statistical functions will not execute to reduce compute time. For instance, if an endpoint does not meet the criteria at Level B, by strata and across treatment arms analyses will not be performed. However, the analyses for the Totals will still proceed. - -It is important to note that these criteria are optional. Not every endpoint definition needs to incorporate all three levels of criteria. Some analyses may have no criteria, while others might require criteria only at the endpoint or strata. - -**NOTE** that the `crit_by_strata_across_trt` criterion gate-keep both `stat_by_strata_across_trt` and `stat_across_strata_across_trt` (stippled box). The `stat_across_strata_across_trt` is seen as lower on the hierarchical criteria ladder than `stat_by_strata_across_trt` - however it does not have a seperate criterion at this time and will therefore be included by default. - -```{r criteria2, echo=FALSE, out.width="100%"} -knitr::include_graphics("./figures/criteria_eval.png") -``` - -## Function formals - -Chef supplies a number of parameters the criterion functions. The parameters vary according to the statistical method. -Note that for `crit_by_strata_by_trt`, and `crit_by_strata_across_trt` the formals of the functions are identical. - -### Input specifications {.tabset .tabset-fade .tabset-pills} - -The criteria functions are served broad set of parameters. This reflects the need for flexibility in the criteria functions. - -The flexibility allows you to set criteria that affect specific stratum based on that specific stratum or on information shared across multiple strata. - -This allows you to create criteria functions which only target one stratum or act across the whole strata: - - -* Criteria1 requires that in the endpoint the strata GENDER must be balanced. (ie approx 50/50 distribution.) (requires only the `stratify_by` parameter) -* Criteria2 requires that for the endpoint all strata (GENDER, AGE) must be balanced. (requires the `stratify_by` parameter) -* *You may also also design criteria for endpoints, which only includes in the case where there is enough subjects in relevant strata.* - -*See [Examples](#example-functions)* - -_NB_ Similar to the stat methods we require that criteria function include ellipses (`...`) as a wildcard parameter. -This is both a convenience, since you then only need to explicitly state the used parameters in your function definition. However, more importantly it will ensure that a criteria function you define today will also work tomorrow, where {chef} may supply more parameters to the criteria functions. - - -#### endpoint criteria functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Type | Example | Description |
|---|---|---|---|
| `dat` | data.table::data.table | | Dataset containing the [Analysis population](ep_spec_poplation_def.html) |
| `event_index` | List(Integer) | `[1, 3, 5]` | Index (pointing to the `INDEX_` column) of rows with an [Event](ep_spec_event.html) |
| `subjectid_var` | Character | `"USUBJID"` | The column containing the subject id. |
| `treatment_var` | Character | `"treatment_name"` | The column name describing the treatment type. |
| `treatment_refval` | Character | `"Placebo"` | The treatment refval for the `treatment_var` column for the endpoint. |
| `period_var` | Character | `"period_block"` | The column name describing the periods. |
| `period_value` | Character | `"within_trial_period"` | The value in the `period_var` column which is of interest to the endpoint. |
| `endpoint_filter` | Character (escaped) | `"\"someColumn\" == \"someValue\""` | Specific endpoint filter. |
| `endpoint_group_metadata` | List | | Named list containing by_group metadata. |
| `stratify_by` | List(Character) | `['Sex', 'Gender']` | The [strata](ep_spec_strata_def.html) which the endpoint is sliced by. |
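
To make the mapping from this table to code concrete, below is a minimal sketch of an endpoint criterion that uses a few of these formals; the function name and the threshold are purely illustrative and are not part of {chef}:

```{r, eval=FALSE}
# Illustrative sketch (not a {chef} function): accept the endpoint only if at
# least `min_subjects` distinct subjects have an event.
crit_ep_min_subjects_with_events <- function(dat, event_index, subjectid_var,
                                             min_subjects = 5, ...) {
  # `...` absorbs the remaining parameters that chef supplies
  subjects_with_events <- dat[J(event_index), ][[subjectid_var]]
  data.table::uniqueN(subjects_with_events) >= min_subjects
}
```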
- -#### strata criteria functions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Type | Example | Description |
|---|---|---|---|
| `dat` | data.table::data.table | | Dataset containing the [Analysis population](ep_spec_poplation_def.html) |
| `event_index` | List(Integer) | `[1, 3, 5]` | Index (pointing to the `INDEX_` column) of rows with an [Event](ep_spec_event.html) |
| `subjectid_var` | Character | `"USUBJID"` | The column containing the subject id. |
| `treatment_var` | Character | `"treatment_name"` | The column name describing the treatment type. |
| `treatment_refval` | Character | `"Placebo"` | The treatment refval for the `treatment_var` column for the endpoint. |
| `period_var` | Character | `"period_block"` | The column name describing the periods. |
| `period_value` | Character | `"within_trial_period"` | The value in the `period_var` column which is of interest to the endpoint. |
| `endpoint_filter` | Character (escaped) | `"\"someColumn\" == \"someValue\""` | Specific endpoint filter. |
| `endpoint_group_metadata` | List | | Named list containing by_group metadata. |
| `stratify_by` | List(Character) | `['Sex', 'Gender']` | The [strata](ep_spec_strata_def.html) which the endpoint is sliced by. |
| `strata_var` | Character | `"Sex"` | The specific stratification variable which the criterion relates to. |
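
As a rough illustration of how the additional `strata_var` argument might be used, here is a hypothetical strata criterion; the name and logic are illustrative only and not part of {chef}:

```{r, eval=FALSE}
# Illustrative sketch (not a {chef} function): accept the by-strata analyses
# only if every level of the stratification variable has at least one event.
crit_strata_all_levels_have_events <- function(dat, event_index, strata_var, ...) {
  levels_total <- data.table::uniqueN(dat[[strata_var]])
  levels_with_events <- data.table::uniqueN(dat[J(event_index), ][[strata_var]])
  levels_with_events == levels_total
}
```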
-
-### Output specifications
-
-The output of a criteria function must be a simple boolean (`TRUE` or `FALSE`).
-
-### Example functions {.tabset .tabset-fade .tabset-pills}
-
-Below are two examples showcasing how to write criteria functions. It is not within the scope of the {chef} package to provide a library of criteria functions.
-
-#### Endpoint criteria functions
-
-Generic criteria function:
-Allows control over endpoint inclusion based on the count of subjects with events in the arms.
-
-This function is a generic that can then be specified in `mk_endpoint_str()` or curried beforehand ([Stat methods - Currying](methods_stat.html#custom-functions-currying-and-supplying-parameters-)).
-
-```{r, eval=F}
-ep_criteria.treatment_arms_minimum_unique_event_count <- function(
-    dat,
-    event_index,
-    treatment_var,
-    subjectid_var,
-    minimum_event_count,
-    requirement_type = c("any", "all"),
-    ...) {
-  requirement_type <- match.arg(requirement_type)
-
-  # Rows with events (dat is keyed by the index column)
-  dat_events <- dat[J(event_index), ]
-
-  # Boolean of whether the count of unique subjects with events
-  # within each treatment arm is at or above the minimum count.
-  dat_lvl_above_threshold <- dat_events[
-    ,
-    list(is_above_minimum = data.table::uniqueN(get(subjectid_var)) >= minimum_event_count),
-    by = treatment_var
-  ]
-
-  if (requirement_type == "any") {
-    return(any(dat_lvl_above_threshold$is_above_minimum))
-  }
-  all(dat_lvl_above_threshold$is_above_minimum)
-}
-```
-
-#### Strata criteria functions
-
-Only include the across-strata and across-treatment-arm analysis if there are at least X subjects with events in each treatment arm within each stratum.
-For `requirement_type == "any"`, only a single cell (stratum + treatment arm) needs to meet the threshold.
-
-```{r, eval=F}
-ep_strata_criteria.strata_treatment_arm_minimum_unique_count <- function(
-    dat,
-    event_index,
-    treatment_var,
-    subjectid_var,
-    strata_var,
-    minimum_event_count,
-    requirement_type = c("any", "all"),
-    ...) {
-  requirement_type <- match.arg(requirement_type)
-
-  # Rows with events (dat is keyed by the index column)
-  dat_events <- dat[J(event_index), ]
-
-  # Boolean of whether the count of unique subjects with events
-  # within each treatment arm and stratum is at or above the minimum count.
-  dat_lvl_above_threshold <- dat_events[
-    ,
-    list(is_above_minimum = data.table::uniqueN(get(subjectid_var)) >= minimum_event_count),
-    by = c(treatment_var, strata_var)
-  ]
-
-  if (requirement_type == "any") {
-    return(any(dat_lvl_above_threshold$is_above_minimum))
-  }
-  all(dat_lvl_above_threshold$is_above_minimum)
-}
-```
-
-### Applying criteria functions
-
-Criteria functions are supplied to the `mk_endpoint_str()` function and are used to gatekeep which statistical functions are run.
-
-An example of an endpoint could be:
-
-We look at the population with an event E_XYZ.
-
-* We are only interested in the endpoint if we see at least 5 subjects with event E_XYZ in any of the arms.
-* We are only interested in getting descriptive statistics for a stratifier (say GENDER, AGEGRP) if all of its levels have at least 1 subject with an event.
-* Finally, we only run the across-treatment-arm statistics if there are at least 5 subjects with events in each stratum and each treatment arm.
-
-The above requirements/gates are implemented by currying the functions given in the [Examples](#examples) section.
- - -```{r, eval = F} - -# R/project_criteria.R -# Curry the general criteria functions from [Examples](#examples) - -crit_accept_endpoint.5_subjects_any_treatment_arm <- purrr:partial( - ep_criteria.treatment_arms_minimum_unique_event_count, - minimum_event_count = 5, - requirement_type = "any" -) - -crit_strata.1_subject_all_treatment_strata <- purrr:partial( - ep_sg_criteria.sg_treatment_arm_minimum_unique_count, - minimum_event_count = 1, - requirement_type = "all" -) - -crit_strata.5_subject_all_treatment_strata <- purrr:partial( - ep_sg_criteria.sg_treatment_arm_minimum_unique_count, - minimum_event_count = 5, - requirement_type = "all" -) - -# R/project_endpoints.R - -endpoint_XYZ <- mk_endpoint_str( - ..., # Setting the rest of the inputs - crit_endpoint = list(crit_accept_endpoint.5_subjects_any_treatment_arm), - crit_by_strata_by_trt = list(crit_strata.1_subject_all_treatment_strata), - crit_by_strata_across_trt = list(crit_strata.5_subject_all_treatment_strata) -) - -``` diff --git a/vignettes/methods_stat.Rmd b/vignettes/methods_stat.Rmd deleted file mode 100644 index 80ed2c8..0000000 --- a/vignettes/methods_stat.Rmd +++ /dev/null @@ -1,545 +0,0 @@ ---- -title: "Statistical Methods" -output: - rmarkdown::html_vignette: - toc: true - toc_depth: 4 -vignette: > - %\VignetteIndexEntry{Statistical Methods} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} -editor_options: - markdown: - wrap: sentence ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -## Statistical method types in {chef} - -{chef} supports 3 different types of statistical methods. They are differentiated by which inputs they are given and their place in the results table. - -For statistical method in {chef}, we talk about _cell(s)_ which refer both to the location in the result table but more fundamentally to slicing of data relevant to the statistical method type. See more below. - -### stat_by_strata_by_trt - -Those statistics which describes the characteristics for a single stratum and single treatment arm. - -The outputs of these functions are exemplified in the figure below in red. A single _cell_ denoting the output of the statistical function could be: total number of Females in TreatmentA = 120. - -### stat_by_strata_across_trt - -Those statistics which describes a single stratum but across treatments arms. - -The outputs of these functions are exemplified in green in the figure below. For instance one could run a Fischers exact test for 2x2 contigency table (TreatmentA/TreatmentB vs hasEvent/hasNotEvent) but only for Females and get the result of 0.003. - -### stat_across_strata_across_trt - -Defines those statistics which will test the interaction of strata with treatments. - -The outputs of these functions are exemplified in blue in the figure below. For instance one could run a Breslow-Day test testing for effect of stratas (ie. Gender) on the 2x2 contingency tables. - -```{r statDesc, echo=FALSE, out.width="100%"} -knitr::include_graphics("./figures/StatisticsDescription.png") -``` - - - - -## Protocols for statistical functions. - -The supplied inputs to statistical functions will vary dependent on the type and will be described in the following sections. The output however is the same for all statistical functions. - -### Output specification (Shared for all types) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*data.table multirow output*

| label | value | description |
|---|---|---|
| N | 120 | Counts. |
| n | 18 | Counts with events. |
| (%) | 15 | Rel. proportion. |
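
For orientation, here is a sketch of how a statistical function could assemble this three-row output; the counts are the hypothetical values shown in the table above:

```{r, eval=FALSE}
# Hypothetical counts matching the table above
N <- 120 # subjects in the cell
n <- 18  # subjects in the cell with an event

out <- data.table::data.table(
  label = c("N", "n", "(%)"),
  value = c(N, n, 100 * n / N),
  description = c(
    "Subjects",
    "Subjects with events",
    "Proportion of subjects with events"
  )
)
```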
-
-Each statistical function must return a `data.table` object with three columns, `label`, `value`, and `description`, and one or more rows.
-
-For example, for the red sub-box (Female ~ TreatmentA) one could define three functions:
-
-* One returning the total population (`N = 120`)
-* One returning the frequency with events (`n = 18`)
-* And lastly one returning the relative frequency (`(%) = 15%`)
-
-However, one could also have a single function returning all three as a single data.table with 3 rows.
-
-**NB** The description is also the place to put information about a potential model you are including. You may have stat functions that use different methods for obtaining p-values depending on the data; you can then reflect the choice of method in the description.
-
-### Input specifications and examples {.tabset}
-
-Common to all functions are the `dat` parameter (data in a `data.table::data.table`) and some `index_*` parameters.
-The `index_*` parameters point to a column in `dat` called `INDEX_`, which `dat` is also keyed by.
-The corresponding rows can therefore be obtained with a keyed join, e.g. `dat[J(cell_index)]`.
-
-#### stat_by_strata_by_trt
-
-{chef} will always supply the following parameters to `stat_by_strata_by_trt` functions.
-You must therefore either include them in the function definition or add the ellipsis `...` to ignore them.
*Input Specification*

| Parameter | Datatype | Description |
|---|---|---|
| dat | data.table | datatable containing the full in-risk population. |
| event_index | list | indexes (the `_INDEX_` column in data) of rows with an event. |
| cell_index | list | indexes (the `_INDEX_` column in data) of rows matching the cell.* |
| strata_var | character | Name of the column describing the strata. |
| strata_val | character \| numeric | specific value or level for the strata. |
| treatment_var | character | Name of the column describing the treatment. |
| treatment_val | character \| numeric | specific value or level for the treatment. |
| subjectid_var | character | Name of the column that has the subject id. |
-
-_* The cell_index will for instance point at those rows where SEX=M and Treatment=A._
-
-*Example function*
-```{r input_spec_trt_sbglevel, title="Example function", echo=T, eval=F, out.width="100%"}
-# Function to calculate the total sub-population and those with events.
-pop_frequency_with_events <- function(
-    dat,
-    event_index,
-    cell_index,
-    subjectid_var,
-    ... # Note we won't use the extra supplied parameters, so we absorb them with dots.
-    ) {
-  # Total number of subjects in the sub-population, i.e. Gender=Male, Treatment=A (data.table logic)
-  N <- dat[J(cell_index), .SD, .SDcols = (subjectid_var)] %>%
-    data.table::uniqueN()
-
-  # Number of subjects in the sub-population with an event
-  n <- dat[
-    J(intersect(event_index, cell_index)),
-    .SD, .SDcols = (subjectid_var)
-  ] %>%
-    data.table::uniqueN()
-
-  # Frequency of events in the sub-population in %
-  n_f <- (n / N) * 100
-
-  # Prepare output.
-  out <- data.table::data.table(
-    label = c("N", "n", "(%)"),
-    value = c(N, n, n_f),
-    description = c(
-      "Subjects",
-      "Subjects with events",
-      "Proportion of subjects with events."
-    )
-  )
-  return(out)
-}
-```
-
-#### stat_by_strata_across_trt
-
*Input Specification*

| Parameter | Datatype | Description |
|---|---|---|
| dat | data.table | datatable containing the full in-risk population. |
| event_index | list | indexes (the `_INDEX_` column in data) of rows with an event. |
| cell_index | list | indexes (the `_INDEX_` column in data) of rows matching the cell.* |
| strata_var | character | Name of the column describing the strata. |
| strata_val | character \| numeric | specific value or level for the strata. |
| treatment_var | character | Name of the column describing the treatment. |
| treatment_refval | character \| numeric | specific value or level for the treatment. |
| subjectid_var | character | Name of the column that has the subject id. |
-
-_* Those rows where `strata_var == strata_val` (e.g. SEX == "M")_
-
-*Example function*
-```{r input_spec_sbglevel, title="Example function", echo=T, eval=F, out.width="100%"}
-# Contingency 2x2 table test for the total number of events
-contingency2x2_ptest <- function(
-    dat,
-    event_index,
-    cell_index,
-    treatment_var,
-    ... # Note we won't use the extra supplied parameters, so we absorb them with dots.
-    ) {
-  # Test a 2x2 contingency table, i.e. is there a link between treatment and the total number of events
-  dat_cell <- dat[J(cell_index), ]
-  dat_cell[, is_event := INDEX_ %in% event_index]
-
-  count_table <- dat_cell[, .SD, .SDcols = c("is_event", treatment_var)] %>%
-    table()
-
-  res <- fisher.test(
-    count_table,
-    conf.int = TRUE
-  )
-
-  # Prepare output. The two confidence interval bounds each get their own row.
-  out <- data.table::data.table(
-    label = c("Pval_independency", "CI_lower", "CI_upper"),
-    value = c(res$p.value, res$conf.int[1], res$conf.int[2]),
-    description = "Fisher's exact test of independence of rows and columns in a contingency table."
-  )
-  return(out)
-}
-```
-
-#### stat_across_strata_across_trt
-
*Input Specification*

| Parameter | Datatype | Description |
|---|---|---|
| dat | data.table | datatable containing the full in-risk population. |
| event_index | list | indexes (the `_INDEX_` column in data) of rows with an event. |
| strata_var | character | Name of the column describing the strata. |
| treatment_var | character | Name of the column describing the treatment. |
| treatment_refval | character \| numeric | specific value or level for the treatment. |
| subjectid_var | character | Name of the column that has the subject id. |
-
-```{r input_spec_sbg, title="Example function", echo=T, eval=F, out.width="100%"}
-# Cochran-Mantel-Haenszel test for odds ratios across strata
-contingency2x2_strata_test <- function(
-    dat,
-    event_index,
-    strata_var,
-    treatment_var,
-    subjectid_var,
-    ...
-    ) {
-  # Test 2x2 contingency tables across strata (strata_var), i.e. is there a link
-  # between treatment and patients with events over multiple strata
-  dt_unique_subjects <- dat %>%
-    unique(by = subjectid_var)
-  dt_unique_subjects[, is_event := INDEX_ %in% event_index]
-
-  count_table <- dt_unique_subjects[
-    ,
-    .SD,
-    .SDcols = c("is_event", treatment_var, strata_var)
-  ] %>%
-    table()
-
-  res <- stats::mantelhaen.test(count_table)
-
-  # Prepare output.
-  out <- data.table::data.table(
-    label = c("Pval_independency", "CI_lower", "CI_upper"),
-    value = c(res$p.value, res$conf.int[[1]], res$conf.int[[2]]),
-    description = "Cochran-Mantel-Haenszel test for odds ratios across strata."
-  )
-  return(out)
-}
-```
-
-## Adding new statistical methods in {chef}
-
-Statistical methods can be supplied by multiple means:
-
-### Use statistical functions defined in external packages
-
-When using statistical methods from an external package you must include the import statement `library(ExternalPackage)` in the chef project's `R/packages.R` file. See [chef setup](chef.html) for how to organize your project.
-
-Unless the external library is made to work with {chef}, you will likely need a wrapper function in order to conform with the input and output specification of statistical functions in {chef}, similar to the `contingency2x2_strata_test` function above.
-
-### Use of custom statistical functions
-
-You may also define statistical functions from scratch to use in the pipeline.
-Simply define the function in e.g. `R/SomeFileWithStatFunctions.R` and apply it as described in the section *Applying statistical methods in endpoints* below.
-
-## Applying statistical methods in endpoints
-
-Statistical methods are supplied to chef via the `mk_endpoint_str()` function.
-The function has 3 parameters, one for each of the function types: `stat_by_strata_by_trt`, `stat_by_strata_across_trt`, and `stat_across_strata_across_trt`.
-Each parameter takes a named list of one or more statistical functions and any optional parameters you wish to pass.
-
-An input could look like:
-
-```{r, eval=F}
-mk_endpoint_def <- function() {
-  mk_endpoint_str(
-    ..., # Other inputs
-    stat_by_strata_by_trt = list(pop_frequency_with_events)
-  )
-}
-
-# Or adding a custom name
-mk_endpoint_def <- function() {
-  mk_endpoint_str(
-    ..., # Other inputs
-    stat_by_strata_by_trt = list("pop_freq_events" = pop_frequency_with_events)
-  )
-}
-
-# It may also be that you want to add a parameter to the statistical function (fictional in this case)
-mk_endpoint_def <- function() {
-  mk_endpoint_str(
-    ..., # Other inputs
-    stat_by_strata_by_trt = list(
-      "pop_freq_events_with_na" = c(pop_frequency_with_events, keep.na = TRUE)
-    )
-  )
-}
-```
-
-By adding extra parameters to your functions you gain extra flexibility in designing your statistical functions without having to define multiple functions with almost the same functionality.
-
-### Custom functions, currying, and supplying parameters
-
-The framework allows a high degree of flexibility in supplying statistical functions, as only the inputs and outputs are specified. Therefore, one may be tempted to add functions with a lot of parameters that are then supplied during the setup of endpoints via the `mk_endpoint_str()` function.
- -However, we urge that you create partialised or curried functions and define the in `R/statistical_functions.R` and then call them using the simplified calls in `mk_endpoint_str`. The motivation for this proposal is ease of debugging and easy to read endpoints. - - -#### A toy example showcasing the use of currying. - -Imagine that stats4chef package contains the following function. - -```{r} -chef_percentiles <- function( - dat, - cell_index, - target_column, - percentile, - type = 7, - ... -){ - - label = paste0("Percentile(", percentile, ")") - vals = dat[J(cell_index), .SD, .SDcols = c(target_column)] - q = stats::quantile(vals, probs = percentile, type=type) - - out = data.table::data.table( - label = label, - value = q - ) - return(out) -} - -``` - -Then one could use it directly to get the median of age in a given stratum and treatment arm as: - -```{r,eval=F} -mk_endpoint_def <- function() { - mk_endpoint_str( - ..., #Other inputs - stat_by_strata_by_trt = list( - "median_age" = c( - stats4chef::chef_percentiles, - target_column = "AGE", - percentile = 0.5)) - ) -} -``` -*However, that quickly becomes quite hard to read.* - ---- - -The suggested approach would therefor be to curry the function. - -When currying you set a number of the parameters before executing the function. -We suggest using the `purrr` package, however you can also create a wrapper function instead. - - -In the project have a file `R/specified_stats.R` -```{r, eval=F} -library(stats4chef) -library(purrr) - -#Recommended -median_age <- purrr::partial( - stats4chef::chef_percentiles, - target_column = "AGE", - percentile = 0.5 -) - -#Alternative (Not recommended) -median_age <- function( - dat, - cell_index, - ... -){ - return( - stats4chef::chef_percentiles( - dat = dat, - target_column = "AGE", - percentile = 0.5 - ) - ) -} - - -``` - -Which can then be used in the end point definition as: - -Allowing a much cleaner endpoint definition. - -```{r,eval=F} -mk_endpoint_def <- function() { - mk_endpoint_str( - ..., #Other inputs - stat_by_strata_by_trt = list(median_age) - ) -} -``` diff --git a/vignettes/results_datamodel.Rmd b/vignettes/results_datamodel.Rmd deleted file mode 100644 index 15b6bc3..0000000 --- a/vignettes/results_datamodel.Rmd +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: "Result Data Model" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Result Data Model} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r setup, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -The primary output from {chef} is a `data.table` containing the statistics for each stratum within each endpoint based on the specified analysis data for each endpoint. -For bookkeeping, any endpoints or strata that have not been accepted are stored in another output `data.table`, which has the same data structure, except it only contains a subset of relevant columns. - -The columns in the primary output contain information on six different hierarchical levels, meaning that e.g. information on level 2 is a sub level of level 1. - -The table utilizes a long format structure, ensuring that each row corresponds to the most granular level of information (level 6). This level comprises individual statistical outcomes derived from the applied statistical functions. - -The six information levels in the output data from {chef} are: - -1. **Endpoint specification**. This may entail multiple endpoint specifications within the output. -2. **Endpoint**. 
If a `group_by` argument is included in the endpoint specification, it will result in a set of endpoints for each group level within that specification. If group_by is absent, only a single endpoint will be generated. -3. **Stratifier**. These are applied to each endpoint. For instance, if the stratifiers are `SEX` and `AGEGR`, the endpoint will be divided into three entities (one for each stratifier, and an additional one for the total, which is a unique, fixed stratification with a singular stratum). -4. **Statistical function**. The R functions designated for application to each stratifier. -5. **Stratum**. The levels of each stratifier. -6. **Statistics**. The statistics corresponding to each stratum. - -The table below describes all the columns in the output table. Note that information on levels 1-5 may be repeated on several rows, since level 6 is the defining row level. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Aggregation level | Column name | Type | Description |
|---|---|---|---|
| Level 1: Endpoint specification | `endpoint_spec_id` | int | Id of endpoint specification. |
| | `study_metadata` | list | Study metadata. |
| | `pop_var` | char | Population filter variable. |
| | `pop_value` | char | Population filter value. |
| | `treatment_var` | char | Treatment variable. |
| | `treatment_refval` | char | Treatment reference value. |
| | `period_var` | char | Period filter variable. |
| | `period_value` | char | Period filter value. |
| | `custom_pop_filter` | char | Custom (free text) population filter. |
| | `endpoint_filter` | char | Endpoint filter expression. |
| | `group_by` | list | List of variables to group by (cross combinations). |
| | `stratify_by` | list | List of variables on which to stratify the analysis data. |
| | `only_strata_with_events` | logical | Optionally specify if all strata levels without events should be removed. Default is `FALSE`. |
| | `key_analysis_data` | char | Id of analysis data set (filtered by population filter). |
| Level 2: Endpoint | `endpoint_group_metadata` | list | Specification of group slice. |
| | `endpoint_group_filter` | char | Filter expression to extract group slice from analysis data. |
| | `endpoint_id` | char | Endpoint id. One endpoint per group slice. |
| | `endpoint_label` | char | Endpoint label. |
| | `event_index` | list | Indices (rows) in the analysis data that are events. |
| | `crit_accept_endpoint` | logical | Evaluation of `crit_endpoint`. |
| Level 3: Stratifier | `strata_var` | char | Stratification variable. |
| | `strata_id` | char | Id of stratifier. |
| | `crit_accept_by_strata_by_trt` | logical | Evaluation of `crit_by_strata_by_trt`. |
| | `crit_accept_by_strata_across_trt` | logical | Evaluation of `crit_by_strata_across_trt`. |
| Level 4: Statistical function | `fn_hash` | char | Id of stat function. |
| | `fn_type` | char | Type of stat function (`stat_by_strata_by_trt`, `stat_by_strata_across_trt`, or `stat_across_strata_across_trt`). |
| | `fn_name` | char | Name of stat function. |
| | `fn_call_char` | char | Stat function parsing (name of stat function and arguments). |
| Level 5: Stratum | `stat_event_exist` | logical | Indicates if the stratum contains any events, i.e. if the intersection of `event_index` and `cell_index` is non-empty (`TRUE`) or not (`FALSE`). |
| | `stat_metadata` | list | Specification of stratum. |
| | `stat_filter` | char | Filter expression to extract stratum from analysis data. |
| | `stat_result_id` | char | Id of statistics produced by stat function. |
| | `cell_index` | list | Indices (rows) in the analysis data included in the stratum. |
| Level 6: Statistics | `stat_result_description` | char | Description of the statistics (returned from statistical function). |
| | `stat_result_label` | char | Label to the statistics (returned from statistical function). |
| | `stat_result_qualifiers` | char | Qualifiers to the statistics (returned from statistical function). |
| | `stat_result_value` | num | The statistical value returned from the statistical function. |
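
Because level 6 defines the row grain, downstream consumers typically subset and reshape this table. Below is a small sketch of such a query, assuming `ep_stats` is the primary output `data.table` described above (the object name is hypothetical):

```{r, eval=FALSE}
library(data.table)

# Pull the by-strata-by-treatment statistics for the first endpoint specification
ep_stats[
  endpoint_spec_id == 1 & fn_type == "stat_by_strata_by_trt",
  .(endpoint_label, strata_var, fn_name, stat_result_label, stat_result_value)
]
```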
diff --git a/vignettes/results_datamodel2tfl.Rmd b/vignettes/results_datamodel2tfl.Rmd deleted file mode 100644 index 073a685..0000000 --- a/vignettes/results_datamodel2tfl.Rmd +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Mapping Results to TFLs" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Mapping Results to TFLs} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r setup, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -No documentation yet. diff --git a/vignettes/table_gen_templates/method_stat_input_ep_crit.tgn b/vignettes/table_gen_templates/method_stat_input_ep_crit.tgn deleted file mode 100644 index 3ac8619..0000000 --- a/vignettes/table_gen_templates/method_stat_input_ep_crit.tgn +++ /dev/null @@ -1 +0,0 @@ -{"rows_views":[[{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"bord
ers":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign"
:"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}]],"model":{"rows":[[{"value":"Parameter","cspan":1,"rspan":1,"markup":[1,9]},{"value":"Type","cspan":1,"rspan":1,"markup":[1,4]},{"value":"Example","cspan":1,"rspan":1,"markup":[1,7]},{"value":"Description","cspan":1,"rspan":1,"markup":[1,11]}],[{"value":"dat","cspan":1,"rspan":1,"markup":[1,3]},{"value":"data.table::data.table","cspan":1,"rspan":1,"markup":[1,22]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"Dataset containing the [Analysis population] (ep_spec_poplation_def.html)","cspan":1,"rspan":1,"markup":[1,73]}],[{"value":"event_index,","cspan":1,"rspan":1,"markup":[0,12]},{"value":"List(Integer)","cspan":1,"rspan":1,"markup":[1,13]},{"value":"`[1, 3, 5]`","cspan":1,"rspan":1,"markup":[1,11]},{"value":"Index (pointing to the `INDEX_` column) of rows with an [Event](ep_spec_event.html)","cspan":1,"rspan":1,"markup":[1,83]}],[{"value":"treatment_var,","cspan":1,"rspan":1,"markup":[0,14]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"treatment_name\"`","cspan":1,"rspan":1,"markup":[1,18]},{"value":"The column name describing treatment type","cspan":1,"rspan":1,"markup":[1,41]}],[{"value":"treatment_refval,","cspan":1,"rspan":1,"markup":[0,17]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"Placebo\"`","cspan":1,"rspan":1,"markup":[1,11]},{"value":"The treatment refval for the `treatment_var` column for the endpoint.","cspan":1,"rspan":1,"markup":[1,69]}],[{"value":"period_var,","cspan":1,"rspan":1,"markup":[0,11]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"period_block\"`","cspan":1,"rspan":1,"markup":[1,16]},{"value":"The column name describing the periods","cspan":1,"rspan":1,"markup":[1,38]}],[{"value":"period_val,","cspan":1,"rspan":1,"markup":[0,11]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"within_trial_period\"`","cspan":1,"rspan":1,"markup":[1,23]},{"value":"The value in the `period_var` which is of interest to the endpoint.","cspan":1,"rspan":1,"markup":[1,67]}],[{"value":"endpoint_filter","cspan":1,"rspan":1,"markup":[0,15]},{"value":"Character (escaped)","cspan":1,"rspan":1,"markup":[1,19]},{"value":"`\"\\\"someColumn\\\" == \\\"someValue\\\"\"`","cspan":1,"rspan":1,"markup":[1,35]},{"value":"Specific endpoint filter","cspan":1,"rspan":1,"markup":[1,24]}],[{"value":"endpoint_group_metadata","cspan":1,"rspan":1,"markup":[0,23]},{"value":"List","cspan":1,"rspan":1,"markup":[1,4]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"Named list containing endpoint_group 
metadata","cspan":1,"rspan":1,"markup":[1,45]}],[{"value":"subgroups","cspan":1,"rspan":1,"markup":[0,9]},{"value":"List(Character)","cspan":1,"rspan":1,"markup":[1,15]},{"value":"`['Sex', 'Gender']`","cspan":1,"rspan":1,"markup":[1,19]},{"value":"The [subgroups](ep_spec_subgroup_def.html) which the endpoint is sliced by.","cspan":1,"rspan":1,"markup":[1,75]}]]},"theme":{"ColorTheme":"Light","BorderTheme":"Outer","Alternate Rows/Columns Theme":"Alternate rows"},"fixed_layout":false,"markup":{"instances":[{},{"style":{"fontWeight":"","fontStyle":"","textDecoration":"","color":"","backgroundColor":""}},null]},"options":{}} diff --git a/vignettes/table_gen_templates/method_stat_input_ep_sg_crit.tgn b/vignettes/table_gen_templates/method_stat_input_ep_sg_crit.tgn deleted file mode 100644 index 2841ccd..0000000 --- a/vignettes/table_gen_templates/method_stat_input_ep_sg_crit.tgn +++ /dev/null @@ -1 +0,0 @@ -{"rows_views":[[{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{"bold":true},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","v
align":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}
,{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}},{"style":{"borders":"","font_style":{"font-weight":"normal"},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":"inherit"}}],[{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}]],"model":{"rows":[[{"value":"Parameter","cspan":1,"rspan":1,"markup":[1,9]},{"value":"Type","cspan":1,"rspan":1,"markup":[1,4]},{"value":"Example","cspan":1,"rspan":1,"markup":[1,7]},{"value":"Description","cspan":1,"rspan":1,"markup":[1,11]}],[{"value":"dat","cspan":1,"rspan":1,"markup":[1,3]},{"value":"data.table::data.table","cspan":1,"rspan":1,"markup":[1,22]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"Dataset containing the [Analysis population] (ep_spec_poplation_def.html)","cspan":1,"rspan":1,"markup":[1,73]}],[{"value":"event_index,","cspan":1,"rspan":1,"markup":[0,12]},{"value":"List(Integer)","cspan":1,"rspan":1,"markup":[1,13]},{"value":"`[1, 3, 5]`","cspan":1,"rspan":1,"markup":[1,11]},{"value":"Index (pointing to the `INDEX_` column) of rows with an [Event](ep_spec_event.html)","cspan":1,"rspan":1,"markup":[1,83]}],[{"value":"treatment_var,","cspan":1,"rspan":1,"markup":[0,14]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"treatment_name\"`","cspan":1,"rspan":1,"markup":[1,18]},{"value":"The column name describing treatment type","cspan":1,"rspan":1,"markup":[1,41]}],[{"value":"treatment_refval,","cspan":1,"rspan":1,"markup":[0,17]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"Placebo\"`","cspan":1,"rspan":1,"markup":[1,11]},{"value":"The treatment refval for the `treatment_var` column for the endpoint.","cspan":1,"rspan":1,"markup":[1,69]}],[{"value":"period_var,","cspan":1,"rspan":1,"markup":[0,11]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"period_block\"`","cspan":1,"rspan":1,"markup":[1,16]},{"value":"The column name describing the 
periods","cspan":1,"rspan":1,"markup":[1,38]}],[{"value":"period_val,","cspan":1,"rspan":1,"markup":[0,11]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"within_trial_period\"`","cspan":1,"rspan":1,"markup":[1,23]},{"value":"The value in the `period_var` which is of interest to the endpoint.","cspan":1,"rspan":1,"markup":[1,67]}],[{"value":"endpoint_filter","cspan":1,"rspan":1,"markup":[0,15]},{"value":"Character (escaped)","cspan":1,"rspan":1,"markup":[1,19]},{"value":"`\"\\\"someColumn\\\" == \\\"someValue\\\"\"`","cspan":1,"rspan":1,"markup":[1,35]},{"value":"Specific endpoint filter","cspan":1,"rspan":1,"markup":[1,24]}],[{"value":"endpoint_group_metadata","cspan":1,"rspan":1,"markup":[0,23]},{"value":"List","cspan":1,"rspan":1,"markup":[1,4]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"Named list containing endpoint_group metadata","cspan":1,"rspan":1,"markup":[1,45]}],[{"value":"subgroups","cspan":1,"rspan":1,"markup":[0,9]},{"value":"List(Character)","cspan":1,"rspan":1,"markup":[1,15]},{"value":"`['Sex', 'Gender']`","cspan":1,"rspan":1,"markup":[1,19]},{"value":"The [subgroups](ep_spec_subgroup_def.html) which the endpoint is sliced by.","cspan":1,"rspan":1,"markup":[1,75]}],[{"value":"subgroup_var","cspan":1,"rspan":1,"markup":[0,12]},{"value":"Character","cspan":1,"rspan":1,"markup":[1,9]},{"value":"`\"Sex\"`","cspan":1,"rspan":1,"markup":[1,7]},{"value":"The specific subgroup which the criteria relates to.","cspan":1,"rspan":1,"markup":[1,52]}]]},"theme":{"ColorTheme":"Light","BorderTheme":"Outer","Alternate Rows/Columns Theme":"Alternate rows"},"fixed_layout":false,"markup":{"instances":[{},{"style":{"fontWeight":"","fontStyle":"","textDecoration":"","color":"","backgroundColor":""}},null]},"options":{}}