Merge pull request #315 from UCD-SERG/use-test-data
Use test data
d-morrison authored Nov 5, 2024
2 parents cbefd9c + 8cefedf commit afd8d4f
Showing 6 changed files with 45 additions and 102 deletions.
2 changes: 1 addition & 1 deletion DESCRIPTION
@@ -1,7 +1,7 @@
Type: Package
Package: serocalculator
Title: Estimating Infection Rates from Serological Data
-Version: 1.2.0.9020
+Version: 1.2.0.9021
Authors@R: c(
person("Peter", "Teunis", , "p.teunis@emory.edu", role = c("aut", "cph"),
comment = "Author of the method and original code."),
2 changes: 2 additions & 0 deletions NEWS.md
@@ -27,6 +27,8 @@

## Internal changes

+* Added `snapshot_value` test for `est.incidence()` (#315)

* Sped up `lint-changed-files` GitHub Action (#317)

* Added online preview builds for PRs that change the `pkgdown` website (#309)
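The `snapshot_value` entry above refers to testthat's `expect_snapshot_value()`. A minimal sketch of the pattern, assuming testthat edition 3: `style = "deparse"` records the value as R code, and `tolerance` keeps the snapshot stable against small floating-point drift across platforms.

    library(testthat)

    test_that("a numeric result is stable", {
      # the first run records the value; later runs compare against it
      expect_snapshot_value(exp(1), style = "deparse", tolerance = 1e-4)
    })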
12 changes: 10 additions & 2 deletions tests/testthat/_snaps/est.incidence.md
@@ -1,11 +1,19 @@
# est.incidence() produces expected results for typhoid data

    Code
-     typhoid_results
+     summary(typhoid_results)
    Output
      # A tibble: 1 x 10
        est.start incidence.rate     SE CI.lwr CI.upr coverage log.lik iterations
            <dbl>          <dbl>  <dbl>  <dbl>  <dbl>    <dbl>   <dbl>      <int>
-     1       0.1          0.133 0.0216 0.0962  0.182     0.95   -261.          4
+     1       0.1          0.166 0.0178  0.135  0.205     0.95   -524.          5
      # i 2 more variables: antigen.isos <chr>, nlm.convergence.code <ord>

+---
+
+    structure(list(minimum = 523.575044823023, estimate = -1.7955958453869,
+      gradient = 3.60891331241403e-06, hessian = structure(86.991906300701, dim = c(1L,
+      1L)), code = 1L, iterations = 5L), class = c("seroincidence",
+      "list"), lambda_start = 0.1, antigen_isos = c("HlyE_IgG", "HlyE_IgA"
+    ))

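The two snapshots above are mutually consistent, assuming the deparse snapshot is the raw nlm() fit wrapped in the seroincidence object, with `estimate` on the log scale:

    # cross-check (not part of the diff); assumes estimate = log(incidence rate)
    exp(-1.7955958453869)  # 0.1660..., matching incidence.rate = 0.166
    -523.575044823023      # `minimum` negated, matching log.lik = -524.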
2 changes: 1 addition & 1 deletion tests/testthat/_snaps/log_likelihood.md
@@ -1,4 +1,4 @@
# `log_likelihood()` gives consistent results

-    -9268.8238
+    -533.379886031329

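The change in magnitude (-9268.8 to -533.4) is consistent with the test data shrinking: the old snapshot summed the log-likelihood over the full OSF download, while the new one presumably uses only the 100-row Pakistan subset (`sees_pop_data_pk_100`), so far fewer observations enter the sum.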
99 changes: 26 additions & 73 deletions tests/testthat/test-est.incidence.R
@@ -1,81 +1,34 @@
-test_that(
-  "est.incidence() produces expected results for typhoid data",
-  {
-    library(dplyr)
-    # get pop data
-    xs_data <- load_pop_data(
-      file_path = "https://osf.io/download//n6cp3/",
-      age = "Age",
-      value = "result",
-      id = "index_id",
-      standardize = TRUE
-    ) %>%
-      filter(Country == "Pakistan") %>%
-      slice_head(n = 100)
+test_that("est.incidence() produces expected results for typhoid data", {
+  typhoid_results <- est.incidence(
+    pop_data = sees_pop_data_pk_100,
+    curve_param = typhoid_curves_nostrat_100,
+    noise_param = example_noise_params_pk,
+    antigen_isos = c("HlyE_IgG", "HlyE_IgA")
+  )

-    # get noise data
-    noise <- load_noise_params("https://osf.io/download//hqy4v/") %>%
-      filter(Country == "Pakistan")
+  expect_snapshot(x = summary(typhoid_results))

-    # get curve data
-    curve <- load_curve_params("https://osf.io/download/rtw5k/") # slice if test is too slow (.by = antigen_iso))
+  expect_snapshot_value(typhoid_results, style = "deparse", tolerance = 1e-4)
+})

-    # set start
-    start <- .05
+test_that(
+  "`est.incidence()` produces consistent results
+  regardless of whether data colnames are standardized.",
+  {
+    est_true <- est.incidence(
+      pop_data = sees_pop_data_pk_100,
+      curve_param = typhoid_curves_nostrat_100,
+      noise_param = example_noise_params_pk,
+      antigen_isos = c("HlyE_IgG", "HlyE_IgA")
+    )

-    typhoid_results <- est.incidence(
-      pop_data = xs_data,
-      curve_param = curve,
-      noise_param = noise,
+    est_false <- est.incidence(
+      pop_data = sees_pop_data_pk_100_old_names,
+      curve_param = typhoid_curves_nostrat_100,
+      noise_param = example_noise_params_pk,
      antigen_isos = c("HlyE_IgG", "HlyE_IgA")
-    ) %>%
-      summary.seroincidence(
-        coverage = .95,
-        start = start
-      )
+    )

-    expect_snapshot(x = typhoid_results)
+    expect_equal(est_true, est_false)
  }
)

-test_that("`est.incidence()` produces expected results", {
-  curves <- load_curve_params("https://osf.io/download/rtw5k/")
-  noise <- load_noise_params("https://osf.io/download//hqy4v/")
-  xs_data_true <- load_pop_data(
-    file_path = "https://osf.io/download//n6cp3/",
-    age = "Age",
-    value = "result",
-    id = "index_id",
-    standardize = TRUE
-  ) %>%
-    filter(Country == "Pakistan") %>%
-    slice_head(n = 100)
-
-
-  est_true <- est.incidence(
-    pop_data = xs_data_true,
-    curve_params = curves,
-    noise_params = noise %>% filter(Country == "Pakistan"),
-    antigen_isos = c("HlyE_IgG", "HlyE_IgA")
-  )
-
-  xs_data_false <- load_pop_data(
-    file_path = "https://osf.io/download//n6cp3/",
-    age = "Age",
-    value = "result",
-    id = "index_id",
-    standardize = FALSE
-  ) %>%
-    filter(Country == "Pakistan") %>%
-    slice_head(n = 100)
-
-
-  est_false <- est.incidence(
-    pop_data = xs_data_false,
-    curve_params = curves,
-    noise_params = noise %>% filter(Country == "Pakistan"),
-    antigen_isos = c("HlyE_IgG", "HlyE_IgA")
-  )
-
-  expect_equal(est_true, est_false)
-})
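For context, a hedged sketch of how the packaged fixture presumably relates to the removed download pipeline; the URL and column names are taken from the deleted code, and the equivalence check is an assumption, not part of the diff:

    library(serocalculator)
    library(dplyr)

    # rebuild the old test input the way the removed code did
    xs_data <- load_pop_data(
      file_path = "https://osf.io/download//n6cp3/",
      age = "Age",
      value = "result",
      id = "index_id",
      standardize = TRUE
    ) %>%
      filter(Country == "Pakistan") %>%
      slice_head(n = 100)

    # if the fixture was built the same way, this should agree up to attributes
    all.equal(xs_data, sees_pop_data_pk_100, check.attributes = FALSE)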
30 changes: 5 additions & 25 deletions tests/testthat/test-log_likelihood.R
@@ -1,34 +1,14 @@
test_that("`log_likelihood()` gives consistent results", {
-  library(dplyr)
-  library(tibble)
-
-  # load in longitudinal parameters
-  dmcmc <- load_curve_params("https://osf.io/download/rtw5k")
-
-  xs_data <- "https://osf.io/download//n6cp3/" %>%
-    load_pop_data()
-
-  # Load noise params
-  cond <- tibble(
-    antigen_iso = c("HlyE_IgG", "HlyE_IgA"),
-    nu = c(0.5, 0.5),
-    # Biologic noise (nu)
-    eps = c(0, 0),
-    # M noise (eps)
-    y.low = c(1, 1),
-    # low cutoff (llod)
-    y.high = c(5e6, 5e6)
-  ) # high cutoff (y.high)
-
-  # Calculate log-likelihood
-  ll_AG <- log_likelihood( # nolint: object_name_linter
-    pop_data = xs_data,
-    curve_params = dmcmc,
-    noise_params = cond,
+  ll_ag <- log_likelihood(
+    pop_data = sees_pop_data_pk_100,
+    curve_param = typhoid_curves_nostrat_100,
+    noise_param = example_noise_params_pk,
    antigen_isos = c("HlyE_IgG", "HlyE_IgA"),
    lambda = 0.1
  )

-  expect_snapshot_value(ll_AG)
+  expect_snapshot_value(ll_ag, style = "deparse")

})
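The removed `cond` tibble documents the noise-parameter columns that `example_noise_params_pk` presumably now packages. For reference, a minimal hand-built equivalent, with column names and values taken from the deleted code:

    library(tibble)

    noise <- tibble(
      antigen_iso = c("HlyE_IgG", "HlyE_IgA"),
      nu = c(0.5, 0.5),      # biologic noise
      eps = c(0, 0),         # measurement noise
      y.low = c(1, 1),       # lower cutoff (llod)
      y.high = c(5e6, 5e6)   # upper cutoff
    )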
