diff --git a/Manuscript/Ex57sol.xls b/Manuscript/Ex57sol.xls
deleted file mode 100644
index 00ccdd4..0000000
Binary files a/Manuscript/Ex57sol.xls and /dev/null differ
diff --git a/Manuscript/ExcelVersion_Ex66bsol.xls b/Manuscript/ExcelVersion_Ex66bsol.xls
deleted file mode 100644
index 20e775a..0000000
Binary files a/Manuscript/ExcelVersion_Ex66bsol.xls and /dev/null differ
diff --git a/Manuscript/RCodeHEDiagram.drawio b/Manuscript/RCodeHEDiagram.drawio
deleted file mode 100644
index 0065a10..0000000
--- a/Manuscript/RCodeHEDiagram.drawio
+++ /dev/null
@@ -1 +0,0 @@
-[single-line compressed draw.io diagram data removed - not human-readable]
\ No newline at end of file
diff --git a/Manuscript/THR_Model.R b/Manuscript/THR_Model.R
deleted file mode 100644
index e15c023..0000000
--- a/Manuscript/THR_Model.R
+++ /dev/null
@@ -1,378 +0,0 @@
-############ THR model #########################
-### Set working directory as the folder this script is stored in
-require("rstudioapi")
-setwd(dirname(getActiveDocumentContext()$path)) # Set working directory to source file
-
-#########**** PARAMETERS *****######
-##### DETERMINISTIC PARAMETERS ######
-# DISCOUNT RATES:
-dr.c <- 0.06 ## set the discount rate for costs (6%)
-dr.o <- 0.015 ## set the discount rate for outcomes (1.5%)
-# COSTS:
-c.SP0 <- 394 ## Cost of standard prosthesis
-c.NP1 <- 579 ## Cost of new prosthesis 1
-c.primary <- 0 ## Cost of a primary THR procedure - set to 0 for this model
-c.success <- 0 ## Cost of success - set to 0 for this model
-
-## Defining shape and scale parameters to be used in probabilistic analyses
-mn.cRevision <- 5294 ## mean cost of revision surgery
-se.cRevision <- 1487 ## standard error of cost of revision surgery
-a.cRevision <- (mn.cRevision/se.cRevision)^2 ## alpha value for cost of revision surgery
-b.cRevision <- (se.cRevision^2)/mn.cRevision ## beta value for cost of revision surgery
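## [Editorial aside - illustrative sketch, not part of the original THR_Model.R]
## The two lines above are a method-of-moments fit: for a gamma distribution with
## shape a and scale b, the mean is a*b and the variance is a*b^2, so
## a = (mean/se)^2 and b = se^2/mean reproduce the reported mean and standard error.
## A quick (optional) check of the parameterisation:
# x.check <- rgamma(1e5, shape = a.cRevision, scale = b.cRevision)
# mean(x.check); sd(x.check)   ## should be close to 5294 and 1487, respectively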
-### Transition probabilities - alpha & beta values:
-a.PTHR2dead <- 2 ## alpha value for operative mortality from primary surgery
-b.PTHR2dead <- 100 - a.PTHR2dead ## beta value for operative mortality from primary surgery
-a.rrr <- 4 ## alpha value for re-revision risk
-b.rrr <- 100 - a.rrr ## beta value for re-revision risk
-## UTILITIES:
-# primary prosthesis
-mn.uSuccessP <- 0.85 ## mean utility value for successful primary prosthesis
-se.uSuccessP <- 0.03 ## standard error of utility value for successful primary prosthesis
-ab.uSuccessP <- mn.uSuccessP*(1-mn.uSuccessP)/(se.uSuccessP^2)-1 ## estimating alpha plus beta (ab)
-a.uSuccessP <- mn.uSuccessP*ab.uSuccessP ## estimating alpha (a)
-b.uSuccessP <- a.uSuccessP*(1-mn.uSuccessP)/mn.uSuccessP ## estimating beta (b)
-## revision surgery
-mn.uSuccessR <- 0.75 ## mean utility value for having a successful revision THR
-se.uSuccessR <- 0.04 ## standard error of utility value for having a successful revision THR
-ab.uSuccessR <- mn.uSuccessR*(1-mn.uSuccessR)/(se.uSuccessR^2)-1 ## alpha + beta (ab)
-a.uSuccessR <- mn.uSuccessR*ab.uSuccessR ## alpha (a)
-b.uSuccessR <- a.uSuccessR*(1-mn.uSuccessR)/mn.uSuccessR ## beta (b)
-## during the revision period
-mn.uRevision <- 0.30 ## mean utility score during the revision period
-se.uRevision <- 0.03 ## standard error of utility score during the revision period
-ab.uRevision <- mn.uRevision*(1-mn.uRevision)/(se.uRevision^2)-1 ## alpha + beta (ab)
-a.uRevision <- mn.uRevision*ab.uRevision ## alpha (a)
-b.uRevision <- a.uRevision*(1-mn.uRevision)/mn.uRevision ## beta (b)
-
-### Structural inputs
-state.names <- c("P-THR","successP-THR","R-THR","successR-THR","Death")
-n.states <- length(state.names) # number of states in the model
-seed <- c(1,0,0,0,0) # Seed the starting states of the model
-cycles <- 60 ## number of cycles running the model
-cycle.v <- 1:cycles ## a vector of cycle numbers 1 - 60
-discount.factor.c <- 1/(1+dr.c)^cycle.v ## vector of discount factors for costs
-discount.factor.o <- 1/(1+dr.o)^cycle.v ## vector of discount factors for outcomes
-
-# Reading the data needed from csv files
-life.table <- read.csv("life-table.csv", header=TRUE) ## importing lifetable
-colnames(life.table) <- c("Age","Index","Males","Female") ## making sure column names are correct
-hazards <- read.csv("hazardfunction.csv", header=TRUE) ## importing the hazard inputs from the regression analysis
-cov.55 <- read.csv("cov55.csv",row.names=1,header=TRUE) ## importing the covariance matrix
-
-## Hazard function ####
-## Coefficients - on the log hazard scale
-mn.lngamma <- hazards$coefficient[1] ## Ancillary parameter in Weibull distribution - equivalent to lngamma coefficient
-mn.cons <- hazards$coefficient[2] ## Constant in survival analysis for baseline hazard
-mn.ageC <- hazards$coefficient[3] ## Age coefficient in survival analysis for baseline hazard
-mn.maleC <- hazards$coefficient[4] ## Male coefficient in survival analysis for baseline hazard
-mn.NP1 <- hazards$coefficient[5] ## NP1 coefficient in survival analysis for baseline hazard
-mn <- c(mn.lngamma, mn.cons, mn.ageC, mn.maleC, mn.NP1) ## vector of mean values from the regression analysis
-cholm <- t(chol(t(cov.55))) ## lower triangle of the Cholesky decomposition
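## [Editorial aside - illustrative sketch, not part of the original THR_Model.R]
## cholm is used below to generate correlated draws of the survival coefficients:
## if L is a lower-triangular matrix with L %*% t(L) equal to the covariance matrix,
## then mn + L %*% z (with z a vector of independent standard normal draws) is a
## draw from a multivariate normal with mean mn and that covariance.
## R's chol() works on the upper triangle and returns an upper-triangular factor,
## hence the two transposes around cov.55 (whose values are stored as a lower triangle).
## A quick (optional) check that the draws recover the covariance matrix:
# z.check <- matrix(rnorm(5 * 1e4), ncol = 5)
# draws.check <- t(apply(z.check, 1, function(z) mn + as.vector(cholm %*% z)))
# round(cov(draws.check), 4)   ## should be close to the values stored in cov55.csv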
-
-#####**** SAMPLE FUNCTION ******#####
-sim.runs <- 1000
-
-psa.sampling <- function(age = 60, male = 0, sim.runs = 1000){
-#### FUNCTION: sample probabilistic parameters according to age and sex
-#### INPUTS: age (numeric), male (0 for female, 1 for male); other variables
-####         defined above (e.g. cycles) are used from the global environment
-#### OUTPUTS: a list with data frames and vectors of probabilistic parameters
-
-### Transition probabilities
-tp.PTHR2dead <- rbeta(sim.runs, a.PTHR2dead, b.PTHR2dead) ## OMR following primary THR
-tp.RTHR2dead <- rbeta(sim.runs, a.PTHR2dead, b.PTHR2dead) ## OMR following revision THR
-## creating a data.frame of sampled transition probabilities
-omr.df <- data.frame(tp.PTHR2dead, tp.RTHR2dead)
-tp.rrr.vec <- rbeta(sim.runs, a.rrr, b.rrr) ## Re-revision risk transitions vector
-### Costs
-c.revision.vec <- rgamma(sim.runs, shape=a.cRevision, scale=b.cRevision) ## Gamma distribution draw for cost of revision surgery
-## Utilities
-uSuccessP <- rbeta(sim.runs, a.uSuccessP, b.uSuccessP)
-uSuccessR <- rbeta(sim.runs, a.uSuccessR, b.uSuccessR)
-uRevision <- rbeta(sim.runs, a.uRevision, b.uRevision)
-## Make a data frame of state utilities to pass into the model function
-state.utilities.df <- data.frame(uprimary=rep(0, sim.runs),
-                                 uSuccessP, uRevision, uSuccessR,
-                                 udeath=rep(0, sim.runs))
-
-## Hazard function ####
-z <- matrix(rnorm(5*sim.runs, 0, 1), nrow = sim.runs, ncol = 5) ## sim.runs rows of 5 independent standard normal draws
-r.table <- matrix(0, nrow = sim.runs, ncol = 5)
-colnames(r.table) <- c("lngamma", "cons", "age", "male", "NP1")
-
-for(i in 1:sim.runs){
-  Tz <- cholm %*% z[i,]
-  x <- mn + Tz
-  r.table[i,] <- x[,1]
-}
-
-r <- as.data.frame(r.table)
-gamma.vec <- exp(r$lngamma)
-lambda.vec <- exp(r$cons + age * r$age + male * r$male)
-RR.vec <- exp(r$NP1)
-survival.df <- data.frame(gamma.vec, lambda.vec) ## creating a data.frame with the survival parameters
-
-# set life table values based on the age and sex variables chosen
-# (these are not probabilistic, but they depend on the age/sex arguments)
-current.age <- age + cycle.v ## a vector of cohort age throughout the model
-interval <- findInterval(current.age, life.table$Index)
-death.risk <- data.frame(age = current.age, males = life.table[interval,3], females = life.table[interval,4])
-col.key <- 3 - male
-mortality.vec <- death.risk[,col.key]
-
-## combine outputs
-sample.output <- list(RR.vec = RR.vec,
-                      omr.df = omr.df,
-                      tp.rrr.vec = tp.rrr.vec,
-                      survival.df = survival.df,
-                      c.revision.vec = c.revision.vec,
-                      state.utilities.df = state.utilities.df,
-                      mortality.vec = mortality.vec)
-return(sample.output)
-}
-
-sample.output <- psa.sampling()
-
-RR.vec <- sample.output$RR.vec
-omr.df <- sample.output$omr.df
-tp.rrr.vec <- sample.output$tp.rrr.vec
-survival.df <- sample.output$survival.df
-c.revision.vec <- sample.output$c.revision.vec
-state.utilities.df <- sample.output$state.utilities.df
-mortality.vec <- sample.output$mortality.vec
-## take a look at the above vectors and data.frames to see the samples produced
-## these are unpacked from the list here for use in the value of information analysis later on
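## [Editorial aside - illustrative sketch, not part of the original THR_Model.R]
## Before running the model it can be worth eyeballing the sampled parameters,
## e.g. the revision-cost draws should average roughly mn.cRevision and the
## sampled utilities should sit around their specified means:
# summary(c.revision.vec)
# apply(state.utilities.df, 2, mean)
# hist(RR.vec, main = "Sampled relative risk of revision, NP1 vs SP0", xlab = "RR")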
cycles & age/sex are set in the psa.sampling function - ## OUTPUTS: a labelled numeric output providing costs and qalys for SP0 and NP1, given the set of input parameters - - ## First, we need to unpack values from the data.frames provided to the function - # COSTS: - state.costs<-c(c.primary, c.success, c.revision, c.success, 0) ## a vector with the costs for each state - # UTILITIES: - state.utilities <- unlist(state.util) - # TRANSITIONS - tp.PTHR2dead <- unlist(omr[1,1]) - tp.RTHR2dead <- unlist(omr[1,2]) - gamma <- unlist(survival[1,1]) - lambda <- unlist(survival[1,2]) - # Calculate revision risks - revision.risk.sp0 <- 1- exp(lambda * ((cycle.v-1) ^gamma-cycle.v ^gamma)) - revision.risk.np1 <- 1- exp(lambda * RR.NP1 * ((cycle.v-1) ^gamma-cycle.v ^gamma)) - # Transition arrays - tm.SP0 <- array(data=0,dim=c(n.states, n.states, cycles), - dimnames= list(state.names, state.names, 1:cycles)) ## an empty array of dimenions (number of states, number of states, number of cycles) - # Now using vectorisation to complete this - tm.SP0["P-THR","Death",] <- tp.PTHR2dead - tm.SP0["P-THR","successP-THR",] <- 1 - tp.PTHR2dead - tm.SP0["successP-THR","R-THR",] <- revision.risk.sp0 - tm.SP0["successP-THR","Death",] <- mortality.vec - tm.SP0["successP-THR","successP-THR",] <- 1 - revision.risk.sp0 - mortality.vec - tm.SP0["R-THR","Death",] <- tp.RTHR2dead + mortality.vec - tm.SP0["R-THR","successR-THR",] <- 1 - tp.RTHR2dead - mortality.vec - tm.SP0["successR-THR","R-THR",] <- tp.rrr - tm.SP0["successR-THR","Death",] <- mortality.vec - tm.SP0["successR-THR","successR-THR",] <- 1 - tp.rrr - mortality.vec - tm.SP0["Death","Death",] <- 1 - # Create a trace for the standard prosthesis arm - trace.SP0 <- matrix(data=0, nrow=cycles, ncol=n.states) - colnames(trace.SP0) <- state.names - - trace.SP0[1,] <- seed%*%tm.SP0[,,1] - - for (i in 2:cycles) trace.SP0[i,] <- trace.SP0[i-1,]%*%tm.SP0[,,i] - # NP1 ARM - tm.NP1 <- array(data=0,dim=c(n.states, n.states, cycles), - dimnames= list(state.names, state.names, 1:cycles)) ## an empty array of dimenions (number of states, number of states, number of cycles) - ## naming all dimensions - ### create a loop that creates a time dependent transition matrix for each cycle - tm.NP1["P-THR","Death",] <- tp.PTHR2dead ## Primary THR either enter the death state or.. or.. 
- tm.NP1["P-THR","successP-THR",] <- 1 - tp.PTHR2dead ## they go into the success THR state - tm.NP1["successP-THR","R-THR",] <- revision.risk.np1 ## revision risk with NP1 treatment arm - tm.NP1["successP-THR","Death",] <- mortality.vec - tm.NP1["successP-THR","successP-THR",] <- 1 - revision.risk.np1 - mortality.vec - tm.NP1["R-THR","Death",] <- tp.RTHR2dead + mortality.vec - tm.NP1["R-THR","successR-THR",] <- 1 - tp.RTHR2dead - mortality.vec - tm.NP1["successR-THR","R-THR",] <- tp.rrr - tm.NP1["successR-THR","Death",] <- mortality.vec[i] - tm.NP1["successR-THR","successR-THR",] <- 1 - tp.rrr - mortality.vec - tm.NP1["Death","Death",] <- 1 - - # Create a trace for the standard prosthesis arm - trace.NP1 <- matrix(data=0,nrow=cycles,ncol=n.states) - colnames(trace.NP1) <- state.names - trace.NP1[1,] <- seed%*%tm.NP1[,,1] - for (i in 2:cycles) trace.NP1[i,] <- trace.NP1[i-1,]%*%tm.NP1[,,i] - - # COST # - cost.SP0 <- trace.SP0%*%state.costs - disc.cost.SP0 <- (discount.factor.c%*%cost.SP0) + c.SP0 - cost.NP1 <- trace.NP1%*%state.costs - disc.cost.NP1 <- (discount.factor.c%*%cost.NP1) + c.NP1 - - # QALYS # - QALYs.SP0 <- trace.SP0%*%state.utilities ## utility per cycle - disc.QALYs.SP0 <- colSums(discount.factor.o%*%QALYs.SP0) ## total discounted utility - QALYs.NP1 <- trace.NP1%*%state.utilities ## utility per cycle - disc.QALYs.NP1 <- colSums(discount.factor.o%*%QALYs.NP1) ## total discounted utility - - output <- c(cost.SP0 = disc.cost.SP0, - qalys.SP0 = disc.QALYs.SP0, - cost.NP1 = disc.cost.NP1, - qalys.NP1 = disc.QALYs.NP1, - inc.cost = disc.cost.NP1 - disc.cost.SP0, - inc.qalys = disc.QALYs.NP1 - disc.QALYs.SP0) - - return(output) - -} - -#### RUNNING THE SIMULATIONS ######## -## creating an empty data.frame for simulation results to fill: -simulation.results <- data.frame("cost.SP0" = rep(as.numeric(NA), sim.runs), ## use the rep() function to create sim.runs rows of values - "qalys.SP0"= rep(as.numeric(NA),sim.runs), - "cost.NP1" = rep(as.numeric(NA),sim.runs), - "qalys.NP1" = rep(as.numeric(NA), sim.runs), - "inc.cost" = rep(as.numeric(NA),sim.runs), - "inc.qalys"= rep(as.numeric(NA),sim.runs)) - - -## running the simulations and filling the simulation.results data.frame: -for(i in 1:sim.runs){ - simulation.results[i,] <-model.THR(RR.vec[i], omr.df[i,], - tp.rrr.vec[i], survival.df[i,], - c.revision.vec[i], - state.utilities.df[i,], mortality.vec) - -} - - -# Estimating the average net monetary benefit of the new treatment -p.CE<-function(WTP, simulation.results) {# Estimate the probability of cost-effectiveness for a given willingness-to-pay ceiling ratio - ## a function that estimates the probability of the new intervention - # being cost-effective - # INPUTS: WTP = willingness to pay value (numeric) - # simulation.results = a data.frame output of PSA simulations which includes - # columns names "inc.qalys" and "inc.cost" - # OUTPUTS: A numeric value specifying the probability of cost-effectiveness given the inputs - nmb <- simulation.results[,"inc.qalys"]*WTP - simulation.results[,"inc.cost"] ## vector of NMB estimates for each simulation length 1:sim.runs - CE <- nmb>0 ## vector of TRUE/FALSE for each simulation length 1:sim.runs - probCE<- mean(CE) ## the mean value of TRUE (=1) and FALSE (=0) - return(probCE) -} - -# Generate CEAC table -WTP.values <- seq(from = 0, to = 50000, by = 10) ## use the seq() function to get a vector of specified numeric values -CEAC <- data.frame(WTP = WTP.values, - pCE = rep(as.numeric(NA),length(WTP.values))) - -for (i in 1:length(WTP.values)) { 
- CEAC[i,"pCE"]<- p.CE(CEAC[i,"WTP"], simulation.results) -} - -##### SUBGROUP ANALYSES ###### -# CREATE ARRAY TO STORE THE RESULTS OF THE MODEL IN EACH SUBGROUP -subgroups.names <- c("Male 40", "Male 60", "Male 80", "Female 40", "Female 60", "Female 80") -subgroups.n <- length(subgroups.names) -simulation.subgroups <- array(data = 0, dim = c(sim.runs, length(colnames(simulation.results)), subgroups.n), - dimnames = list(1:sim.runs, colnames(simulation.results),subgroups.names)) - -# Run model for each subgroup, inputting the age and sex into the function, and record results within the array -sample.sub <- list() -sample.sub[[1]]<- psa.sampling(age = 40, male = 1) -sample.sub[[2]]<- psa.sampling(age = 60, male = 1) -sample.sub[[3]]<- psa.sampling(age = 80, male = 1) -sample.sub[[4]]<- psa.sampling(age = 40, male = 0) -sample.sub[[5]]<- psa.sampling(age = 60, male = 0) -sample.sub[[6]]<- psa.sampling(age = 80, male = 0) - -for(i in 1:sim.runs){ - for(j in 1:6){ ## column = each subgroup - simulation.subgroups[i,,j] <- model.THR(sample.sub[[j]]$RR.vec[i], sample.sub[[j]]$omr.df[i,], sample.sub[[j]]$tp.rrr.vec[i], - sample.sub[[j]]$survival.df[i,],sample.sub[[j]]$c.revision.vec[i], - sample.sub[[j]]$state.utilities.df[i,], mortality.vec = sample.sub[[j]]$mortality.vec) - } -} - -# Create a CEAC table with lambda value sequence -WTP.values <- seq(from = 0, to = 50000, by = 50) -CEAC.subgroups <- matrix(data= as.numeric(NA), nrow=length(WTP.values), ncol=subgroups.n + 1) -CEAC.subgroups <- as.data.frame(CEAC.subgroups) -colnames(CEAC.subgroups) <- c("WTP", subgroups.names) -# Estimate probability cost-effective for all subgroups -for (i in 1:length(WTP.values)) { - CEAC.subgroups[i,1]<-WTP.values[i] - CEAC.subgroups[i,2]<-p.CE(WTP.values[i], simulation.subgroups[,,1]) - CEAC.subgroups[i,3]<-p.CE(WTP.values[i], simulation.subgroups[,,2]) - CEAC.subgroups[i,4]<-p.CE(WTP.values[i], simulation.subgroups[,,3]) - CEAC.subgroups[i,5]<-p.CE(WTP.values[i], simulation.subgroups[,,4]) - CEAC.subgroups[i,6]<-p.CE(WTP.values[i], simulation.subgroups[,,5]) - CEAC.subgroups[i,7]<-p.CE(WTP.values[i], simulation.subgroups[,,6]) -} - - -######***PLOTS****##################### - - #### COST-EFFECTIVENESS PLANES & COST-EFFECTIVENESS ACCEPTABILITY CURVES ##### - # Install package and load library - if(!require(ggplot2)) install.packages('ggplot2') - library(ggplot2) - ## Plotting: - xlabel = "Incremental QALYs" - ylabel = "Incremental costs" - ggplot(simulation.results) + - geom_point(shape = 21, size = 2, colour = "black", fill = NA, alpha = 0.5, aes(x=inc.qalys, y=inc.cost)) + - labs(x = xlabel, text = element_text(size=10)) + labs (y = ylabel, text = element_text(size=10)) + theme_classic() + - theme(legend.title = element_blank(), axis.title=element_text(face="bold"), - axis.title.x = element_text(margin = margin(t = 7, r = 0, b = 3, l = 0)), - axis.title.y = element_text(margin = margin(t = 0, r = 7, b = 0, l = 3)), - panel.grid.major = element_blank(), panel.grid.minor = element_blank(), - legend.key.width=unit(1.8,"line"), text = element_text(size=12), - plot.margin=unit(c(1.2,0.5,0,1.2),"cm")) - - - ## plotting: - xlabel = "Willingness to pay threshold" - ylabel = "Probability cost-effective" - ggplot(CEAC) + geom_line(aes(x=WTP, y=pCE), size=1) + - labs(x = xlabel, text = element_text(size=10)) + labs(y = ylabel, text = element_text(size=10)) + theme_classic() + - theme(legend.title = element_blank(), axis.title=element_text(face="bold"), - axis.title.x = element_text(margin = margin(t = 7, r = 0, b = 3, l 
= 0)),
-        axis.title.y = element_text(margin = margin(t = 0, r = 7, b = 0, l = 3)),
-        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
-        legend.key.width=unit(1.8,"line"), text = element_text(size=12)) +
-  scale_x_continuous(expand = c(0, 0.1)) +
-  scale_y_continuous(limits = c(0,1), breaks=seq(0,1,0.1), expand = c(0, 0))
-
-
-  ## To plot the CEAC for subgroups, we need to reshape the data from wide to long format for use in ggplot
-  # Install package and load library
-  if(!require(reshape2)) install.packages('reshape2')
-  library(reshape2)
-  CEAC.subgroups.long <- melt(CEAC.subgroups, id.vars = c("WTP"))
-  colnames(CEAC.subgroups.long) <- c("WTP", "group", "pCE")
-  ## plotting:
-  xlabel = "Willingness to pay threshold"
-  ylabel = "Probability cost-effective"
-  ggplot(CEAC.subgroups.long) + geom_line(aes(x=WTP, y=pCE, color=group), size=1) +
-    labs(x = xlabel, text = element_text(size=10)) + labs(y = ylabel, text = element_text(size=10)) + theme_classic() +
-    theme(legend.title = element_blank(), axis.title=element_text(face="bold"),
-          axis.title.x = element_text(margin = margin(t = 7, r = 0, b = 3, l = 0)),
-          axis.title.y = element_text(margin = margin(t = 0, r = 7, b = 0, l = 3)),
-          panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
-          legend.key.width=unit(1.8,"line"), text = element_text(size=12)) +
-    scale_x_continuous(expand = c(0, 0.1)) +
-    scale_y_continuous(limits = c(0,1), breaks=seq(0,1,0.1), expand = c(0, 0))
\ No newline at end of file
diff --git a/Manuscript/THR_Model_VOI.R b/Manuscript/THR_Model_VOI.R
deleted file mode 100644
index b7ab00b..0000000
--- a/Manuscript/THR_Model_VOI.R
+++ /dev/null
@@ -1,214 +0,0 @@
-# Loading in data and model
-source("Manuscript/THR_Model.R")
-
-#### SETTING VALUE OF INFORMATION POPULATION PARAMETERS ####
-population <- 40000
-years <- 10
-evpi.disc <- 0.06
-population.seq <- population * (1/(1+evpi.disc) ^ c(0:(years-1)))
-effective.population <- sum(population.seq)
-## Create a vector of willingness to pay values
-WTP.values <- seq(from = 0, to = 50000, by = 100)
-
-#### EXPECTED VALUE OF PERFECT INFORMATION (EVPI) ####
-
-# Create a function to estimate EVPI (at a population level)
-est.EVPI.pop <- function(WTP, effective.population, simulation.results) {
-
-  # Estimate the NMB for the two treatments, for each simulation
-  nmb.SP0 <- simulation.results$qalys.SP0 * WTP - simulation.results$cost.SP0
-  nmb.NP1 <- simulation.results$qalys.NP1 * WTP - simulation.results$cost.NP1
-  nmb.table <- data.frame(nmb.SP0, nmb.NP1)
-
-  # Estimating NMB with current information
-  av.current <- apply(nmb.table, 2, mean)
-  max.current <- max(av.current)
-
-  # Estimating NMB with perfect information
-  max.nmb <- apply(nmb.table, 1, max)
-  av.perfect <- mean(max.nmb)
-
-  # Generating EVPI values
-  EVPI.indiv <- av.perfect - max.current
-  pop.EVPI <- effective.population * EVPI.indiv
-
-  return(pop.EVPI)
-
-}
-
-## create a data frame to capture the WTP values and subsequent population EVPI results
-
-EVPI.results <- data.frame(WTP=WTP.values, EVPI=NA)
-
-for (i in 1:length(WTP.values)) {
-  EVPI.results[i,2] <- est.EVPI.pop(WTP.values[i], effective.population, simulation.results)
-}
-
-## Show the highest EVPI value
-EVPI.results[EVPI.results$EVPI == max(EVPI.results$EVPI),]
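## [Editorial aside - illustrative usage, not part of the original THR_Model_VOI.R]
## Per-person EVPI at a given threshold is E[max over treatments of NMB] minus
## the max over treatments of E[NMB]; est.EVPI.pop() simply scales this by the
## effective population. For a single threshold of interest, e.g. 20,000:
# est.EVPI.pop(WTP = 20000, effective.population, simulation.results)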
-inner.results <- matrix(0, inner.loops, 6) -colnames(inner.results) <- c("Cost SP0", "QALY SP0", "Cost NP1", "QALY NP1", "Inc Cost", "Inc QALY") - -evppi.results.SP0 <- matrix(0, nrow = outer.loops, ncol = length(WTP.values)) -colnames(evppi.results.SP0) <- as.character(WTP.values) -evppi.results.NP1 <- evppi.results.SP0 - - -# Creating a function to estimate NMB across WTP values (for inner loop results) -nmb.function <- function(WTP, results){ - - nmb.table <- matrix(WTP, ncol = length(WTP), nrow = dim(results)[1], byrow = TRUE) - - SP0 <- ((results[,"QALY SP0"] * nmb.table) - results[,"Cost SP0"]) - NP1 <- ((results[,"QALY NP1"] * nmb.table) - results[,"Cost NP1"]) - - nmb.SP0 <- apply(SP0, 2, mean) - nmb.NP1 <- apply(NP1, 2, mean) - - ### OUTPUTS: a list of the NMB under SP0 (nmb.SP0) and NP1 (nmb.NP1) - return(list(nmb.SP0, nmb.NP1)) - -} - - -## Function to estimate EVPPI across WTP values, using the outer loop results -gen.evppi.results <- function(evppi.results1 = evppi.results.SP0, evppi.results2 = evppi.results.NP1, WTP = WTP.values, effective.pop = effective.population){ - - ## calculate the mean NMB for current and new treatments, at each WTP - current.info1 <- apply(evppi.results1, 2, mean) - current.info2 <- apply(evppi.results2, 2, mean) - - ## calculate the max NMB (overall) from either current or new treatments, at each WTP - current.info <- pmax(current.info1, current.info2) - - # Create an array - evppi.array <- array(0, dim = c(outer.loops, length(WTP), 2)) - evppi.array[,,1] <- evppi.results1 - evppi.array[,,2] <- evppi.results2 - - # calculate the max NMB for each treatment, at each WTP, for each simulation - # This is so that the best treatment can be select for each simulation - perf.info.sims <- apply(evppi.array, c(1,2), max) - perf.info <- apply(perf.info.sims, 2, mean) - - # Work out the difference between perfect information (for each simulation) vs. 
-
-
-
-# EVPPI SIMULATIONS #
-
-## Now the EVPPI loops are run - each parameter group uses different values for the inner and outer loops
-parameter.groups <- 6
-evppi.wide <- data.frame(wtp = WTP.values, RR = NA, OMR = NA, surv = NA, r.cost = NA, RRR = NA, util = NA)
-colnames(evppi.wide) <- c("WTP", "NP1 Relative risk", "Operative mortality ratios", "Survival parameters", "Revision cost", "Re-revision risk", "Utilities")
-
-# Set progress bar
-pb = txtProgressBar(min = 0, max = outer.loops, initial = 0, style = 3)
-
-# Create loops for 1) parameter groups, 2) outer loops, 3) inner loops
-for(j in 1:parameter.groups){
-
-  for(a in 1:outer.loops){
-
-    for(b in 1:inner.loops){
-
-      # The 'partial' parameter will be included in the outer loop ('a')
-      # The other parameters are included in the inner loop ('b')
-
-      if(j==1) rr.n <- a else rr.n <- b
-      if(j==2) omr.n <- a else omr.n <- b
-      if(j==3) surv.n <- a else surv.n <- b
-      if(j==4) c.rev.n <- a else c.rev.n <- b
-      if(j==5) rrr.n <- a else rrr.n <- b
-      if(j==6) util.n <- a else util.n <- b
-
-      inner.results[b,] <- model.THR(RR.vec[rr.n], omr.df[omr.n,], tp.rrr.vec[rrr.n],
-                                     survival.df[surv.n,], c.revision.vec[c.rev.n],
-                                     state.utilities.df[util.n,], mortality.vec = mortality.vec)
-
-    }
-
-    # after each inner-loop PSA, calculate the mean NMB for each treatment comparator and store the results
-    nmb <- nmb.function(WTP.values, inner.results)
-
-    evppi.results.SP0[a,] <- nmb[[1]]
-    evppi.results.NP1[a,] <- nmb[[2]]
-    setTxtProgressBar(pb,a)
-
-  }
-
-  evppi.wide[,j+1] <- gen.evppi.results()
-
-}
-
-# Preview of results
-head(evppi.wide,20)
-
-#####**** PLOTS *****#####
-# Install packages and load libraries
-if(!require(reshape2)) install.packages('reshape2')
-library(reshape2)
-if(!require(ggplot2)) install.packages('ggplot2')
-library(ggplot2)
-
-## Plot the population EVPI results
-ggplot(EVPI.results) + geom_line(aes(x=WTP, y=EVPI), size=1) +
-  labs(x = "Willingness to pay threshold", text = element_text(size=10)) +
-  labs(y = "Expected Value of Perfect Information", text = element_text(size=10)) + theme_classic() +
-  theme(legend.title = element_blank(), axis.title=element_text(face="bold"),
-        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
-        legend.key.width=unit(1.8,"line"), text = element_text(size=12),
-        axis.title.x = element_text(margin = margin(t = 7, r = 0, b = 3, l = 0)),
-        axis.title.y = element_text(margin = margin(t = 0, r = 7, b = 0, l = 3)),
-        plot.margin=unit(c(0.5,0.5,0,0.5),"cm")) +
-  scale_x_continuous(labels = scales::comma, expand = c(0, 0.1)) +
-  scale_y_continuous(labels = scales::comma, expand = c(0, 0))
-
-#### PLOTTING EVPPI RESULTS ####
-
-# Convert from wide to long format
-evppi.long <- reshape2::melt(evppi.wide, id.vars = c("WTP"))
-
-ggplot(evppi.long) + geom_line(aes(x=WTP, y=value, colour = variable), size=0.75) +
-  labs(x = "Willingness to pay threshold", text = element_text(size=10)) +
-  labs(y = "EVPPI", text = element_text(size=10)) + theme_classic() +
-  theme(legend.title = element_blank(), axis.title=element_text(face="bold"),
-        axis.title.x = element_text(margin = margin(t = 7, r = 0, b = 3, l = 0)),
-        axis.title.y = element_text(margin = margin(t = 0, r = 7, b = 0, l = 3)),
-        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
-        legend.key.width=unit(1.8,"line"), text = element_text(size=12),
plot.margin=unit(c(0.5,0,0,0.5),"cm")) + - scale_x_continuous(labels = scales::comma, limits = c(0, 10000), expand = c(0, 0.1)) + - scale_y_continuous(labels = scales::comma, expand = c(0, 0)) -# Note you will get a warning here that the plot does not show all the data in the data.frame - - -# Plotting EVPPI at a particular WTP -sub.evppi <- subset(evppi.long, WTP==2200) - -ggplot(sub.evppi, aes(x=variable, y=value)) + - geom_bar(stat="identity", fill="steelblue")+ - labs(x = "Parameter Group", text = element_text(size=4)) + - labs(y = "EVPPI", text = element_text(size=4)) + theme_classic() + - theme(legend.title = element_blank(), axis.title=element_text(face="bold"), - axis.title.x = element_text(margin = margin(t = 7, r = 0, b = 3, l = 0)), - axis.title.y = element_text(margin = margin(t = 0, r = 7, b = 0, l = 3)), - axis.text.x=element_text(angle=45,hjust=1), - panel.grid.major = element_line(), panel.grid.minor = element_line(), - legend.key.width=unit(1.8,"line"), text = element_text(size=12)) + - scale_y_continuous(labels = scales::comma, expand = c(0, 0)) diff --git a/Manuscript/cov55.csv b/Manuscript/cov55.csv deleted file mode 100644 index a14159b..0000000 --- a/Manuscript/cov55.csv +++ /dev/null @@ -1,6 +0,0 @@ -,lngamma,cons,age,male,NP1 -lngamma,0.00225151199001,,,, -cons,-0.00569100000000,0.04321908366400,,, -age,0.00000002800000,-0.00078300000000,0.00002715660544,, -male,0.00000510000000,-0.00724700000000,0.00003300000000,0.01189539235600, -NP1,0.00025900000000,-0.00064200000000,-0.00011100000000,0.00018400000000,0.14636860414225 diff --git a/Manuscript/hazardfunction.csv b/Manuscript/hazardfunction.csv deleted file mode 100644 index 2c81fc4..0000000 --- a/Manuscript/hazardfunction.csv +++ /dev/null @@ -1,6 +0,0 @@ -explanatory variables,coefficient,se,hazard ratio -lngamma, 0.374097 , 0.047450 , 1.453678 -cons,-5.490935 , 0.207892 , 0.004124 -age,-0.036702 , 0.005211 , 0.963963 -male, 0.768536 , 0.109066 , 2.156607 -NP1,-1.344474 , 0.382582 , 0.260677 diff --git a/Manuscript/life-table.csv b/Manuscript/life-table.csv deleted file mode 100644 index 2025f0f..0000000 --- a/Manuscript/life-table.csv +++ /dev/null @@ -1,7 +0,0 @@ -Age,Index,Males,Females -35-44,35,0.00151,0.00099 -45-54,45,0.00393,0.0026 -55-64,55,0.0109,0.0067 -65-74,65,0.0316,0.0193 -75-84,75,0.0801,0.0535 -85 and over,85,0.1879,0.1548 diff --git a/README.md b/README.md index 4d43d8a..59784e6 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,12 @@ # Decision Modelling for Health Economic Evaluation R Code -[![Project Status: WIP – Initial development is in progress, but there has not yet been a stable, usable release suitable for the public.](https://www.repostatus.org/badges/latest/wip.svg)](https://www.repostatus.org/#wip) - +[![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) ## About This repository was originally created to write R code that was complementary to the "Decision Modelling for Health Economic Evaluation" course. 
See this website for further information about the course: https://modelling.he-evalcourses.com/
 
 Excel versions of the models used throughout the course are available here (versions aligned with the corresponding book chapters): https://www.herc.ox.ac.uk/downloads/decision-modelling-for-health-economic-evaluation
 
-We then used this code to aid in writing our tutorial paper entitled "Extensions of Health Economic Evaluation in R for Microsoft Excel Users: A Tutorial for incorporating heterogeneity and conducting value of information analyses".
 
 ## Repo Layout
@@ -21,7 +19,6 @@ This directory has the following structure:
 Advanced_Course ├── which has all Advanced Course code & data
- Manuscript ├── which has all manuscript code & data
 ```
 
 ### Course Workflow
@@ -58,23 +55,10 @@ Each Module (excluding the Reader) has the following:
 
 Note: For Modules A3 and A4, the Template and Solution files are split into ‘Part 1’ and ‘Part 2’. The Instructions PDF walks you through when to use which.
 
-### Manuscript Workflow
-
-The manuscript folder contains:
-
-* The original THR model R script: THR_Model.R
-
-* An R script providing code to perform Value of Information analyses on the THR model: THR_Model_VOI.R
-
-* All necessary data files to run the relevant R scripts: life-table.csv, hazardfunction.csv, cov55.csv.
-
-* The excel version of the model which was downloaded from "https://www.herc.ox.ac.uk/downloads/decision-modelling-for-health-economic-evaluation" written by Andrew Briggs, Mark Sculpher and Karl Claxton [last accessed 25/08/2021]
-
 ### How to Cite this Work
 
-Prior to the release of the corresponding manuscript, please cite any R code used from the course or the repository as per stated within our License: "Jack Williams[1], Nichola R. Naylor[1] and Andew Briggs, 2021, "Decision Making for Health Economic Evaluation R Code. [1]Equal contribution/Joint First Author"
+Please cite any R code used from the course or the repository as stated in our License: "Jack Williams[1], Nichola R. Naylor[1] and Andrew Briggs, 2021, 'Decision Making for Health Economic Evaluation R Code'. [1] Equal contribution/Joint First Author"
 
-Once the manuscript has been published, please use this citation. [This will be updated once the citation is finalised].
 
 ### Other information