Pull Request follow-up for #30: Example and Vignette runtime (#56)
* Added basic tests

* Fixed minor typo; fixed tests

* Added pandoc to gh actions

* Still trying pandoc

* Still trying pandoc

* Still trying pandoc (typo)

* Added latex

* Debugging missing extra packages

* Moved lfe install to test script

* Moved lfe install to test script (debugging)

* Added testthat

* Added haven for some reason

* Gave up and added lfe to recommended packages (for test)

* Set long examples to \dontrun; saved pre-computed vignette data

* Remove precompute from R build

* Tests only run conditional on environment variable

* Fix gh workflow typo

* Added missing VignetteResults.rda file

* Fixed stray math in Rd file

* Run tests only on linux

* Added MacOSX tests

* Debugging OSX

* Debugging OSX v2

* Debugging OSX v3

* Added title to sunab function description
mcaceresb authored May 11, 2024
1 parent 99e05c7 commit fdadae3
Showing 16 changed files with 536 additions and 292 deletions.
1 change: 1 addition & 0 deletions .Rbuildignore
@@ -4,3 +4,4 @@ README
deltaSD.png
^LICENSE\.md$
.github
vignettes/precompute.R
4 changes: 3 additions & 1 deletion .github/workflows/tests.yml
@@ -15,8 +15,8 @@ jobs:
strategy:
matrix:
config:
- {os: macos-latest, r: 'release'}
- {os: ubuntu-latest, r: 'release'}
- {os: macos-latest, r: 'release'}

steps:
- name: Checkout code
@@ -53,6 +53,8 @@ jobs:
uses: r-lib/actions/setup-tinytex@v2

- name: Check build
env:
HONESTDID_RUN_TESTS: '1'
run: |
devtools::document()
devtools::check()
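
The HONESTDID_RUN_TESTS variable exported above is what the commit notes mean by "tests only run conditional on environment variable". A minimal sketch of the matching guard inside a testthat file — the file name and test body are hypothetical; only the variable name comes from the workflow:

```r
# tests/testthat/test-example.R (hypothetical file name)
library(testthat)

test_that("long-running checks only execute when opted in", {
  # Skip unless CI (or a developer) sets HONESTDID_RUN_TESTS,
  # e.g. HONESTDID_RUN_TESTS=1 Rscript -e 'devtools::test()'
  skip_if(Sys.getenv("HONESTDID_RUN_TESTS") == "",
          message = "Set HONESTDID_RUN_TESTS to run the full test suite")
  expect_true(TRUE)  # placeholder for the package's actual checks
})
```
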
7 changes: 5 additions & 2 deletions R/arp-nuisance.R
@@ -534,16 +534,19 @@
lpDualSoln = .lp_dual_fn(y_T = y_T_ARP, X_T = X_T_ARP, eta = linSoln$eta_star,
gamma_tilde = linSoln$lambda, sigma = sigma_ARP)

sigma_B_dual = base::sqrt( base::t(lpDualSoln$gamma_tilde) %*% sigma_ARP %*% lpDualSoln$gamma_tilde)
sigma_B_dual2 = base::t(lpDualSoln$gamma_tilde) %*% sigma_ARP %*% lpDualSoln$gamma_tilde

#If sigma_B_dual is 0 to numerical precision, reject iff eta > 0
if(sigma_B_dual < 10^(-10)){
if ( base::abs(sigma_B_dual2) < .Machine$double.eps ) {
base::return(base::list(reject = base::ifelse(linSoln$eta_star > 0, 1, 0),
eta = linSoln$eta_star,
delta = linSoln$delta_star,
lambda = linSoln$lambda))
} else if ( sigma_B_dual2 < 0 ) {
base::stop(".vlo_vup_dual_fn returned a negative variance")
}

sigma_B_dual = base::sqrt(sigma_B_dual2)
maxstat = lpDualSoln$eta/sigma_B_dual

# HYBRID: Modify vlo, vup for the hybrid test
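
The change above computes the quadratic form once (sigma_B_dual2), treats values within machine precision of zero as a zero variance, stops on a genuinely negative value, and only then takes the square root. The same guard pattern in isolation — function and variable names here are illustrative, not part of the package:

```r
# Illustrative sketch of the numerical guard used above
safe_sd_from_quadform <- function(gamma, Sigma) {
  v <- drop(t(gamma) %*% Sigma %*% gamma)       # scalar quadratic form
  if (abs(v) < .Machine$double.eps) return(0)   # numerically zero variance
  if (v < 0) stop("quadratic form returned a negative variance")
  sqrt(v)
}

# A positive-definite Sigma gives a well-defined standard deviation
safe_sd_from_quadform(c(1, -1), matrix(c(2, 0.5, 0.5, 1), 2, 2))
```
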
34 changes: 18 additions & 16 deletions R/honest_sunab.R
@@ -1,36 +1,38 @@
#' @title sunab_beta_vcv
#'
#' @description
#' This function takes a regression estimated using fixest with the sunab option
#' and extracts the aggregated event-study coefficients and their variance-covariance matrix
#'
#' @param sunab_fixest The result of a fixest call using the sunab option
#'
#' @returns A list containing beta (the event-study coefficients),
#' sigma (the variance-covariance matrix), and
#' cohorts (the relative times corresponding to beta, sigma)

sunab_beta_vcv <-
function(sunab_fixest){
sunab_beta_vcv <- function(sunab_fixest) {

## The following code block extracts the weights on individual coefs used in
# the fixest aggregation ##
sunab_agg <- sunab_fixest$model_matrix_info$sunab$agg_period
sunab_names <- names(sunab_fixest$coefficients)
sunab_sel <- grepl(sunab_agg, sunab_names, perl=TRUE)
sunab_names <- base::names(sunab_fixest$coefficients)
sunab_sel <- base::grepl(sunab_agg, sunab_names, perl=TRUE)
sunab_names <- sunab_names[sunab_sel]
if(!is.null(sunab_fixest$weights)){
sunab_wgt <- colSums(sunab_fixest$weights * sign(model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
if(!base::is.null(sunab_fixest$weights)){
sunab_wgt <- base::colSums(sunab_fixest$weights * base::sign(stats::model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
} else {
sunab_wgt <- colSums(sign(model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
sunab_wgt <- base::colSums(base::sign(stats::model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
}

#Construct matrix sunab_trans such that sunab_trans %*% non-aggregated coefs = aggregated coefs,
sunab_cohorts <- as.numeric(gsub(paste0(".*", sunab_agg, ".*"), "\\2", sunab_names, perl=TRUE))
sunab_mat <- model.matrix(~ 0 + factor(sunab_cohorts))
sunab_trans <- solve(t(sunab_mat) %*% (sunab_wgt * sunab_mat)) %*% t(sunab_wgt * sunab_mat)
sunab_cohorts <- base::as.numeric(base::gsub(base::paste0(".*", sunab_agg, ".*"), "\\2", sunab_names, perl=TRUE))
sunab_mat <- stats::model.matrix(~ 0 + base::factor(sunab_cohorts))
sunab_trans <- base::solve(base::t(sunab_mat) %*% (sunab_wgt * sunab_mat)) %*% base::t(sunab_wgt * sunab_mat)

#Get the coefs and vcv
sunab_coefs <- sunab_trans %*% cbind(sunab_fixest$coefficients[sunab_sel])
sunab_vcov <- sunab_trans %*% sunab_fixest$cov.scaled[sunab_sel, sunab_sel] %*% t(sunab_trans)
sunab_coefs <- sunab_trans %*% base::cbind(sunab_fixest$coefficients[sunab_sel])
sunab_vcov <- sunab_trans %*% sunab_fixest$cov.scaled[sunab_sel, sunab_sel] %*% base::t(sunab_trans)

return(list(beta = sunab_coefs,
sigma = sunab_vcov,
cohorts = sort(unique(sunab_cohorts))))
base::return(base::list(beta = sunab_coefs,
sigma = sunab_vcov,
cohorts = base::sort(base::unique(sunab_cohorts))))
}
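
A hedged usage sketch for this helper: estimate a Sun-Abraham event study with fixest::sunab, extract the aggregated coefficients and variance-covariance matrix with sunab_beta_vcv, and pass them to HonestDiD. The data frame df and its columns are placeholders; the createSensitivityResults argument names are the ones used in the .Rd examples later in this diff:

```r
# Hypothetical panel `df` with outcome y, unit id, calendar year, and
# first_treat (treatment cohort; never-treated units coded with, say, 10000)
library(fixest)

est <- feols(y ~ sunab(first_treat, year) | id + year, data = df, cluster = ~id)
sa  <- sunab_beta_vcv(est)

# Relative times < 0 are pre-periods, >= 0 are post-periods
numPre  <- sum(sa$cohorts < 0)
numPost <- sum(sa$cohorts >= 0)

HonestDiD::createSensitivityResults(betahat        = as.vector(sa$beta),
                                    sigma          = sa$sigma,
                                    numPrePeriods  = numPre,
                                    numPostPeriods = numPost,
                                    alpha          = 0.05)
```
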
28 changes: 13 additions & 15 deletions README.md
@@ -383,27 +383,27 @@ function(sunab_fixest){
## The following code block extracts the weights on individual coefs used in
# the fixest aggregation ##
sunab_agg <- sunab_fixest$model_matrix_info$sunab$agg_period
sunab_names <- names(sunab_fixest$coefficients)
sunab_sel <- grepl(sunab_agg, sunab_names, perl=TRUE)
sunab_names <- base::names(sunab_fixest$coefficients)
sunab_sel <- base::grepl(sunab_agg, sunab_names, perl=TRUE)
sunab_names <- sunab_names[sunab_sel]
if(!is.null(sunab_fixest$weights)){
sunab_wgt <- colSums(sunab_fixest$weights * sign(model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
if(!base::is.null(sunab_fixest$weights)){
sunab_wgt <- base::colSums(sunab_fixest$weights * base::sign(stats::model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
} else {
sunab_wgt <- colSums(sign(model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
sunab_wgt <- base::colSums(base::sign(stats::model.matrix(sunab_fixest)[, sunab_names, drop=FALSE]))
}

#Construct matrix sunab_trans such that sunab_trans %*% non-aggregated coefs = aggregated coefs,
sunab_cohorts <- as.numeric(gsub(paste0(".*", sunab_agg, ".*"), "\\2", sunab_names, perl=TRUE))
sunab_mat <- model.matrix(~ 0 + factor(sunab_cohorts))
sunab_trans <- solve(t(sunab_mat) %*% (sunab_wgt * sunab_mat)) %*% t(sunab_wgt * sunab_mat)
sunab_cohorts <- base::as.numeric(base::gsub(base::paste0(".*", sunab_agg, ".*"), "\\2", sunab_names, perl=TRUE))
sunab_mat <- stats::model.matrix(~ 0 + base::factor(sunab_cohorts))
sunab_trans <- base::solve(base::t(sunab_mat) %*% (sunab_wgt * sunab_mat)) %*% base::t(sunab_wgt * sunab_mat)

#Get the coefs and vcv
sunab_coefs <- sunab_trans %*% cbind(sunab_fixest$coefficients[sunab_sel])
sunab_vcov <- sunab_trans %*% sunab_fixest$cov.scaled[sunab_sel, sunab_sel] %*% t(sunab_trans)
sunab_coefs <- sunab_trans %*% base::cbind(sunab_fixest$coefficients[sunab_sel])
sunab_vcov <- sunab_trans %*% sunab_fixest$cov.scaled[sunab_sel, sunab_sel] %*% base::t(sunab_trans)

return(list(beta = sunab_coefs,
sigma = sunab_vcov,
cohorts = sort(unique(sunab_cohorts))))
base::return(base::list(beta = sunab_coefs,
sigma = sunab_vcov,
cohorts = base::sort(base::unique(sunab_cohorts))))
}
```

@@ -475,8 +475,6 @@ honest_did <- function(...) UseMethod("honest_did")
#' points for computational reasons.
#' @param ... Parameters to pass to `createSensitivityResults` or
#' `createSensitivityResults_relativeMagnitudes`.
#' @inheritParams HonestDiD::createSensitivityResults
#' @inheritParams HonestDid::createSensitivityResults_relativeMagnitudes
honest_did.AGGTEobj <- function(es,
e = 0,
type = c("smoothness", "relative_magnitude"),
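
The hunk above keeps the honest_did generic and its AGGTEobj method from the README. A hedged sketch of the intended call pattern with the did package, where the dataset df and its column names are placeholders:

```r
# Hypothetical sketch: aggte() returns an AGGTEobj, so honest_did()
# dispatches to honest_did.AGGTEobj() as defined in the README.
library(did)

cs_out <- att_gt(yname = "y", tname = "year", idname = "id",
                 gname = "first_treat", data = df)
es     <- aggte(cs_out, type = "dynamic", min_e = -5, max_e = 5)

# e = 0 targets the event-time-zero effect; type selects the smoothness or
# relative-magnitudes sensitivity exercise
honest_did(es, e = 0, type = "smoothness")
```
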
Binary file added data/VignetteResults.rda
Binary file not shown.
18 changes: 18 additions & 0 deletions man/VignetteResults.Rd
@@ -0,0 +1,18 @@
\docType{data}
\name{VignetteResults}
\alias{VignetteResults}
\title{
Pre-computed results to use in the Vignette.
}
\description{
This list contains pre-computed, time-intensive results used in the vignette.
}
\format{
\describe{A list containing the 5 results shown in the vignette; refer to the text for details.
\item{BC_DeltaSDNB_RobustResults}{}
\item{BC_DeltaSDRM_RobustResults}{}
\item{BC_OriginalResults}{}
\item{LW_DeltaSD_RobustResults}{}
\item{LW_DeltaSDD_RobustResults}{}
}
}
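
A sketch of how the shipped object might be loaded in the vignette in place of the slow computations; the element name is one of the five listed in the \format block above, while the access pattern itself is an assumption rather than a quote from the vignette:

```r
# Load the pre-computed results shipped with the package
data("VignetteResults", package = "HonestDiD")

# Pull one of the five stored objects documented above
BC_original <- VignetteResults$BC_OriginalResults
```
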
2 changes: 2 additions & 0 deletions man/createEventStudyPlot.Rd
@@ -52,6 +52,7 @@ Rambachan, Ashesh and Jonathan Roth. "An Honest Approach to Parallel Trends." 20
Ashesh Rambachan
}
\examples{
\dontrun{
# Simple use case; for more detailed examples,
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>
createEventStudyPlot(betahat = BCdata_EventStudy$betahat,
@@ -62,3 +63,4 @@
timeVec = BCdata_EventStudy$timeVec,
referencePeriod = BCdata_EventStudy$referencePeriod)
}
}
2 changes: 2 additions & 0 deletions man/createSensitivityPlot.Rd
@@ -39,6 +39,7 @@ Returns ggplot object of the sensitivity plot.
Ashesh Rambachan
}
\examples{
\dontrun{
# Simple use case; for more detailed examples,
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>
robustResults <-
@@ -55,3 +56,4 @@
alpha = 0.05)
createSensitivityPlot(robustResults, originalResults)
}
}
8 changes: 4 additions & 4 deletions man/createSensitivityPlot_relativeMagnitudes.Rd
@@ -40,16 +40,16 @@ Returns ggplot object of the sensitivity plot.
Ashesh Rambachan
}
\examples{
\dontrun{
# Simple use case. For more detailed examples,
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>;
# for additional precision, increase the number of gridPoints.
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>
kwargs <- list(betahat = BCdata_EventStudy$betahat,
sigma = BCdata_EventStudy$sigma,
numPrePeriods = length(BCdata_EventStudy$prePeriodIndices),
numPostPeriods = length(BCdata_EventStudy$postPeriodIndices),
alpha = 0.05)
robustResults <- do.call(createSensitivityResults_relativeMagnitudes,
c(kwargs, list(gridPoints=100)))
robustResults <- do.call(createSensitivityResults_relativeMagnitudes, kwargs)
originalResults <- do.call(constructOriginalCS, kwargs)
createSensitivityPlot_relativeMagnitudes(robustResults, originalResults)
}
}
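
With the example now wrapped in \dontrun, the explicit gridPoints = 100 override is gone from the example; the argument remains available for trading runtime against precision (per the comment removed above, more grid points give additional precision), mirroring the removed line:

```r
# More gridPoints gives additional precision at the cost of runtime;
# kwargs is the list built in the example above
robustResults <- do.call(createSensitivityResults_relativeMagnitudes,
                         c(kwargs, list(gridPoints = 100)))
```
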
2 changes: 2 additions & 0 deletions man/createSensitivityResults.Rd
@@ -75,6 +75,7 @@ Rambachan, Ashesh and Jonathan Roth. "An Honest Approach to Parallel Trends." 20
Ashesh Rambachan
}
\examples{
\dontrun{
# Simple use case; for more detailed examples,
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>
createSensitivityResults(betahat = BCdata_EventStudy$betahat,
@@ -83,3 +84,4 @@
numPostPeriods = length(BCdata_EventStudy$postPeriodIndices),
alpha = 0.05)
}
}
10 changes: 5 additions & 5 deletions man/createSensitivityResults_relativeMagnitudes.Rd
@@ -32,7 +32,7 @@ createSensitivityResults_relativeMagnitudes(betahat, sigma,
Covariance matrix of event study coefficients.
}
\item{numPrePeriods}{
Number of pre-periods. If user selects bound = "deviation from linear trends" (Delta^{SDRM} as base choice of Delta), then numPrePeriods must be greater than one. See details for further explanation.
Number of pre-periods. If user selects bound = "deviation from linear trends" (\eqn{Delta^{SDRM}} as base choice of Delta), then numPrePeriods must be greater than one. See details for further explanation.
}
\item{numPostPeriods}{
Number of post-periods.
@@ -99,14 +99,14 @@ Rambachan, Ashesh and Jonathan Roth. "An Honest Approach to Parallel Trends." 20
Ashesh Rambachan
}
\examples{
\dontrun{
# Simple use case. For more detailed examples,
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>;
# for additional precision, increase the number of gridPoints.
# see <https://github.com/asheshrambachan/HonestDiD#honestdid>
kwargs <- list(betahat = BCdata_EventStudy$betahat,
sigma = BCdata_EventStudy$sigma,
numPrePeriods = length(BCdata_EventStudy$prePeriodIndices),
numPostPeriods = length(BCdata_EventStudy$postPeriodIndices),
alpha = 0.05,
gridPoints = 100)
alpha = 0.05)
do.call(createSensitivityResults_relativeMagnitudes, kwargs)
}
}