diff --git a/DESCRIPTION b/DESCRIPTION
index 3322540..bf0ee0d 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -20,6 +20,10 @@ Authors@R: c(
family = "Quevedo",
role = "aut",
comment = c(ORCID = "0000-0003-0129-981X")),
+ person(given = "Sarah",
+ family = "Wright",
+ role = "aut",
+ comment = c(ORCID = "0009-0004-5060-2189")),
person(given = "Sarah",
family = "Kelso",
role = "ctb",
diff --git a/NEWS.md b/NEWS.md
index 3a11070..47bea87 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,4 +1,6 @@
# QCkit v0.1.7
+2024-04-18
+* Added the function `generate_ll_from_utm()`, which supersedes `convert_utm_to_ll()` and improves upon it in several ways, including accepting a column of UTM coordinates and returning a column of CRS values along with the decimal-degree latitude and longitude.
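+
+  A minimal usage sketch (the argument and column names below are assumptions, not confirmed; see `?generate_ll_from_utm` for the actual interface):
+
+  ```r
+  # df has one row per location, with UTM easting/northing, zone, and datum columns
+  df_ll <- QCkit::generate_ll_from_utm(df,
+                                       EastingCol = "utm_easting",
+                                       NorthingCol = "utm_northing",
+                                       ZoneCol = "utm_zone",
+                                       DatumCol = "datum")
+  ```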
2024-04-17
* Major updates to the DRR template, including: using snake case instead of camel case for variables; updating Table 3 to display filenames only when there are multiple files; fixing multiple issues with footnotes; adding citations to NPSdataverse packages; and adding a section that prints the R code needed to download the data package and load it into R.
* Updated the DRR documentation to account for new variable names.
diff --git a/Untitled/Untitled.Rmd b/Untitled/Untitled.Rmd
deleted file mode 100644
index c11ffeb..0000000
--- a/Untitled/Untitled.Rmd
+++ /dev/null
@@ -1,589 +0,0 @@
----
-output:
- word_document: default
- pdf_document: default
-bibliography: references.bib
-csl: national-park-service-DRR.csl
----
-
-```{r user_edited_parameters, include=FALSE}
-# The title of your DRR. Should all DRRs start with "Data Release Report:"? Should we enforce titles specifically referencing the data package(s) the report is about?
-title <- "Sample DRR Title"
-
-# Optional and should only be included if publishing to the semi-official DRR series. Contact Joe if you are. If not, leave as NULL
-reportNumber <- ": get this number from Joe DeVivo"
-
-# This should match the Data Store Reference ID for this DRR. Eventually we should be able to pull this directly from the data package metadata.
-DRR_DSRefID <- 0000000
-
-#Author names and affiliations:
-
-#One way to think of the author information is that you are building a table:
-
-# Author | Affiliation | ORCID
-# Jane | Institute 1 | 0000-1111-2222-3333
-# Jane | Institute 2 | 0000-1111-2222-3333
-# John | Institute 2 | NA
-
-#once the table is built, authors can be associated with the appropriate institute via relevant superscripts and the institutes can be listed only once in the DRR.
-
-# list the authors. If an author has multiple institutional affiliations, you must list the author multiple times. In this example, Jane Doe is listed twice because she has two affiliations.
-authorNames <- c(
- "Jane Doe",
- "Jane Doe",
- "John Doe"
-)
-
-# List author affiliations. The order of author affiliations must match the order of the authors in authorNames. If an author has multiple affiliations, the author must be listed two (or more) times under authorNames (above) and each affiliation should be listed in order. If authors share the same affiliation, the affiliation should be listed once for each author. In this case, Managed Business Solutions (MBS) is listed twice because it is associated with two authors. MBS will only print to the DRR once.
-
-#Note that the entirety of each affiliation is enclosed in quotations. Do not worry about indentation or word wrapping.
-authorAffiliations <- c(
- "NPS Inventory and Monitoring Division, 1201 Oakridge Dr., Suite 150, Fort Collins, Colorado",
-
- "Managed Business Solutions (MBS), a Sealaska Company, Contractor to the National Park Service, Natural Resource Stewardship and Science Directorate, 1201 Oakridge Dr., Suite 150, Fort Collins, Colorado",
-
- "Managed Business Solutions (MBS), a Sealaska Company, Contractor to the National Park Service, Natural Resource Stewardship and Science Directorate, 1201 Oakridge Dr., Suite 150, Fort Collins, Colorado"
-)
-
-# List the ORCID iDs for each author in the format "(xxxx-xxxx-xxxx-xxxx)". If an author does not have an ORCID iD, specify NA (no quotes). If an author is listed more than once (for instance because they have multiple institutional affiliations), the ORCID iD must also be listed more than once. For more information on ORCID iDs and to register an ORCID iD, see https://www.orcid.org.
-
-# The order of the ORCID iDs must match the order of authors in authorNames. In this example, Jane Doe has an ORCID iD but John Doe does not. Jane's ORCID iD is listed twice because her name is listed twice in authorNames (because she has two authorAffiliations).
-authorORCID <- c(
- "(0000-1111-2222-3333)", "(0000-1111-2222-3333)", NA
- )
-
-# Replace the text below with your abstract.
-DRRabstract <- "Abstract Should go here. Multiple Lines are okay; it'll format correctly. Pay careful attention to non-standard characters, line breaks ( ), carriage returns, and curly-quotes. You may find it useful to write the abstract in NotePad++ or some other text editor and not a word processor (such as Microsoft Word).\n\n
-
-Note that if you need multiple paragraphs or line breaks you can generate them using a combination of backslashes and n's. \n\n
-
-The abstract should succinctly describe the study, the assay(s) performed, the resulting data, and their reuse potential, but should not make any claims regarding new scientific findings. No references are allowed in this section."
-
-# DataStore reference ID for the data package associated with this report. You must have at least one data package. Eventually, we will automate importing much of this information from metadata.
-dataPackageRefID <- c(9999999)
-
-# Must match title in DataStore and metadata
-dataPackageTitle <- "Data Package Title"
-
-# Must match descriptions in the data package metadata
-dataPackageDescription <- "Short title for data package1"
-
-# generates your data package DOI based on the data package DataStore reference ID. This is different from the DRR DOI! No need to edit this.
-dataPackageDOI <- paste0("https://doi.org/10.57830/", dataPackageRefID)
-
-# list the file names in your data package. Do NOT include metadata files.
-dataPackage_fileNames <- c(
- "my_data.csv",
- "my_data2.csv"
-)
-
-# list the approximate size of each data file. Make sure the order corresponds to the order of the file names in dataPackage_fileNames
-dataPackage_fileSizes <- c("0.8 MB", "10 GB")
-
-# list a short, one-line description of each data file. Descriptions must be in the same order as the filenames.
-dataPackage_fileDescript <- c(
- "This is a short description of my_data.csv (a good guideline is 10 words or less).",
- "This is a short description of my_data2.csv.")
-```
-
-```{r setup_do_not_edit, include=FALSE}
-Rpackages <- c("markdown",
- "rmarkdown",
- "pander",
- "knitr",
- "yaml",
- "kableExtra",
- "devtools",
- "tidyverse",
- "here")
-
-inst <- Rpackages %in% installed.packages()[, "Package"]
-if (length(Rpackages[!inst]) > 0) {
- install.packages(Rpackages[!inst], dep = TRUE, repos = "https://cloud.r-project.org")
-}
-lapply(Rpackages, library, character.only = TRUE)
-
-devtools::install_github("EmilyMarkowitz-NOAA/NMFSReports")
-library(NMFSReports)
-devtools::install_github("nationalparkservice/QCkit")
-library(QCkit)
-```
-
-*`r (paste0("https://doi.org/10.38750/", DRR_DSRefID))`*
-
-```{r title_do_not_edit, echo=FALSE, results="asis"}
-date <- format(Sys.time(), "%d %B, %Y")
-cat("#", title, "\n")
-if (!is.null(reportNumber)) {
- subtitle <- paste0("Data Release Report ", reportNumber)
- cat("###", subtitle)
-}
-```
-
-```{r authors_do_not_edit, echo=FALSE, results="asis"}
-author_list <- data.frame(authorNames, authorAffiliations, authorORCID)
-unique_authors <- author_list %>% distinct(authorNames,
- .keep_all = TRUE)
-unique_affiliation <- author_list %>% distinct(authorAffiliations,
- .keep_all = TRUE)
-
-#single author documents:
-if(nrow(unique_authors) == 1){
-
- for (i in seq_along(unique_authors$authorNames)) {
- curr <- unique_authors[i, ]
-
- #find all author affiliations
- aff <- author_list[which(authorNames == curr$authorNames),]
- aff <- aff$authorAffiliations
-
-    #identify the position(s) of the author's affiliation(s) in the unique
-    #list of affiliations and build the superscript string (e.g., "1,2")
-    super <- which(unique_affiliation$authorAffiliations %in% aff)
-    script <- paste(super, collapse = ",")
- }
- cat("#### ", curr$authorNames, sep="")
- if (is.na(curr$authorORCID)) {
- }
- if (!is.na(curr$authorORCID)) {
- orc <- paste0(" ", curr$authorORCID, "")
- cat({{ orc }})
- }
- cat(" ^",script,"^", " ", " ", sep="")
-
- #cat("#### ", unique_authors$authorNames, "^1^", sep="")
- #if(!is.na(authorORCID)){
- # orc <- paste0(" https://orcid.org/", unique_authors$authorORCID)
- # cat({{ orc }}, "\n")
- #}
- #cat("#### ", unique_authors$authorAffiliations, sep="")
-}
-
-#multi author documents:
-if(nrow(unique_authors) > 1){
- for (i in seq_along(unique_authors$authorNames)) {
- curr <- unique_authors[i, ]
-
- #find all author affiliations
- aff <- author_list[which(authorNames == curr$authorNames),]
- aff <- aff$authorAffiliations
-
-    #identify the position(s) of the author's affiliation(s) in the unique
-    #list of affiliations and build the superscript string (e.g., "1,2")
-    super <- which(unique_affiliation$authorAffiliations %in% aff)
-    script <- paste(super, collapse = ",")
-
-    # authors before the second-to-last author:
-    if(i < (nrow(unique_authors) - 1)){
-      #print the "#### " heading prefix only once, for the first author
-      if(i == 1){
-        cat("#### ")
-      }
-      cat(curr$authorNames, " ", sep="")
-      if (!is.na(curr$authorORCID)) {
-        cat(paste0(" ", curr$authorORCID, " "))
-      }
-      cat(" ^", script, "^", ", ", " ", sep = "")
-    }
-
-    # the second-to-last author:
-    if(i == (nrow(unique_authors) - 1)){
-
-      #if 3 or more authors, include a comma before the "and":
-      if(nrow(unique_authors) > 2){
-        cat(curr$authorNames, sep="")
-        if (!is.na(curr$authorORCID)) {
-          cat(paste0(" ", curr$authorORCID, " "))
-        }
-        cat(" ^", script, "^", ", ", sep="")
-        cat("and ", sep="")
-      }
-
-      #if only 2 authors, omit the comma before "and":
-      if(nrow(unique_authors) == 2){
-        cat("#### ", curr$authorNames, sep="")
-        if (!is.na(curr$authorORCID)) {
-          cat(paste0(" ", curr$authorORCID, " "))
-        }
-        cat(" ^", script, "^ ", sep = "")
-        cat("and ", sep="")
-      }
-    }
-
-    # the last author:
-    if(i == nrow(unique_authors)){
-      cat(curr$authorNames, sep="")
-      if (!is.na(curr$authorORCID)) {
-        cat(paste0(" ", curr$authorORCID, " "))
-      }
-      cat(" ^", script, "^", sep = "")
-    }
- }
-}
-cat("\n\n")
-for(i in 1:nrow(unique_affiliation)){
- cat("^",i,"^ ", unique_affiliation[i,2], "\n\n", sep="")
- }
-```
-
-# Abstract
-
-`r DRRabstract`
-
-
-
-# Acknowledgements (optional)
-
-The Acknowledgements should contain text acknowledging non-author contributors. Acknowledgements should be brief, and should not include thanks to anonymous referees and editors or effusive comments. Grant or contribution numbers may be acknowledged.
-
-# Using citations in this document:
-
-To automate citations, add the citation in BibTeX format to the file "references.bib". You can manually copy and paste the BibTeX for each reference in, or you can search for it from within RStudio. From within RStudio, make sure you are editing this document using the "Visual" view (as opposed to "Source"). From the "Insert" drop-down menu, select "\@ Citation..." (shortcut: Ctrl-Shift-F8). This will open a tool where you can view all the citations in your references.bib file as well as search multiple databases for references, automatically insert the BibTeX for a reference into your references.bib file (and customize the unique identifier if you'd like), and insert the in-text citation into the DRR template.
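-
-As an illustration, a BibTeX entry in references.bib has the following shape (the entry below is a placeholder to show the format, not a real reference):
-
-```
-@article{Doe2024,
-  author  = {Jane Doe and John Doe},
-  title   = {An Example Article Title},
-  journal = {Example Journal},
-  year    = {2024},
-  volume  = {1},
-  pages   = {1--10}
-}
-```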
-
-Once a reference is in your references.bib file, from within this template you can simply type the '\@' symbol and select which reference to insert in the text.
-
-If you need to edit how the citation is displayed after inserting it into the text, switch back to the "Source" view. Each BibTeX citation should start with a unique identifier; the example reference in the supplied references.bib file has the unique identifier "\@article{Scott1994,". Using the "Source" view in RStudio, insert the reference in your text by combining the "at" symbol with the portion of the unique identifier after the curly bracket: @Scott1994 . You can put a citation in parentheses using square brackets: [@Scott1994]. This will be rendered as (Scott et al. 1994) in text. You can cite multiple works in a single parenthetical citation by separating them with a semi-colon. You can suppress the author and cite just the year by using a - symbol before the \@ : [-@Scott1994].
-
-If you would like to format your citations manually, please feel free to do that instead. Make sure to examine the References section for examples of how to manually format each citation type.
-
-# Data Records (required)
-
-## Data Inputs (optional)
-
-If the data package being described was generated based on one or more pre-existing datasets, cite those datasets here.
-
-## Summary of Datasets Created (required)
-
-The Data Records section should be used to explain each data record associated with this work (for instance, a data package), including the DOI indicating where this information is stored, and provide an overview of the data files and their formats. Each external data record should be cited. Below is some sample text:
-
-This DRR describes the data package *`r dataPackageTitle`* which contains a metadata file and `r length(dataPackage_fileNames)` data files. These data were compiled and processed for dissemination by the National Park Service Inventory and Monitoring Division (IMD) and are available at `r dataPackageDOI` (see Table 1).
-
-```{r file_table, echo=FALSE}
-filelist <- data.frame(dataPackage_fileNames, dataPackage_fileSizes, dataPackage_fileDescript)
-
-knitr::kable(filelist, caption = paste0("**Table 1. ", dataPackageTitle, ": List of data files.**"), col.names = c("**File Name**", "**Size**", "**Description**"), format = "pandoc")
-```
-
-See Appendix for additional notes and examples.
-
-# Data Quality Evaluation (required)
-
-The Data Quality Evaluation section should present any analyses that are needed to support the technical quality of the dataset. This section may be supported by figures and tables, as needed. *This is a required section*; authors must provide information to justify the reliability of their data. Wherever possible and appropriate, data quality evaluation should be presented in the context of data standards and quality control procedures as prescribed in the project's quality assurance planning documentation.
-
-**Required elements for this section**
-
-*Required Table*
-
-```{r data_acceptance_criteria, echo = FALSE, eval = TRUE}
-# To turn off, set eval=FALSE.
-# Generates a table of acceptance criteria for each of the data quality fields in your data package. Mitigations taken when data did not meet the acceptance criteria should be described textually in the Data Quality Evaluation section.
-
-# Specify which columns in your data package are data quality fields in the data_quality_fields variable. In the example below, data quality fields/columns in the data package are listed in the format [FieldName]_flag. These data quality fields relate to the respective temporal, taxonomic, and geographic data.
-
-data_quality_fields <- c(
- "eventDate_flag",
- "scientificName_flag",
- "coordinate_flag"
- )
-
-# Brief description of the acceptance criteria for each respective data quality field. The order of the acceptance criteria must match the order of the data quality fields.
-
-data_quality_acceptance_criteria <- c(
- "Sampling event date within the start and end dates of the project",
- "Taxon exists within Integrated Taxonomic Information System and GBIF",
- "Sampling location is within the park unit boundaries"
- )
-
-data_criteria <- data.frame(data_quality_fields =
- str_remove(data_quality_fields, "_flag"),
- data_quality_acceptance_criteria)
-
-data_criteria %>%
- NMFSReports::format_cells(1:3, 1, "bold") %>%
- knitr::kable(caption = "**Table 2. Acceptance criteria for data evaluated.**",
- col.names=c("**Field**",
- "**Acceptance Criteria**"),
- format="pandoc",
- align = 'c')
-
-```
-
-```{r data_column_flagging, echo=FALSE, eval=TRUE}
-# To turn off, set eval=FALSE.
-# Generates a table summarizing QC at the column level within each file. All flagged columns are included. To add additional non-flagged columns, specify them with column names: cols=c("my_unflagged_data1", "my_unflagged_data2") or numbers: cols=c(1:4). All non-missing data in unflagged columns are assumed accepted. If a file has no flagged columns and no specified custom columns, all values for that data file will be listed as "NA".
-
-#set directory to the location of your data package:
-dc_flags <- QCkit::get_custom_flags(here::here("Untitled",
- "BICY_Example"),
- output="columns")
-dc_flags$`File Name` <- gsub(".csv", "", dc_flags$`File Name`)
-
-
-colnames(dc_flags)[2]<-paste0("Measure", "^1^")
-colnames(dc_flags)[4]<-paste0("A", "^2^")
-colnames(dc_flags)[8]<-paste0("% Accepted", "^3^")
-
-file_names <- NULL
-if (length(unique(dc_flags$`File Name`)) < 2) {
- file_names <- 1
- dc_flags <- dc_flags[,-1]
-}
-
-#Generate the table:
-dc_flags %>%
- knitr::kable(
- caption = '**Table 3: Summary of data quality flags for each column [A – Accepted; AE – Accepted but Estimated; P – Provisional; R – Rejected.]**',
- format = "pandoc",
- digits = 2,
- align = 'c',
- col.names = if (is.null(file_names)) {
- c("**File Name**", "**Measure^1^**", "**Number of Records**",
- "**A^2^**", "**AE**", "**R**", "**P**", "**% Accepted^3^**")
- } else {
- c( "**Measure^1^**", "**Number of Records**", "**A^2^**", "**AE**",
- "**R**", "**P**", "**% Accepted^3^**")
- }) %>%
-kableExtra::add_footnote(
- c("The '_flag' suffix has been omitted from column names for brevity.",
- "All non-missing data in specified unflagged columns are considered accepted.",
- "% Accepted is calculated as the number of accepted (where A and AE are both considered accepted) divided by the total number of observations (including any missing observations."),
- notation = "number"
- )
-
-```
-
-```{r data_package_flagging, echo=FALSE, eval=TRUE}
-# To turn off, set eval=FALSE.
-# Generates a table summarizing data quality across all flagged columns of each data file. To add additional non-flagged columns, specify them with column names: cols=c("my_unflagged_data1", "my_unflagged_data2") or numbers: cols=c(1:4). All non-missing data in unflagged columns are assumed accepted. If a file has no flagged columns and no specified custom columns, all values for that data file will be listed as "NA".
-
-#set directory to the location of your data package
-dp_flags <- get_custom_flags(directory = here::here("Untitled", "BICY_Example"), output="files")
-
-#generate table:
-dp_flags %>%
- kableExtra::kbl(caption = '**Table 4: Summary of data quality flags for the data package [A – Accepted; AE – Accepted but Estimated; P – Provisional; R – Rejected.]**',
- format = "pandoc",
- col.names = c("**File Name**", "**A^1^**", "**AE**", "**R**", "**P**", "**% Accepted^2^**"),
- digits=2,
- align = 'c') %>%
- kableExtra::add_footnote(c("All non-missing data in specified unflagged columns are considered accepted.",
- "% Accepted is calculated as the number of accepted (where A and AE are both considered accepted) divided by the total number of observations plus the number of missing observations."), notation = "number")
-```
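-
-For reference, the "% Accepted" footnote in Tables 3 and 4 amounts to the calculation sketched below (made-up counts, for illustration only):
-
-```r
-# A and AE both count as accepted; missing observations count toward the total
-n_A <- 120; n_AE <- 15; n_R <- 5; n_P <- 10; n_missing <- 3
-pct_accepted <- 100 * (n_A + n_AE) / (n_A + n_AE + n_R + n_P + n_missing)
-round(pct_accepted, 2)  # 88.24
-```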
-
-Possible content **strongly suggested to include:**
-
-- Occurrence rates or patterns in data that do not meet established standards or data quality objectives.
-
-Possible content **may include:**
-
-- experiments that support or validate the data-collection procedure(s) (e.g. negative controls, or an analysis of standards to confirm measurement linearity)
-- statistical analyses of experimental error and variation
-- general discussions of any procedures used to ensure reliable and unbiased data production, such as chain of custody procedures, blinding and randomization, sample tracking systems, etc.
-- any other information needed for assessment of technical rigor by reviewers/users
-
-Generally, this **should not include:**
-
-- follow-up experiments aimed at testing or supporting an interpretation of the data
-- statistical hypothesis testing (e.g. tests of statistical significance, identifying differentially expressed genes, trend analysis, etc.)
-- exploratory computational analyses like clustering and annotation enrichment (e.g. GO analysis).
-
-*Stock Text to include:*
-
-The data within the data records listed above have been reviewed by staff in the NPS Inventory and Monitoring Division to ensure accuracy, completeness, and consistency with documented data quality standards, as well as for usability and reproducibility (Table 3). Of the data that were evaluated for quality, XX.X% met data quality standards. The data package *`r dataPackageTitle`* is suitable for its intended use as of the date of processing (`r Sys.Date()`).
-
-# Usage Notes (required)
-
-The Usage Notes should contain brief instructions to assist other researchers with reuse of the data. This may include discussion of software packages (with appropriate citations) that are suitable for analysing the assay data files, suggested downstream processing steps (e.g. normalization, etc.), or tips for integrating or comparing the data records with other datasets. Authors are encouraged to provide code, programs or data-processing workflows if they may help others understand or use the data.
-
-For studies involving privacy or safety controls on public access to the data, this section should describe in detail these controls, including how authors can apply to access the data, what criteria will be used to determine who may access the data, and any limitations on data use.
-
-## Acquiring the Data Package
-
-This data package is available for download from the NPS DataStore at `r dataPackageDOI` and can be directly imported into R data frames using the NPSutils package.
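-
-A minimal sketch of that workflow is below (the NPSutils function names are assumptions, not confirmed; consult the NPSutils documentation for the current interface):
-
-```r
-# install NPSutils from GitHub if needed:
-# devtools::install_github("nationalparkservice/NPSutils")
-
-# download the data package from DataStore, then read its csv files into R;
-# both function names here are assumed, not confirmed
-NPSutils::get_data_package(dataPackageRefID)
-my_data <- NPSutils::load_data_package(dataPackageRefID)
-```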
-
-# Methods
-
-Ideally, these methods are identical to the methods listed in the metadata accompanying the data package that the DRR describes. Future versions of this template will pull directly from metadata.
-
-The Methods should cite previously published methods where they are used, but should also describe data production, including experimental design, data acquisition assays, and any computational processing (e.g. normalization, image feature extraction), in enough detail that others can understand the methods and processing steps without referring to associated publications. Cite and link to the DataStore reference for the protocol that contains detailed methods sufficient for reproducing the experiment or observational study. Related methods should be grouped under corresponding subheadings where possible, and methods should be described in enough detail to allow other researchers to interpret the full study.
-
-Specific data inputs and outputs should be explicitly cited in the text and included in the References section below, following the same [Chicago Manual of Style author-date format](https://www.chicagomanualofstyle.org/tools_citationguide/citation-guide-2.html) in text. See the [USGS data citation guidelines](https://www.usgs.gov/data-management/data-citation) for examples of how to cite data in text and in the References section.
-
-Authors are encouraged to consider creating a figure that outlines the experimental workflow(s) used to generate and analyse the data output(s) (Figure 1).
-
-```{r figure1, echo=FALSE, fig.cap="Example general workflow to include in the methods section."}
-knitr::include_graphics(here::here("Untitled",
- "BICY_Example",
- "ProcessingWorkflow.png"))
-```
-
-## Data Collection and Sample Processing Methods (optional)
-
-Include a description of field methods and sample processing.
-
-## Additional Data Sources (optional)
-
-Provide descriptions (with citations) of other data sources used.
-
-## Data Processing (required if done)
-
-Summarize the process and results of any QC procedures that manipulate, change, or qualify data.
-
-## Code Availability (required)
-
-For all studies using custom code in the generation or processing of datasets, a statement must be included indicating whether and how the code can be accessed and any restrictions to access. This section should also include information on the versions of any software used, if relevant, and any specific variables or parameters used to generate, test, or process the current dataset. Actual analytical code should be provided in Appendices.
-
-# References (required)
-
-Provide sufficient information to locate the resource. If the citation has a DOI, include the DOI at the end of the citation, including the prefix. If you are citing documents that have unregistered DOIs (such as a data package that you are working on concurrently), still include the DOI. Citations for electronic resources, data, and data services or web sites should include the date they were accessed. Keep the following two lines if you would like to automate generating and formatting references:
-
-::: {#refs}
-:::
-
-If you would like to manually format your references, delete the preceding two lines and use the following examples instead: Include bibliographic information for any works cited (including the data package the DRR is describing) in the above sections, using the standard *NPS NR Publication Series* referencing style.
-
-See the following examples:
-
-## Agency, Company, etc. as Author Examples
-
-Fung Associates Inc. and SWCA Environmental Consultants. 2010. Assessment of natural resources and watershed conditions for Kalaupapa National Historical Park. Natural Resource Report. NPS/NPRC/WRD/NRR—2010/261. National Park Service, Fort Collins, Colorado.
-
-Greater Yellowstone Whitebark Pine Monitoring Working Group. 2014. Monitoring whitebark pine in the Greater Yellowstone Ecosystem: 2013 annual report. Natural Resource Data Series. NPS/GRYN/NRDS—2014/631. National Park Service. Fort Collins, Colorado.
-
-National Park Service (NPS). 2016. State of the park report for Zion National Park. State of the Park Reports. No. 23. National Park Service. Washington, District of Columbia.
-
-U.S. Forest Service (USFS). 1993. ECOMAP. National hierarchical framework of ecological units. U. S. Forest Service, Washington, D.C.
-
-## Traditional Journal Article Examples
-
-Bradbury, J. W., S. L. Vehrencamp, K. E. Clifton, and L. M. Clifton. 1996. The relationship between bite rate and local forage abundance in wild Thomson’s gazelles. Ecology 77:2237–2255.
-
-Oakley, K. L., L. P. Thomas, and S. G. Fancy. 2003. Guidelines for long-term monitoring protocols. Wildlife Society Bulletin 31(4):1000–1003.
-
-Sawaya, M. A., T. K. Ruth, S. Creel, J. J. Rotella, J. B. Stetz, H. B. Quigley, and S. T. Kalinowski. 2011. Evaluation of noninvasive genetic sampling methods for cougars in Yellowstone National Park. The Journal of Wildlife Management 75(3):612–622.
-
-## Book Example
-
-Harvill, A. M., Jr., T. R. Bradley, C. E. Stevens, T. F. Wieboldt, D. M. E. Ware, D. W. Ogle, and G. W. Ramsey. 1992. Atlas of the Virginia flora, third edition. Virginia Botanical Associates, Farmville, Virginia.
-
-## Book Chapter Examples
-
-McCauly, E. 1984. The estimation of abundance and biomass of zooplankton in samples. Pages 228–265 in J. A. Dowling and F. H. Rigler, editors. A manual on methods for the assessment of secondary productivity in fresh waters. Blackwell Scientific, Oxford, UK.
-
-Watson, P. J. 2004. Of caves and shell mounds in west-central Kentucky. Pages 159–164 in Of caves and shell mounds. The University of Alabama Press, Tuscaloosa, Alabama.
-
-## Published Report Examples
-
-Bass, S., R. E. Gallipeau, Jr., M. Van Stappen, J. Kumer, M. Wessner, S. Petersburg, L. L. Hays, J. Milstone, M. Soukup, M. Fletcher, L. G. Adams, and others. 1988. Highlights of natural resource management 1987. National Park Service, Denver, Colorado.
-
-Holthausen, R. S., M. G. Raphael, K. S. McKelvey, E. D. Forsman, E. E. Starkey, and D. E. Seaman. 1994. The contribution of federal and nonfederal habitats to the persistence of the northern spotted owl on the Olympic Peninsula, Washington. General Technical Report PNW–GTR–352. U.S. Forest Service, Corvallis, Oregon.
-
-Jackson, L. L., and L. P. Gough. 1991. Seasonal and spatial biogeochemical trends for chaparral vegetation and soil geochemistry in the Santa Monica Mountains National Recreation Area. U.S. Geological Survey, Denver. Open File Report 91–0005.
-
-## Unpublished Report Examples
-
-Conant, B., and J. I. Hodges. 1995. Western brant population estimates. U.S. Fish and Wildlife Service Unpublished Report, Juneau, Alaska.
-
-Conant, B., and J. F. Voelzer. 2001. Winter waterfowl survey: Mexico west coast and Baja California. U.S. Fish and Wildlife Service Unpublished Report, Juneau, Alaska.
-
-## Thesis/Dissertation Examples
-
-Diong, C. H. 1982. Population and biology of the feral pig (Sus scrofa L) in Kipahulu Valley, Mau’i. Dissertation. University of Hawai’i, Honolulu, Hawai’i.
-
-McTigue, K. M. 1992. Nutrient pulses and herbivory: Integrative control of primary producers in lakes. Thesis. University of Wisconsin, Madison, Wisconsin.
-
-## Conference Proceedings Examples
-
-Gunther, K. A. 1994. Changing problems in bear management: Yellowstone National Park twenty-plus years after the dumps. Ninth International Conference on Bear Research and Management. Missoula, MT, International Association for Bear Research and Management, Bozeman, Montana, February 1992:549–560.
-
-Webb, J. R., and J. N. Galloway. 1991. Potential acidification of streams in Mid-Appalachian Highlands: A problem with generalized assessments. Southern Appalachian Man and Biosphere Conference. Gatlinburg, Tennessee.
-
-## General Internet Examples
-
-Colorado Native Plant Society. 2016. Colorado Native Plant Society website. Available at: (accessed 07 March 2016).
-
-National Park Service (NPS). 2016a. IRMA Portal (Integrated Resource Management Applications) website. Available at: (accessed 07 March 2016).
-
-National Park Service (NPS). 2016b. Natural Resource Publications Management website. Available at: (accessed 07 March 2016).
-
-United States Fish and Wildlife Service (USFWS). 2016. Endangered Species website. Available at: (accessed 07 March 2016).
-
-## Online Data Warehouse Sites (sites that allow you see and download data from multiple sources)
-
-National Oceanic and Atmospheric Administration (NOAA). 2016. NOAA National Climatic Data Center website. Available at: (accessed 07 March 2016).
-
-Environmental Protection Agency (EPA). 2016. Storage and Retrieval Data Warehouse website (STORET). Available at: (accessed 07 March 2016).
-
-National Park Service (NPS). 2016c. NPScape Landscape Dynamics Metric Viewer website. Available at: (accessed 07 March 2016).
-
-National Park Service (NPS). 2016d. NPSpecies online application. Available at: (accessed 07 March 2016).
-
-United States Geological Survey (USGS). 2016. BioData - Aquatic Bioassessment Data for the Nation. Available at: (accessed 07 March 2016).
-
-# Appendix A. Code Listing
-
-In most cases, Code listing is not required. If all QA/QC and data manipulations were performed elsewhere, you should cite that code in the methods (and leave the "Listing" code chunk as the default settings: eval=FALSE and echo=FALSE). If you have developed custom scripts, you can add those to DataStore with the reference type "Script" and cite them in the DRR. Some people have developed code to perform QA/QC or data manipulation within the DRR itself. In that case, you must set the "Listing" code chunk to eval=TRUE and echo=TRUE to fully document the QA/QC process.
-
-```{r listing, ref.label=knitr::all_labels(), echo=TRUE, eval=TRUE}
-
-```
-
-\pagebreak
-
-# Appendix B. Session and Version Information
-
-In most cases you do not need to report session info (leave the "session-info" code chunk parameters in their default state: eval=FALSE). Session and version information is only necessary if you have set the "Listing" code chunk to eval=TRUE in appendix A. In that case, change the "session-info" code chunk parameters to eval=TRUE.
-
-```{r session_info, eval=TRUE, echo=FALSE, cache=FALSE}
-sessionInfo()
-Sys.time()
-```
diff --git a/docs/404.html b/docs/404.html
index bbe9a71..c1a016f 100644
--- a/docs/404.html
+++ b/docs/404.html
@@ -101,12 +101,12 @@
Baker R, Patterson J, DeVivo J, Quevedo I (2024).
+
Baker R, Patterson J, DeVivo J, Quevedo I, Wright S (2024).
QCkit: NPS Inventory and Monitoring Quality Control Toolkit.
R package version 0.1.7, https://github.com/nationalparkservice/QCkit/.
@Manual{,
title = {QCkit: NPS Inventory and Monitoring Quality Control Toolkit},
- author = {Robert Baker and Judd Patterson and Joe DeVivo and Issac Quevedo},
+ author = {Robert Baker and Judd Patterson and Joe DeVivo and Issac Quevedo and Sarah Wright},
year = {2024},
note = {R package version 0.1.7},
url = {https://github.com/nationalparkservice/QCkit/},
@@ -116,11 +120,11 @@
2024-04-17 * Major updates to the DRR template, including: using snake case instead of camel case for variables; updating Table 3 to display filenames only when there are multiple files; fixing multiple issues with footnotes; adding citations to NPSdataverse packages; and adding a section that prints the R code needed to download the data package and load it into R. * Updated the DRR documentation to account for new variable names.
+
2024-04-18 * Added the function generate_ll_from_utm() which supersedes convert_utm_to_ll() and improves upon it in several ways, including accepting a column of UTM coordinates and returning a column of CRS values along with the decimal-degree latitude and longitude. 2024-04-17 * Major updates to the DRR template, including: using snake case instead of camel case for variables; updating Table 3 to display filenames only when there are multiple files; fixing multiple issues with footnotes; adding citations to NPSdataverse packages; and adding a section that prints the R code needed to download the data package and load it into R. * Updated the DRR documentation to account for new variable names.