diff --git a/DESCRIPTION b/DESCRIPTION
index fa94682..73c23eb 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -16,6 +16,8 @@ Description: NetCoMi offers functions for constructing, analyzing, and comparing
microbial compositional data. It also includes functionality for
constructing differential association networks.
License: GPL-3 + file LICENSE
+URL: https://stefpeschel.github.io/NetCoMi, https://github.com/stefpeschel/NetCoMi
+BugReports: https://github.com/stefpeschel/NetCoMi/issues
Depends: SpiecEasi (>= 1.0.6)
Imports:
Biobase,
@@ -70,5 +72,4 @@ Suggests:
RdMacros: Rdpack
Encoding: UTF-8
RoxygenNote: 7.3.2
-URL: https://stefpeschel.github.io/NetCoMi/
VignetteBuilder: knitr
diff --git a/R/NetCoMi-package.R b/R/NetCoMi-package.R
index 31c2e8b..a74a4d4 100644
--- a/R/NetCoMi-package.R
+++ b/R/NetCoMi-package.R
@@ -15,4 +15,5 @@
#' (inspiration and theoretical background)
#' \item Anastasiia Holovchak (package testing and editing)
#' }
+#' @keywords internal
"_PACKAGE"
\ No newline at end of file
diff --git a/R/dot-boottest.R b/R/dot-boottest.R
index 06522d0..f6e3488 100644
--- a/R/dot-boottest.R
+++ b/R/dot-boottest.R
@@ -26,6 +26,7 @@
#' @return \tabular{ll}{ \code{pvals}\tab calculated p-values\cr
#' \code{corrMat}\tab estimated correlation matrix}
#' @references{\insertRef{friedman2012inferring}{NetCoMi}}
+#' @keywords internal
.boottest <- function(countMat, assoMat, nboot = 1000, measure, measurePar,
cores = 4, logFile = NULL, verbose = TRUE, seed = NULL,
diff --git a/R/dot-calcAssociation.R b/R/dot-calcAssociation.R
index b2a3240..1923193 100644
--- a/R/dot-calcAssociation.R
+++ b/R/dot-calcAssociation.R
@@ -18,6 +18,7 @@
#' @importFrom SPRING SPRING
#' @importFrom vegan vegdist
#' @import mixedCCA
+#' @keywords internal
.calcAssociation <- function(countMat, measure, measurePar, verbose) {
diff --git a/R/dot-calcDiffProps.R b/R/dot-calcDiffProps.R
index ac2cb78..7ec3c28 100644
--- a/R/dot-calcDiffProps.R
+++ b/R/dot-calcDiffProps.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.calcDiffProps <- function(adja1,
adja2,
dissMat1,
diff --git a/R/dot-calcJaccard.R b/R/dot-calcJaccard.R
index b4a762d..8209202 100644
--- a/R/dot-calcJaccard.R
+++ b/R/dot-calcJaccard.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.calcJaccard <- function(group1, group2, sigTest, greater) {
N <- length(union(group1, group2))
C <- length(intersect(group1, group2))
diff --git a/R/dot-calcProps.R b/R/dot-calcProps.R
index 4f0bdfd..b9b00c8 100644
--- a/R/dot-calcProps.R
+++ b/R/dot-calcProps.R
@@ -71,7 +71,7 @@
#' @importFrom igraph graph_from_adjacency_matrix decompose.graph
#' @importFrom stats hclust as.dist cutree qlnorm quantile
#' @importFrom pulsar natural.connectivity
-
+#' @keywords internal
.calcProps <- function(adjaMat,
dissMat,
diff --git a/R/dot-checkArgsPlotDiff.R b/R/dot-checkArgsPlotDiff.R
index 4e88475..6eae1ce 100644
--- a/R/dot-checkArgsPlotDiff.R
+++ b/R/dot-checkArgsPlotDiff.R
@@ -1,5 +1,5 @@
# Argument checking for function plot.diffnet()
-
+#' @keywords internal
.checkArgsPlotDiff<- function(args) {
# Variable for collecting error messages
diff --git a/R/dot-checkPackNetConst.R b/R/dot-checkPackNetConst.R
index 064d03b..039fa63 100644
--- a/R/dot-checkPackNetConst.R
+++ b/R/dot-checkPackNetConst.R
@@ -1,3 +1,4 @@
+#' @keywords internal
# Check whether packages needed for the current netConstruct() run
# are available.
diff --git a/R/dot-checkPlausNetConst.R b/R/dot-checkPlausNetConst.R
index efa6219..c96c669 100644
--- a/R/dot-checkPlausNetConst.R
+++ b/R/dot-checkPlausNetConst.R
@@ -1,3 +1,4 @@
+#' @keywords internal
# Plausibility checking for arguments of netConstruct()
.checkPlausNetConst <- function(dataType,
diff --git a/R/dot-filterEdges.R b/R/dot-filterEdges.R
deleted file mode 100644
index ae2f0b7..0000000
--- a/R/dot-filterEdges.R
+++ /dev/null
@@ -1,29 +0,0 @@
-.filterEdges <- function(adja, assoEst, dissEst, edgeFilter, edgeFilterPar) {
- if (edgeFilter == "threshold") {
-
- if (is.null(dissEst)) { # if association network
- assoEst <- assoEst[rownames(adja), colnames(adja)]
-
- adja[abs(assoEst) < edgeFilterPar] <- 0
-
- } else { # if dissimilarity network
- dissEst <- dissEst[rownames(adja), colnames(adja)]
-
- adja[dissEst > edgeFilterPar] <- 0
- }
-
- } else if (edgeFilter == "highestWeight") {
-
- adjaSort <- sort(adja[lower.tri(adja)], decreasing = TRUE)
-
- if (edgeFilterPar > length(adjaSort)) {
- stop(paste0("'edgeFilterPar' must be smaller than maximum number ",
- "of edges (", length(adjaSort), ")."))
- }
-
- adja[adja < adjaSort[edgeFilterPar]] <- 0
- }
-
- return(adja)
-}
-
diff --git a/R/dot-filterFunctions.R b/R/dot-filterFunctions.R
new file mode 100644
index 0000000..f1f2af6
--- /dev/null
+++ b/R/dot-filterFunctions.R
@@ -0,0 +1,276 @@
+#' @keywords internal
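+# Filter edges of an adjacency matrix. "threshold" sets edges to zero whose
+# absolute association is below 'edgeFilterPar' (or whose dissimilarity is
+# above it); "highestWeight" keeps only the 'edgeFilterPar' edges with the
+# highest weights. Returns the filtered adjacency matrix.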
+.filterEdges <- function(adja, assoEst, dissEst, edgeFilter, edgeFilterPar) {
+ if (edgeFilter == "threshold") {
+
+ if (is.null(dissEst)) { # if association network
+ assoEst <- assoEst[rownames(adja), colnames(adja)]
+
+ adja[abs(assoEst) < edgeFilterPar] <- 0
+
+ } else { # if dissimilarity network
+ dissEst <- dissEst[rownames(adja), colnames(adja)]
+
+ adja[dissEst > edgeFilterPar] <- 0
+ }
+
+ } else if (edgeFilter == "highestWeight") {
+
+ adjaSort <- sort(adja[lower.tri(adja)], decreasing = TRUE)
+
+ if (edgeFilterPar > length(adjaSort)) {
+ stop(paste0("'edgeFilterPar' must be smaller than maximum number ",
+ "of edges (", length(adjaSort), ")."))
+ }
+
+ adja[adja < adjaSort[edgeFilterPar]] <- 0
+ }
+
+ return(adja)
+}
+
+#' @keywords internal
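+# Select the nodes to keep in the network plot. If a layout matrix is given,
+# only nodes contained in the layout are kept. Otherwise, nodes are selected
+# according to 'nodeFilter': highest connectivity, degree, betweenness,
+# closeness, or eigenvector centrality, cluster membership ("clustTaxon",
+# "clustMin"), or explicit node names. Returns the names of the kept nodes.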
+.filterNodes <- function(adja, nodeFilter, nodeFilterPar, layout,
+ degree, between, close, eigen, cluster) {
+
+ adja.alltax <- adja
+
+ if (!is.null(layout) & is.matrix(layout)) {
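+    # A user-supplied layout matrix takes precedence: keep only the nodes
+    # that appear in its row names.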
+ keep <- colnames(adja)[which(colnames(adja) %in% rownames(layout))]
+
+ } else if (nodeFilter != "none") {
+
+ if (nodeFilter == "highestConnect") {
+ adja.tmp <- adja
+
+ diag(adja.tmp) <- 0
+ conct <- Matrix::rowSums(abs(adja.tmp))
+
+ conct <- names(sort(conct))[1:nodeFilterPar]
+ keep <- colnames(adja)[which(colnames(adja) %in% conct)]
+
+ } else if (nodeFilter == "highestDegree") {
+ sel <- names(sort(degree, decreasing = TRUE)[1:nodeFilterPar])
+ keep <- colnames(adja)[which(colnames(adja) %in% sel)]
+
+ } else if (nodeFilter == "highestBetween") {
+ sel <- names(sort(between, decreasing = TRUE)[1:nodeFilterPar])
+
+ keep <- colnames(adja)[which(colnames(adja) %in% sel)]
+
+ } else if (nodeFilter == "highestClose") {
+ sel <- names(sort(close, decreasing = TRUE)[1:nodeFilterPar])
+
+ keep <- colnames(adja)[which(colnames(adja) %in% sel)]
+
+ } else if (nodeFilter == "highestEigen") {
+ sel <- names(sort(eigen, decreasing = TRUE)[1:nodeFilterPar])
+
+ keep <- colnames(adja)[which(colnames(adja) %in% sel)]
+
+ } else if (nodeFilter == "clustTaxon") {
+ stopifnot(all(nodeFilterPar %in% colnames(adja)))
+
+ selClust <- cluster[nodeFilterPar]
+ keep <- names(cluster[cluster %in% selClust])
+ #keep <- names(cluster) %in% selnodes
+
+ } else if (nodeFilter == "clustMin") {
+ clusttab <- table(cluster)
+ selclust <- names(clusttab[clusttab >= nodeFilterPar &
+ names(clusttab) != 0])
+
+ keep <- names(cluster[cluster %in% selclust])
+
+ } else if (nodeFilter == "names") {
+ stopifnot(all(nodeFilterPar %in% colnames(adja)))
+ keep <- colnames(adja)[which(colnames(adja) %in% nodeFilterPar)]
+ }
+
+ } else {
+ keep <- colnames(adja)
+ }
+
+ # names_alltaxa <- matrix(NA, nrow = ncol(adja.alltax1), ncol = 2)
+ # names_alltaxa[, 1] <- colnames(adja.alltax1)
+ # names_alltaxa[, 2] <- colnames(x$input$adja1)
+ # rownames(names_alltaxa) <- names_alltaxa[,1]
+ #
+ # unitnames <- union(colnames(adja), colnames(adja2))
+ # names_selected_taxa <- matrix(NA, nrow = length(unitnames), ncol = 2)
+ # names_selected_taxa[, 1] <- unitnames
+ # names_selected_taxa[, 2] <- names_alltaxa[unitnames, 2]
+ # rownames(names_alltaxa) <- NULL
+ #
+ # if (labelsToFile == "all") {
+ # write.matrix(names_alltaxa, file="taxalabels.txt")
+ # } else if (labelsToFile == "selected") {
+ # write.matrix(names_selected_taxa, file="taxalabels.txt")
+ # }
+ #
+ #
+ # colnames(names_alltaxa) <- colnames(names_selected_taxa) <- c("shortened",
+ # "original")
+ #
+ # taxalabels <- list(all_taxa = names_alltaxa,
+ # selected_taxa = names_selected_taxa)
+
+ rm(adja.alltax)
+
+ return(keep = keep)
+
+}
+
+
+#' @keywords internal
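+# Sample filtering: keep the samples with the highest total frequency
+# ("highestFreq"; not combinable with other methods), a minimum number of
+# total reads ("totalReads"), and/or a minimum number of non-zero taxa
+# ("numbTaxa"). Returns the row names of the samples to keep.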
+.filterSamples <- function(countMat, filter, filterParam) {
+
+ countMat_orig <- countMat
+
+ if ("highestFreq" %in% filter) {
+ highestFreq <-
+ ifelse(is.null(filterParam$highestFreq),
+ 100,
+ filterParam$highestFreq)
+ highestFreq <- min(nrow(countMat), highestFreq)
+
+ if (length(filter) > 1) {
+ stop('Filter method "highestFreq" not comparable with other ',
+ 'filter methods.')
+ }
+ names_highFreq <- names(sort(Matrix::rowSums(countMat),
+ decreasing = TRUE)[1:highestFreq])
+ #rmRows <- which(!rownames(countMat) %in% names_highFreq)
+ keepRows <- names_highFreq
+
+ } else {
+
+ if ("totalReads" %in% filter) {
+ totalReads <-
+ ifelse(is.null(filterParam$totalReads),
+ 1000,
+ filterParam$totalReads)
+ idx_totalreads <- which(Matrix::rowSums(countMat) >= totalReads)
+ } else {
+ idx_totalreads <- 1:nrow(countMat)
+ }
+
+ if ("numbTaxa" %in% filter) {
+ numbTaxa <- ifelse(is.null(filterParam$numbTaxa),
+ 0.1,
+ filterParam$numbTaxa)
+ stopifnot(numbTaxa > 0)
+ if (numbTaxa < 1) {
+ numbTaxa <- round(numbTaxa * nrow(countMat))
+ }
+
+ idx_numbTaxa = numeric(0)
+
+ for (i in 1:nrow(countMat)) {
+ if (length(which(!countMat[i,] == 0)) >= numbTaxa) {
+ idx_numbTaxa <- append(idx_numbTaxa, i)
+ }
+ }
+ } else {
+ idx_numbTaxa <- 1:nrow(countMat)
+ }
+
+ keepRows <- rownames(countMat[intersect(idx_totalreads, idx_numbTaxa), ])
+ }
+
+ return(keepRows)
+}
+
+
+#' @keywords internal
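+# Taxa filtering: keep the taxa with the highest variance ("highestVar") or
+# highest total frequency ("highestFreq"; both not combinable with other
+# methods), or taxa meeting a minimum relative frequency ("relFreq"), total
+# read count ("totalReads"), and/or number of samples in which they occur
+# ("numbSamp"). Returns the column names of the taxa to keep.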
+.filterTaxa <- function(countMat, filter, filterParam) {
+
+ countMat_orig <- countMat
+ #---------------------------------------------------------------------------
+ # filter taxa of interest
+
+ if ("highestVar" %in% filter) {
+ highestVar <-
+ ifelse(is.null(filterParam$highestVar),
+ 100,
+ filterParam$highestVar)
+
+ highestVar <- min(ncol(countMat), highestVar)
+
+ if (length(filter) > 1) {
+ stop('Filter method "highestVar" not comparable with other ',
+ 'filter methods.')
+ }
+ var_taxa <- apply(countMat, 2, var)
+ keepCols <- names(sort(var_taxa, decreasing = TRUE)[1:highestVar])
+
+ #countMat <- countMat[, names_highvar]
+
+ } else if ("highestFreq" %in% filter) {
+ highestFreq <-
+ ifelse(is.null(filterParam$highestFreq),
+ 100,
+ filterParam$highestFreq)
+
+ highestFreq <- min(ncol(countMat), highestFreq)
+
+ if (length(filter) > 1) {
+ stop('Filter method "highestFreq" not comparable with other ',
+ 'filter methods.')
+ }
+ keepCols <- names(sort(Matrix::colSums(countMat),
+ decreasing = TRUE)[1:highestFreq])
+ #countMat <- countMat[, names_highFreq]
+
+ } else {
+ if ("relFreq" %in% filter) {
+ relFreq <-
+ ifelse(is.null(filterParam$relFreq),
+ 0.01,
+ filterParam$relFreq)
+ idx_relfreq <- which(Matrix::colSums(countMat) >=
+ sum(Matrix::colSums(countMat)) * relFreq)
+ } else {
+ idx_relfreq <- 1:ncol(countMat)
+ }
+
+
+ if ("totalReads" %in% filter) {
+ totalReads <-
+ ifelse(is.null(filterParam$totalReads),
+ 1000,
+ filterParam$totalReads)
+ idx_totalreads <- which(Matrix::colSums(countMat) >= totalReads)
+ } else {
+ idx_totalreads <- 1:ncol(countMat)
+ }
+
+ if ("numbSamp" %in% filter) {
+ numbSamp <- ifelse(is.null(filterParam$numbSamp), 0.1,
+ filterParam$numbSamp)
+
+ stopifnot(numbSamp > 0)
+
+ if (numbSamp < 1) {
+ numbSamp <- round(numbSamp * nrow(countMat))
+ }
+
+ idx_numbSamp = numeric(0)
+
+ for (i in 1:ncol(countMat)) {
+ if (length(which(!countMat[, i] == 0)) >= numbSamp) {
+ idx_numbSamp <- append(idx_numbSamp, i)
+ }
+ }
+ } else {
+ idx_numbSamp <- 1:ncol(countMat)
+ }
+
+ keepCols <- colnames(countMat[, intersect(intersect(idx_relfreq,
+ idx_totalreads),
+ idx_numbSamp)])
+ }
+
+ #rmCols <- which(!colnames(countMat_orig) %in% colnames(countMat))
+
+ return(keepCols)
+}
diff --git a/R/dot-filterNodes.R b/R/dot-filterNodes.R
deleted file mode 100644
index ca6d766..0000000
--- a/R/dot-filterNodes.R
+++ /dev/null
@@ -1,91 +0,0 @@
-.filterNodes <- function(adja, nodeFilter, nodeFilterPar, layout,
- degree, between, close, eigen, cluster) {
-
- adja.alltax <- adja
-
- if (!is.null(layout) & is.matrix(layout)) {
- keep <- colnames(adja)[which(colnames(adja) %in% rownames(layout))]
-
- } else if (nodeFilter != "none") {
-
- if (nodeFilter == "highestConnect") {
- adja.tmp <- adja
-
- diag(adja.tmp) <- 0
- conct <- Matrix::rowSums(abs(adja.tmp))
-
- conct <- names(sort(conct))[1:nodeFilterPar]
- keep <- colnames(adja)[which(colnames(adja) %in% conct)]
-
- } else if (nodeFilter == "highestDegree") {
- sel <- names(sort(degree, decreasing = TRUE)[1:nodeFilterPar])
- keep <- colnames(adja)[which(colnames(adja) %in% sel)]
-
- } else if (nodeFilter == "highestBetween") {
- sel <- names(sort(between, decreasing = TRUE)[1:nodeFilterPar])
-
- keep <- colnames(adja)[which(colnames(adja) %in% sel)]
-
- } else if (nodeFilter == "highestClose") {
- sel <- names(sort(close, decreasing = TRUE)[1:nodeFilterPar])
-
- keep <- colnames(adja)[which(colnames(adja) %in% sel)]
-
- } else if (nodeFilter == "highestEigen") {
- sel <- names(sort(eigen, decreasing = TRUE)[1:nodeFilterPar])
-
- keep <- colnames(adja)[which(colnames(adja) %in% sel)]
-
- } else if (nodeFilter == "clustTaxon") {
- stopifnot(all(nodeFilterPar %in% colnames(adja)))
-
- selClust <- cluster[nodeFilterPar]
- keep <- names(cluster[cluster %in% selClust])
- #keep <- names(cluster) %in% selnodes
-
- } else if (nodeFilter == "clustMin") {
- clusttab <- table(cluster)
- selclust <- names(clusttab[clusttab >= nodeFilterPar &
- names(clusttab) != 0])
-
- keep <- names(cluster[cluster %in% selclust])
-
- } else if (nodeFilter == "names") {
- stopifnot(all(nodeFilterPar %in% colnames(adja)))
- keep <- colnames(adja)[which(colnames(adja) %in% nodeFilterPar)]
- }
-
- } else {
- keep <- colnames(adja)
- }
-
- # names_alltaxa <- matrix(NA, nrow = ncol(adja.alltax1), ncol = 2)
- # names_alltaxa[, 1] <- colnames(adja.alltax1)
- # names_alltaxa[, 2] <- colnames(x$input$adja1)
- # rownames(names_alltaxa) <- names_alltaxa[,1]
- #
- # unitnames <- union(colnames(adja), colnames(adja2))
- # names_selected_taxa <- matrix(NA, nrow = length(unitnames), ncol = 2)
- # names_selected_taxa[, 1] <- unitnames
- # names_selected_taxa[, 2] <- names_alltaxa[unitnames, 2]
- # rownames(names_alltaxa) <- NULL
- #
- # if (labelsToFile == "all") {
- # write.matrix(names_alltaxa, file="taxalabels.txt")
- # } else if (labelsToFile == "selected") {
- # write.matrix(names_selected_taxa, file="taxalabels.txt")
- # }
- #
- #
- # colnames(names_alltaxa) <- colnames(names_selected_taxa) <- c("shortened",
- # "original")
- #
- # taxalabels <- list(all_taxa = names_alltaxa,
- # selected_taxa = names_selected_taxa)
-
- rm(adja.alltax)
-
- return(keep = keep)
-
-}
-
diff --git a/R/dot-filterSamples.R b/R/dot-filterSamples.R
deleted file mode 100644
index 5ce877c..0000000
--- a/R/dot-filterSamples.R
+++ /dev/null
@@ -1,57 +0,0 @@
-.filterSamples <- function(countMat, filter, filterParam) {
-
- countMat_orig <- countMat
-
- if ("highestFreq" %in% filter) {
- highestFreq <-
- ifelse(is.null(filterParam$highestFreq),
- 100,
- filterParam$highestFreq)
- highestFreq <- min(nrow(countMat), highestFreq)
-
- if (length(filter) > 1) {
- stop('Filter method "highestFreq" not comparable with other ',
- 'filter methods.')
- }
- names_highFreq <- names(sort(Matrix::rowSums(countMat),
- decreasing = TRUE)[1:highestFreq])
- #rmRows <- which(!rownames(countMat) %in% names_highFreq)
- keepRows <- names_highFreq
-
- } else {
-
- if ("totalReads" %in% filter) {
- totalReads <-
- ifelse(is.null(filterParam$totalReads),
- 1000,
- filterParam$totalReads)
- idx_totalreads <- which(Matrix::rowSums(countMat) >= totalReads)
- } else {
- idx_totalreads <- 1:nrow(countMat)
- }
-
- if ("numbTaxa" %in% filter) {
- numbTaxa <- ifelse(is.null(filterParam$numbTaxa),
- 0.1,
- filterParam$numbTaxa)
- stopifnot(numbTaxa > 0)
- if (numbTaxa < 1) {
- numbTaxa <- round(numbTaxa * nrow(countMat))
- }
-
- idx_numbTaxa = numeric(0)
-
- for (i in 1:nrow(countMat)) {
- if (length(which(!countMat[i,] == 0)) >= numbTaxa) {
- idx_numbTaxa <- append(idx_numbTaxa, i)
- }
- }
- } else {
- idx_numbTaxa <- 1:nrow(countMat)
- }
-
- keepRows <- rownames(countMat[intersect(idx_totalreads, idx_numbTaxa), ])
- }
-
- return(keepRows)
-}
diff --git a/R/dot-filterTaxa.R b/R/dot-filterTaxa.R
deleted file mode 100644
index 9de96b2..0000000
--- a/R/dot-filterTaxa.R
+++ /dev/null
@@ -1,92 +0,0 @@
-.filterTaxa <- function(countMat, filter, filterParam) {
-
- countMat_orig <- countMat
- #---------------------------------------------------------------------------
- # filter taxa of interest
-
- if ("highestVar" %in% filter) {
- highestVar <-
- ifelse(is.null(filterParam$highestVar),
- 100,
- filterParam$highestVar)
-
- highestVar <- min(ncol(countMat), highestVar)
-
- if (length(filter) > 1) {
- stop('Filter method "highestVar" not comparable with other ',
- 'filter methods.')
- }
- var_taxa <- apply(countMat, 2, var)
- keepCols <- names(sort(var_taxa, decreasing = TRUE)[1:highestVar])
-
- #countMat <- countMat[, names_highvar]
-
- } else if ("highestFreq" %in% filter) {
- highestFreq <-
- ifelse(is.null(filterParam$highestFreq),
- 100,
- filterParam$highestFreq)
-
- highestFreq <- min(ncol(countMat), highestFreq)
-
- if (length(filter) > 1) {
- stop('Filter method "highestFreq" not comparable with other ',
- 'filter methods.')
- }
- keepCols <- names(sort(Matrix::colSums(countMat),
- decreasing = TRUE)[1:highestFreq])
- #countMat <- countMat[, names_highFreq]
-
- } else {
- if ("relFreq" %in% filter) {
- relFreq <-
- ifelse(is.null(filterParam$relFreq),
- 0.01,
- filterParam$relFreq)
- idx_relfreq <- which(Matrix::colSums(countMat) >=
- sum(Matrix::colSums(countMat)) * relFreq)
- } else {
- idx_relfreq <- 1:ncol(countMat)
- }
-
-
- if ("totalReads" %in% filter) {
- totalReads <-
- ifelse(is.null(filterParam$totalReads),
- 1000,
- filterParam$totalReads)
- idx_totalreads <- which(Matrix::colSums(countMat) >= totalReads)
- } else {
- idx_totalreads <- 1:ncol(countMat)
- }
-
- if ("numbSamp" %in% filter) {
- numbSamp <- ifelse(is.null(filterParam$numbSamp), 0.1,
- filterParam$numbSamp)
-
- stopifnot(numbSamp > 0)
-
- if (numbSamp < 1) {
- numbSamp <- round(numbSamp * nrow(countMat))
- }
-
- idx_numbSamp = numeric(0)
-
- for (i in 1:ncol(countMat)) {
- if (length(which(!countMat[, i] == 0)) >= numbSamp) {
- idx_numbSamp <- append(idx_numbSamp, i)
- }
- }
- } else {
- idx_numbSamp <- 1:ncol(countMat)
- }
-
- keepCols <- colnames(countMat[, intersect(intersect(idx_relfreq,
- idx_totalreads),
- idx_numbSamp)])
- }
-
- #rmCols <- which(!colnames(countMat_orig) %in% colnames(countMat))
-
- return(keepCols)
-}
diff --git a/R/dot-getClustCols.R b/R/dot-getClustCols.R
index e3b72d0..faefb56 100644
--- a/R/dot-getClustCols.R
+++ b/R/dot-getClustCols.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.getClustCols <- function(clust1, clust2, adja1, adja2, kept1, kept2, isempty1,
isempty2, colorVec, sameClustCol, sameColThresh,
twoNets) {
diff --git a/R/dot-getNodeSize.R b/R/dot-getNodeSize.R
index ec8837a..b406c1c 100644
--- a/R/dot-getNodeSize.R
+++ b/R/dot-getNodeSize.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.getNodeSize <- function(nodeSize, normPar, nodeSizeSpread, adja, countMat,
normCounts, assoType, kept, cexNodes, cexHubs, hubs,
highlightHubs, degree, between, close, eigen) {
diff --git a/R/dot-getPermGroupMat.R b/R/dot-getPermGroupMat.R
index 77f7a28..f990a99 100644
--- a/R/dot-getPermGroupMat.R
+++ b/R/dot-getPermGroupMat.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.getPermGroupMat <- function(n1, n2, n, nPerm, matchDesign) {
# group vector: n1 samples in group1 and n2 samples in group 2
diff --git a/R/dot-getVecNames.R b/R/dot-getVecNames.R
index ad9eb60..93c5c07 100644
--- a/R/dot-getVecNames.R
+++ b/R/dot-getVecNames.R
@@ -8,6 +8,7 @@
#'
#' @param x symmetric matrix for which the column and row names should be
#' returned as a vector
+#' @keywords internal
.getVecNames <- function(x) {
diff --git a/R/dot-normCounts.R b/R/dot-normCounts.R
index eeb784e..db2cef7 100644
--- a/R/dot-normCounts.R
+++ b/R/dot-normCounts.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.normCounts <- function(countMat, normMethod, normParam, zeroMethod, needfrac,
verbose) {
diff --git a/R/dot-permTestDiffAsso.R b/R/dot-permTestDiffAsso.R
index 1d05ca6..1d91d7c 100644
--- a/R/dot-permTestDiffAsso.R
+++ b/R/dot-permTestDiffAsso.R
@@ -87,6 +87,7 @@
#' \insertRef{knijnenburg2009fewer}{NetCoMi}\cr\cr
#'
#' @seealso \code{\link{diffnet}}
+#' @keywords internal
.permTestDiffAsso <- function(countMat1, countMat2, countsJoint,
normCounts1, normCounts2,
diff --git a/R/dot-sparsify.R b/R/dot-sparsify.R
index c15c50b..58e7779 100644
--- a/R/dot-sparsify.R
+++ b/R/dot-sparsify.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.sparsify <- function(assoMat,
countMat,
sampleSize,
diff --git a/R/dot-zeroTreat.R b/R/dot-zeroTreat.R
index 2ce1f7d..a5fb5e9 100644
--- a/R/dot-zeroTreat.R
+++ b/R/dot-zeroTreat.R
@@ -1,3 +1,4 @@
+#' @keywords internal
.zeroTreat <- function(countMat, zeroMethod, zeroParam,
needfrac, needint, verbose) {
diff --git a/_pkgdown.yml b/_pkgdown.yml
index b3a4f4d..c10ed29 100644
--- a/_pkgdown.yml
+++ b/_pkgdown.yml
@@ -1,17 +1,73 @@
url: https://stefpeschel.github.io/NetCoMi/
template:
bootstrap: 5
+ light-switch: true
+ bootswatch: simplex
+ bslib:
+ primary: "#a2bf48"
+
+navbar:
+ structure:
+ left: [intro, reference, articles, news]
+ right: [search, github, lightswitch]
+ bg: primary
+
+home:
+ links:
+ - text: Orchestrating Microbiome Analysis
+ href: https://microbiome.github.io/OMA/docs/devel/pages/network_learning.html
articles:
- title: Main functionality
navbar: ~
contents:
- - netcompare
+ - net_comparison
- diffnet
- dissimilarity_networks
+ - create_asso_perm
- title: More examples
navbar: More examples
contents:
- asso_as_input
- - soil_example
\ No newline at end of file
+ - cross_domain_networks
+ - soil_example
+
+reference:
+- title: Main functions
+  desc: Main functions covering the workflow of constructing, analyzing, and comparing microbiome networks.
+- contents:
+ - netConstruct
+ - netAnalyze
+ - netCompare
+ - diffnet
+ - installNetCoMiPacks
+- title: Association estimation
+ desc: Association estimation methods implemented in NetCoMi.
+- contents:
+ - cclasso
+ - gcoda
+- title: Helpers
+  desc: Helper functions that are mainly used by the other NetCoMi functions.
+- contents:
+ - createAssoPerm
+ - colToTransp
+ - calcGCD
+ - calcGCM
+ - editLabels
+ - multAdjust
+ - plotHeat
+ - renameTaxa
+ - testGCM
+- title: S3 methods
+- contents:
+ - plot.diffnet
+ - plot.microNetProps
+ - print.GCD
+ - summary.microNetComp
+ - summary.microNetProps
+
+authors:
+ Stefanie Peschel:
+ href: https://stefpeschel.de/
+
\ No newline at end of file
diff --git a/inst/CITATION b/inst/CITATION
index 75bddb6..be8ffa6 100644
--- a/inst/CITATION
+++ b/inst/CITATION
@@ -1,8 +1,8 @@
-citHeader("To cite NetCoMi in publications use:")
+citHeader("To cite the package 'NetCoMi' in publications use:")
title <- "{NetCoMi: Network Construction and Comparison for Microbiome Data}"
title_text <- "NetCoMi: Network Construction and Comparison for Microbiome Data"
-url <- meta$URL
+url <- "https://stefpeschel.github.io/NetCoMi"
year <- sub("-.*", "", meta$Date)
note <- sprintf("R package version %s", meta$Version)
diff --git a/man/NetCoMi-package.Rd b/man/NetCoMi-package.Rd
index 52359d4..d9ca788 100644
--- a/man/NetCoMi-package.Rd
+++ b/man/NetCoMi-package.Rd
@@ -29,3 +29,4 @@ Useful links:
\item Anastasiia Holovchak (package testing and editing)
}
}
+\keyword{internal}
diff --git a/man/dot-boottest.Rd b/man/dot-boottest.Rd
index 1c7fe82..5e4f3bf 100644
--- a/man/dot-boottest.Rd
+++ b/man/dot-boottest.Rd
@@ -59,3 +59,4 @@ Statistical significance of correlations between pairs of
\references{
{\insertRef{friedman2012inferring}{NetCoMi}}
}
+\keyword{internal}
diff --git a/man/dot-calcAssociation.Rd b/man/dot-calcAssociation.Rd
index 69a1b7d..53dc6a9 100644
--- a/man/dot-calcAssociation.Rd
+++ b/man/dot-calcAssociation.Rd
@@ -22,3 +22,4 @@ estimating associations/dissimilarities}
Computes associations between taxa or distances between subjects
for a given read count matrix
}
+\keyword{internal}
diff --git a/man/dot-calcProps.Rd b/man/dot-calcProps.Rd
index c36b952..d633b7b 100644
--- a/man/dot-calcProps.Rd
+++ b/man/dot-calcProps.Rd
@@ -127,3 +127,4 @@ for the largest connected component (LCC). If \code{TRUE}
\description{
Calculates network properties for a given adjacency matrix
}
+\keyword{internal}
diff --git a/man/dot-getVecNames.Rd b/man/dot-getVecNames.Rd
index 1bfe483..b1376a1 100644
--- a/man/dot-getVecNames.Rd
+++ b/man/dot-getVecNames.Rd
@@ -17,3 +17,4 @@ The function generates names for a vector that contains elements
and changed such that the names are generated by columns (and not by rows
as in the original function, which produces implausible results).
}
+\keyword{internal}
diff --git a/man/dot-permTestDiffAsso.Rd b/man/dot-permTestDiffAsso.Rd
index f3a3f30..5809b3f 100644
--- a/man/dot-permTestDiffAsso.Rd
+++ b/man/dot-permTestDiffAsso.Rd
@@ -155,3 +155,4 @@ The function implements procedures to test whether pairs of taxa
\seealso{
\code{\link{diffnet}}
}
+\keyword{internal}
diff --git a/tutorials/TUTORIAL_adapt_labels.Rmd b/tutorials/TUTORIAL_adapt_labels.Rmd
deleted file mode 100644
index eaf8347..0000000
--- a/tutorials/TUTORIAL_adapt_labels.Rmd
+++ /dev/null
@@ -1,219 +0,0 @@
----
-output: github_document
----
-
-```{r setup, echo = FALSE}
-knitr::opts_chunk$set(fig.path="figures/tutorial_adapt/")
-```
-
-## Tutorial: How to adapt taxa names in the network plot
-
-
-#### Use genus names instead of ASV names: Method 1
-
-In this method, genus names are passed as label vector to the plot function,
-so that genus names are shown in the plot, but not in the summary outputs.
-
-```{r load packages, results='hide', message=FALSE, warning=FALSE}
-library(NetCoMi)
-library(phyloseq)
-```
-
-```{r method_1}
-data("amgut2.filt.phy")
-
-colnames(amgut2.filt.phy@tax_table@.Data) <- c("Kingdom","Phylum","Class",
- "Order", "Family", "Genus",
- "Species")
-
-# Agglomerate to genus level
-amgut_genus <- tax_glom(amgut2.filt.phy,"Genus")
-
-# Split data set into seasonal allergies = yes/no
-levels(phyloseq::get_variable(amgut_genus, "SEASONAL_ALLERGIES"))
-amgut_g_yes = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "yes")
-amgut_g_no = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "no")
-
-
-# Network construction
-net_season <- netConstruct(data = amgut_g_yes,
- data2 = amgut_g_no,
- measure = "pearson",
- normMethod = "clr",
- zeroMethod = "none",
- sparsMethod = "threshold",
- thresh = 0.4,
- verbose = 1,
- seed = 123456)
-
-# Network analysis
-props_season <- netAnalyze(net_season,
- clustMethod = "cluster_fast_greedy")
-
-summary(props_season, showCentr = c("degree", "eigenvector"))
-
-# Create label vector
-labels <- as.vector(tax_table(amgut_genus)[, "Genus"])
-names(labels) <- rownames(tax_table(amgut_genus))
-
-# Optional: Shorten labels to a desired length
-#labels <- substr(labels, 1, 9)
-
-plot(props_season,
- repulsion = 0.9,
- labels = labels,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "intelligent",
- labelPattern = c(5,"'",3),
- labelLength = 10,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0.8,
- cexHubLabels = 0.8,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-
-# With shortened node labels:
-labels <- substr(labels, 1, 9)
-
-plot(props_season,
- repulsion = 0.9,
- labels = labels,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "intelligent",
- labelPattern = c(5,"'",3),
- labelLength = 10,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0.8,
- cexHubLabels = 0.8,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-
-```
-
-
-
-#### Use genus names instead of ASV names: Method 2
-
-In this method, row names of the count table (stored as `otu_table` in the
-phyloseq object) are set to genus names.
-
-Advantages:
-
- * Genus names are shown in the summaries
- * The functionality for adapting labels provided by NetCoMi's plot function can
- be applied
-
-
-```{r method_2}
-data("amgut2.filt.phy")
-
-colnames(amgut2.filt.phy@tax_table@.Data) <- c("Kingdom","Phylum","Class",
- "Order", "Family", "Genus",
- "Species")
-
-# Agglomerate to genus level
-amgut_genus <- tax_glom(amgut2.filt.phy,"Genus")
-
-# Split data set into seasonal allergies = yes/no
-levels(phyloseq::get_variable(amgut_genus, "SEASONAL_ALLERGIES"))
-amgut_g_yes = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "yes")
-amgut_g_no = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "no")
-
-
-# Rename taxa
-# (Since NetCoMi uses only the otu table, we just rename it's rownames)
-g_names <- as.vector(tax_table(amgut_genus)[, "Genus"])
-
-taxtab <- amgut_genus@tax_table@.Data
-
-# Make labels unique
-duplis <- g_names[duplicated(g_names) | duplicated(g_names, fromLast=TRUE)]
-
-while(length(duplis) > 0){
- duplis.sel <- duplis[duplis == duplis[1]]
-
- g_names[g_names == duplis.sel[1]] <- paste0(duplis.sel[1], 1:length(duplis.sel))
-
- duplis <- g_names[duplicated(g_names) | duplicated(g_names, fromLast=TRUE)]
-}
-
-rownames(amgut_g_yes@otu_table@.Data) <- g_names
-rownames(amgut_g_no@otu_table@.Data) <- g_names
-
-
-# Network construction
-net_season <- netConstruct(data = amgut_g_yes,
- data2 = amgut_g_no,
- measure = "pearson",
- normMethod = "clr",
- zeroMethod = "none",
- sparsMethod = "threshold",
- thresh = 0.4,
- verbose = 1,
- seed = 123456)
-
-# Network analysis
-props_season <- netAnalyze(net_season,
- clustMethod = "cluster_fast_greedy")
-
-# Network analysis summary (which now shows genus names)
-summary(props_season, showCentr = c("degree", "eigenvector"))
-
-
-plot(props_season,
- repulsion = 0.9,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "simple",
- labelLength = 9,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0.8,
- cexHubLabels = 0.8,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-
-```
-
-
-#### Only hub nodes are labeled
-
-If `cexLabels` is set to zero, only the hub node's labels are plotted.
-
-```{r hub_labels_only}
-plot(props_season,
- repulsion = 0.9,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "simple",
- labelLength = 9,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0,
- cexHubLabels = 1,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-
-```
-
-
diff --git a/tutorials/TUTORIAL_adapt_labels.md b/tutorials/TUTORIAL_adapt_labels.md
deleted file mode 100644
index b5eea3a..0000000
--- a/tutorials/TUTORIAL_adapt_labels.md
+++ /dev/null
@@ -1,434 +0,0 @@
-
-## Tutorial: How to adapt taxa names in the network plot
-
-#### Use genus names instead of ASV names: Method 1
-
-In this method, genus names are passed as label vector to the plot
-function, so that genus names are shown in the plot, but not in the
-summary outputs.
-
-``` r
-library(NetCoMi)
-library(phyloseq)
-```
-
-``` r
-data("amgut2.filt.phy")
-
-colnames(amgut2.filt.phy@tax_table@.Data) <- c("Kingdom","Phylum","Class",
- "Order", "Family", "Genus",
- "Species")
-
-# Agglomerate to genus level
-amgut_genus <- tax_glom(amgut2.filt.phy,"Genus")
-
-# Split data set into seasonal allergies = yes/no
-levels(phyloseq::get_variable(amgut_genus, "SEASONAL_ALLERGIES"))
-```
-
- ## [1] "no" "None" "yes"
-
-``` r
-amgut_g_yes = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "yes")
-amgut_g_no = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "no")
-
-
-# Network construction
-net_season <- netConstruct(data = amgut_g_yes,
- data2 = amgut_g_no,
- measure = "pearson",
- normMethod = "clr",
- zeroMethod = "none",
- sparsMethod = "threshold",
- thresh = 0.4,
- verbose = 1,
- seed = 123456)
-```
-
- ## Infos about changed arguments:
-
- ## Zero replacement needed for clr transformation. 'multRepl' used.
-
- ## 43 taxa and 120 samples remaining in group 1.
-
- ## 43 taxa and 162 samples remaining in group 2.
-
-``` r
-# Network analysis
-props_season <- netAnalyze(net_season,
- clustMethod = "cluster_fast_greedy")
-
-summary(props_season, showCentr = c("degree", "eigenvector"))
-```
-
- ##
- ## Component sizes
- ## ```````````````
- ## Group 1:
- ## size: 14 4 2 1
- ## #: 1 1 3 19
- ##
- ## Group 2:
- ## size: 16 2 1
- ## #: 1 1 25
- ## ______________________________
- ## Global network properties
- ## `````````````````````````
- ## Largest connected component (LCC):
- ## group '1' group '2'
- ## Relative LCC size 0.32558 0.37209
- ## Clustering coefficient 0.56856 0.62305
- ## Moduarity 0.22977 0.15556
- ## Positive edge percentage 59.25926 70.73171
- ## Edge density 0.29670 0.34167
- ## Natural connectivity 0.12468 0.13246
- ## Vertex connectivity 1.00000 1.00000
- ## Edge connectivity 1.00000 1.00000
- ## Average dissimilarity* 0.88722 0.86053
- ## Average path length** 1.38906 1.27974
- ##
- ## Whole network:
- ## group '1' group '2'
- ## Number of components 24.00000 27.00000
- ## Clustering coefficient 0.48109 0.62305
- ## Moduarity 0.41139 0.18849
- ## Positive edge percentage 60.60606 71.42857
- ## Edge density 0.03654 0.04651
- ## Natural connectivity 0.02994 0.03447
- ##
- ## *Dissimilarity = 1 - edge weight
- ## **Path length: Units with average dissimilarity
- ##
- ## ______________________________
- ## Clusters
- ## - In the whole network
- ## - Algorithm: cluster_fast_greedy
- ## ````````````````````````````````
- ## group '1':
- ## name: 0 1 2 3 4 5 6
- ## #: 19 8 2 2 4 6 2
- ##
- ## group '2':
- ## name: 0 1 2 3
- ## #: 25 6 10 2
- ##
- ## ______________________________
- ## Hubs
- ## - In alphabetical/numerical order
- ## - Based on empirical quantiles of centralities
- ## ```````````````````````````````````````````````
- ## group '1' group '2'
- ## 190307 184983
- ## 190464 188236
- ## 301645 190464
- ##
- ## ______________________________
- ## Centrality measures
- ## - In decreasing order
- ## - Centrality of disconnected components is zero
- ## ````````````````````````````````````````````````
- ## Degree (normalized):
- ## group '1' group '2'
- ## 301645 0.19048 0.19048
- ## 188236 0.16667 0.28571
- ## 190464 0.14286 0.21429
- ## 305760 0.11905 0.09524
- ## 190307 0.11905 0.11905
- ## ______ ______
- ## 188236 0.16667 0.28571
- ## 190464 0.14286 0.21429
- ## 301645 0.19048 0.19048
- ## 311477 0 0.16667
- ## 184983 0.09524 0.16667
- ##
- ## Eigenvector centrality (normalized):
- ## group '1' group '2'
- ## 190307 1 0.79318
- ## 190464 0.94119 0.97425
- ## 301645 0.92965 0.2581
- ## 188236 0.89014 1
- ## 305760 0.79044 0.21057
- ## ______ ______
- ## 188236 0.89014 1
- ## 190464 0.94119 0.97425
- ## 184983 0.76936 0.90943
- ## 190307 1 0.79318
- ## 311477 0 0.78083
-
-``` r
-# Create label vector
-labels <- as.vector(tax_table(amgut_genus)[, "Genus"])
-names(labels) <- rownames(tax_table(amgut_genus))
-
-# Optional: Shorten labels to a desired length
-#labels <- substr(labels, 1, 9)
-
-plot(props_season,
- repulsion = 0.9,
- labels = labels,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "intelligent",
- labelPattern = c(5,"'",3),
- labelLength = 10,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0.8,
- cexHubLabels = 0.8,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-```
-
-![](figures/tutorial_adapt/method_1-1.png)
-
-``` r
-# With shortened node labels:
-labels <- substr(labels, 1, 9)
-
-plot(props_season,
- repulsion = 0.9,
- labels = labels,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "intelligent",
- labelPattern = c(5,"'",3),
- labelLength = 10,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0.8,
- cexHubLabels = 0.8,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-```
-
-![](figures/tutorial_adapt/method_1-2.png)
-
-#### Use genus names instead of ASV names: Method 2
-
-In this method, row names of the count table (stored as `otu_table` in
-the phyloseq object) are set to genus names.
-
-Advantages:
-
-- Genus names are shown in the summaries
-- The functionality for adapting labels provided by NetCoMi’s plot
- function can be applied
-
-``` r
-data("amgut2.filt.phy")
-
-colnames(amgut2.filt.phy@tax_table@.Data) <- c("Kingdom","Phylum","Class",
- "Order", "Family", "Genus",
- "Species")
-
-# Agglomerate to genus level
-amgut_genus <- tax_glom(amgut2.filt.phy,"Genus")
-
-# Split data set into seasonal allergies = yes/no
-levels(phyloseq::get_variable(amgut_genus, "SEASONAL_ALLERGIES"))
-```
-
- ## [1] "no" "None" "yes"
-
-``` r
-amgut_g_yes = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "yes")
-amgut_g_no = subset_samples(amgut_genus, SEASONAL_ALLERGIES == "no")
-
-
-# Rename taxa
-# (Since NetCoMi uses only the otu table, we just rename it's rownames)
-g_names <- as.vector(tax_table(amgut_genus)[, "Genus"])
-
-taxtab <- amgut_genus@tax_table@.Data
-
-# Make labels unique
-duplis <- g_names[duplicated(g_names) | duplicated(g_names, fromLast=TRUE)]
-
-while(length(duplis) > 0){
- duplis.sel <- duplis[duplis == duplis[1]]
-
- g_names[g_names == duplis.sel[1]] <- paste0(duplis.sel[1], 1:length(duplis.sel))
-
- duplis <- g_names[duplicated(g_names) | duplicated(g_names, fromLast=TRUE)]
-}
-
-rownames(amgut_g_yes@otu_table@.Data) <- g_names
-rownames(amgut_g_no@otu_table@.Data) <- g_names
-
-
-# Network construction
-net_season <- netConstruct(data = amgut_g_yes,
- data2 = amgut_g_no,
- measure = "pearson",
- normMethod = "clr",
- zeroMethod = "none",
- sparsMethod = "threshold",
- thresh = 0.4,
- verbose = 1,
- seed = 123456)
-```
-
- ## Infos about changed arguments:
-
- ## Zero replacement needed for clr transformation. 'multRepl' used.
-
- ## 43 taxa and 120 samples remaining in group 1.
-
- ## 43 taxa and 162 samples remaining in group 2.
-
-``` r
-# Network analysis
-props_season <- netAnalyze(net_season,
- clustMethod = "cluster_fast_greedy")
-
-# Network analysis summary (which now shows genus names)
-summary(props_season, showCentr = c("degree", "eigenvector"))
-```
-
- ##
- ## Component sizes
- ## ```````````````
- ## Group 1:
- ## size: 14 4 2 1
- ## #: 1 1 3 19
- ##
- ## Group 2:
- ## size: 16 2 1
- ## #: 1 1 25
- ## ______________________________
- ## Global network properties
- ## `````````````````````````
- ## Largest connected component (LCC):
- ## group '1' group '2'
- ## Relative LCC size 0.32558 0.37209
- ## Clustering coefficient 0.56856 0.62305
- ## Moduarity 0.22977 0.15556
- ## Positive edge percentage 59.25926 70.73171
- ## Edge density 0.29670 0.34167
- ## Natural connectivity 0.12468 0.13246
- ## Vertex connectivity 1.00000 1.00000
- ## Edge connectivity 1.00000 1.00000
- ## Average dissimilarity* 0.88722 0.86053
- ## Average path length** 1.38906 1.27974
- ##
- ## Whole network:
- ## group '1' group '2'
- ## Number of components 24.00000 27.00000
- ## Clustering coefficient 0.48109 0.62305
- ## Moduarity 0.41139 0.18849
- ## Positive edge percentage 60.60606 71.42857
- ## Edge density 0.03654 0.04651
- ## Natural connectivity 0.02994 0.03447
- ##
- ## *Dissimilarity = 1 - edge weight
- ## **Path length: Units with average dissimilarity
- ##
- ## ______________________________
- ## Clusters
- ## - In the whole network
- ## - Algorithm: cluster_fast_greedy
- ## ````````````````````````````````
- ## group '1':
- ## name: 0 1 2 3 4 5 6
- ## #: 19 8 2 2 4 6 2
- ##
- ## group '2':
- ## name: 0 1 2 3
- ## #: 25 6 10 2
- ##
- ## ______________________________
- ## Hubs
- ## - In alphabetical/numerical order
- ## - Based on empirical quantiles of centralities
- ## ```````````````````````````````````````````````
- ## group '1' group '2'
- ## g__3 g__3
- ## g__6 g__5
- ## g__Coprococcus g__Faecalibacterium
- ##
- ## ______________________________
- ## Centrality measures
- ## - In decreasing order
- ## - Centrality of disconnected components is zero
- ## ````````````````````````````````````````````````
- ## Degree (normalized):
- ## group '1' group '2'
- ## g__6 0.19048 0.19048
- ## g__Faecalibacterium 0.16667 0.28571
- ## g__3 0.14286 0.21429
- ## g__Escherichia 0.11905 0.09524
- ## g__Coprococcus 0.11905 0.11905
- ## ______ ______
- ## g__Faecalibacterium 0.16667 0.28571
- ## g__3 0.14286 0.21429
- ## g__6 0.19048 0.19048
- ## g__2 0 0.16667
- ## g__5 0.09524 0.16667
- ##
- ## Eigenvector centrality (normalized):
- ## group '1' group '2'
- ## g__Coprococcus 1 0.79318
- ## g__3 0.94119 0.97425
- ## g__6 0.92965 0.2581
- ## g__Faecalibacterium 0.89014 1
- ## g__Escherichia 0.79044 0.21057
- ## ______ ______
- ## g__Faecalibacterium 0.89014 1
- ## g__3 0.94119 0.97425
- ## g__5 0.76936 0.90943
- ## g__Coprococcus 1 0.79318
- ## g__2 0 0.78083
-
-``` r
-plot(props_season,
- repulsion = 0.9,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "simple",
- labelLength = 9,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0.8,
- cexHubLabels = 0.8,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-```
-
-![](figures/tutorial_adapt/method_2-1.png)
-
-#### Only hub nodes are labeled
-
-If `cexLabels` is set to zero, only the hub node’s labels are plotted.
-
-``` r
-plot(props_season,
- repulsion = 0.9,
- sameLayout = TRUE,
- layoutGroup = "union",
- rmSingles = "inboth",
- shortenLabels = "simple",
- labelLength = 9,
- nodeSize = "mclr",
- labelScale = FALSE,
- cexNodes = 1.5,
- cexLabels = 0,
- cexHubLabels = 1,
- cexTitle = 1.2,
- groupNames = c("Wet", "Dry"),
- hubBorderCol = "gray40",
- mar = c(2, 6, 4, 6))
-```
-
-![](figures/tutorial_adapt/hub_labels_only-1.png)
diff --git a/tutorials/TUTORIAL_createAssoPerm.md b/tutorials/TUTORIAL_createAssoPerm.md
deleted file mode 100644
index 0feb8ee..0000000
--- a/tutorials/TUTORIAL_createAssoPerm.md
+++ /dev/null
@@ -1,823 +0,0 @@
-
-## Tutorial: How to use createAssoPerm()
-
-This tutorial is meant to demonstrate the options for storing and
-reloading count tables, association matrices and network properties of
-the permuted data if **permutation tests** are performed with
-`netCompare()` or `diffnet()`.
-
-NetCoMi (\>=1.0.2) includes the function `createAssoPerm()` for creating
-either only a matrix with permuted group labels or storing count tables
-and association matrices of the permuted data. The stored files can be
-passed to `netCompare()` and `diffnet()` so that users can repeatedly
-run these two functions with varying arguments, without the need to
-recompute the associations each time. `createAssoPerm()` additionally
-enables to compute the permutation associations block-wise (for a subset
-of permutations) and store them in separate files. These files can then
-be combined to one large matrix (with all permutations) and passed to
-`netCompare()` or `diffnet()`.
-
-The tutorial furthermore explains NetCoMi’s ability to handle **matched
-data sets**.
-
-#### Network construction and analysis
-
-We use data from the American Gut Project and conduct a network
-comparison between subjects with and without lactose intolerance.
-
-To demonstrate NetCoMi’s functionality for **matched data**, we build a
-“fake” 1:2 matched data set, where each two samples of the
-`LACTOSE = "no"` group are assigned to one sample of the
-`LACTOSE = "yes"` group. We use a subset of 150 samples, leading to 50
-samples in group “yes” and 100 samples in group “no”.
-
-``` r
-library(NetCoMi)
-library(phyloseq)
-```
-
-``` r
-set.seed(123456)
-
-# Load American Gut Data (from SpiecEasi package)
-data("amgut2.filt.phy")
-
-#table(amgut2.filt.phy@sam_data@.Data[[which(amgut2.filt.phy@sam_data@names == "LACTOSE")]])
-
-# Divide samples into two groups: with and without lactose intolerance
-lact_yes <- phyloseq::subset_samples(amgut2.filt.phy, LACTOSE == "yes")
-lact_no <- phyloseq::subset_samples(amgut2.filt.phy, LACTOSE == "no")
-
-# Extract count tables
-counts_yes <- t(as(phyloseq::otu_table(lact_yes), "matrix"))
-counts_no <- t(as(phyloseq::otu_table(lact_no), "matrix"))
-
-# Build the 1:2 matched data set
-counts_matched <- matrix(NA, nrow = 150, ncol = ncol(counts_yes))
-colnames(counts_matched) <- colnames(counts_yes)
-rownames(counts_matched) <- 1:150
-ind_yes <- ind_no <- 1
-
-for (i in 1:150) {
- if ((i-1)%%3 == 0) {
- counts_matched[i, ] <- counts_yes[ind_yes, ]
- rownames(counts_matched)[i] <- rownames(counts_yes)[ind_yes]
- ind_yes <- ind_yes + 1
- } else {
- counts_matched[i, ] <- counts_no[ind_no, ]
- rownames(counts_matched)[i] <- rownames(counts_no)[ind_no]
- ind_no <- ind_no + 1
- }
-}
-
-# The corresponding group vector used for splitting the data into two subsets.
-group_vec <- rep(c(1,2,2), 50)
-# Note: group "1" belongs to "yes", group "2" belongs to "no"
-
-# Network construction
-net_amgut <- netConstruct(counts_matched,
- group = group_vec,
- matchDesign = c(1,2),
- filtTax = "highestFreq",
- filtTaxPar = list(highestFreq = 50),
- measure = "pearson",
- zeroMethod = "pseudo",
- normMethod = "clr",
- sparsMethod = "threshold",
- thresh = 0.4,
- seed = 123456)
-```
-
- ## Checking input arguments ... Done.
- ## Data filtering ...
- ## 88 taxa removed.
- ## 50 taxa and 150 samples remaining.
- ##
- ## Zero treatment:
- ## Pseudo count of 1 added.
- ##
- ## Normalization:
- ## Execute clr(){SpiecEasi} ... Done.
- ##
- ## Calculate 'pearson' associations ... Done.
- ##
- ## Calculate associations in group 2 ... Done.
- ##
- ## Sparsify associations via 'threshold' ... Done.
- ##
- ## Sparsify associations in group 2 ... Done.
-
-``` r
-# Network analysis with default values
-props_amgut <- netAnalyze(net_amgut)
-
-#summary(props_amgut)
-
-# Network plot
-plot(props_amgut, sameLayout = TRUE, layoutGroup = "union",
- nodeSize = "clr", repulsion = 0.9, cexTitle = 3.7, cexNodes = 2,
- cexLabels = 2, groupNames = c("LACTOSE = yes", "LACTOSE = no"))
-
-legend("bottom", title = "estimated correlation:", legend = c("+","-"),
- col = c("#009900","red"), inset = 0.02, cex = 4, lty = 1, lwd = 4,
- bty = "n", horiz = TRUE)
-```
-
-![](figures/tutorial_create/network_construction-1.png)
-
-#### Network comparison via the “classical way”
-
-We conduct a network comparison with permutation tests to examine
-whether group differences are significant. In order to reduce the
-execution time, only 100 permutations are used. For real data sets, the
-number of permutations should be at least 1000 to get reliable results.
-
-The matrices with estimated associations for the permuted data are
-stored in an external file (in the current working directory) named
-`"assoPerm_comp"`.
-
-``` r
-# Network comparison
-comp_amgut_orig <- netCompare(props_amgut, permTest = TRUE, nPerm = 100,
- storeAssoPerm = TRUE,
- fileStoreAssoPerm = "assoPerm_comp",
- storeCountsPerm = FALSE,
- seed = 123456)
-```
-
- ## Checking input arguments ... Done.
- ## Calculate network properties ... Done.
- ## Files 'assoPerm_comp.bmat and assoPerm_comp.desc.txt created.
- ## Execute permutation tests ...
-
-     ##   |======================================================================| 100%
-
- ## Done.
- ## Calculating p-values ... Done.
- ## Adjust for multiple testing using 'adaptBH' ... Done.
-
-``` r
-summary(comp_amgut_orig)
-```
-
- ##
- ## Comparison of Network Properties
- ## ----------------------------------
- ## CALL:
- ## netCompare(x = props_amgut, permTest = TRUE, nPerm = 100, seed = 123456,
- ## storeAssoPerm = TRUE, fileStoreAssoPerm = "assoPerm_comp",
- ## storeCountsPerm = FALSE)
- ##
- ## ______________________________
- ## Global network properties
- ## `````````````````````````
- ## Largest connected component (LCC):
- ## group '1' group '2' abs.diff. p-value
- ## Relative LCC size 0.480 0.400 0.080 0.950495
- ## Clustering coefficient 0.510 0.635 0.125 0.584158
- ## Modularity 0.261 0.175 0.085 0.524752
- ## Positive edge percentage 57.627 62.500 4.873 0.554455
- ## Edge density 0.214 0.295 0.081 0.693069
- ## Natural connectivity 0.080 0.109 0.029 0.742574
- ## Vertex connectivity 1.000 1.000 0.000 1.000000
- ## Edge connectivity 1.000 1.000 0.000 1.000000
- ## Average dissimilarity* 0.921 0.887 0.033 0.693069
- ## Average path length** 1.786 1.459 0.327 0.643564
- ##
- ## Whole network:
- ## group '1' group '2' abs.diff. p-value
- ## Number of components 21.000 27.000 6.000 0.861386
- ## Clustering coefficient 0.463 0.635 0.172 0.247525
- ## Modularity 0.332 0.252 0.080 0.504950
- ## Positive edge percentage 58.462 63.333 4.872 0.564356
- ## Edge density 0.053 0.049 0.004 0.871287
- ## Natural connectivity 0.030 0.031 0.001 0.772277
- ## -----
- ## p-values: one-tailed test with null hypothesis diff=0
- ## *: Dissimilarity = 1 - edge weight
- ## **: Path length = Units with average dissimilarity
- ##
- ## ______________________________
- ## Jaccard index (similarity betw. sets of most central nodes)
- ## ```````````````````````````````````````````````````````````
- ## Jacc P(<=Jacc) P(>=Jacc)
- ## degree 0.615 0.991177 0.034655 *
- ## betweenness centr. 0.312 0.546936 0.660877
- ## closeness centr. 0.529 0.972716 0.075475 .
- ## eigenvec. centr. 0.625 0.995960 0.015945 *
- ## hub taxa 0.500 0.888889 0.407407
- ## -----
- ## Jaccard index in [0,1] (1 indicates perfect agreement)
- ##
- ## ______________________________
- ## Adjusted Rand index (similarity betw. clusterings)
- ## ``````````````````````````````````````````````````
- ## wholeNet LCC
- ## ARI 0.383 0.281
- ## p-value 0.000 0.000
- ## -----
- ## ARI in [-1,1] with ARI=1: perfect agreement betw. clusterings
- ## ARI=0: expected for two random clusterings
- ## p-value: permutation test (n=1000) with null hypothesis ARI=0
- ##
- ## ______________________________
- ## Graphlet Correlation Distance
- ## `````````````````````````````
- ## wholeNet LCC
- ## GCD 0.914 2.241
- ## -----
- ## GCD >= 0 (GCD=0 indicates perfect agreement between GCMs)
- ##
- ## ______________________________
- ## Centrality measures
- ## - In decreasing order
- ## - Centrality of disconnected components is zero
- ## ````````````````````````````````````````````````
- ## Degree (normalized):
- ## group '1' group '2' abs.diff. adj.p-value
- ## 364563 0.061 0.245 0.184 1
- ## 190597 0.102 0.000 0.102 1
- ## 369164 0.102 0.000 0.102 1
- ## 184983 0.184 0.102 0.082 1
- ## 194648 0.000 0.082 0.082 1
- ## 353985 0.082 0.000 0.082 1
- ## 242070 0.082 0.000 0.082 1
- ## 307981 0.122 0.184 0.061 1
- ## 188236 0.122 0.184 0.061 1
- ## 363302 0.102 0.041 0.061 1
- ##
- ## Betweenness centrality (normalized):
- ## group '1' group '2' abs.diff. adj.p-value
- ## 353985 0.320 0.000 0.320 0.494890
- ## 364563 0.040 0.216 0.177 0.999678
- ## 188236 0.024 0.187 0.163 0.999678
- ## 307981 0.020 0.158 0.138 0.999678
- ## 157547 0.103 0.000 0.103 0.999678
- ## 71543 0.091 0.000 0.091 0.999678
- ## 590083 0.087 0.000 0.087 0.742335
- ## 190597 0.079 0.000 0.079 0.999678
- ## 194648 0.000 0.070 0.070 0.999678
- ## 288134 0.063 0.129 0.065 0.999678
- ##
- ## Closeness centrality (normalized):
- ## group '1' group '2' abs.diff. adj.p-value
- ## 190597 0.871 0.000 0.871 0.489307
- ## 194648 0.000 0.868 0.868 0.768911
- ## 242070 0.809 0.000 0.809 0.733961
- ## 369164 0.788 0.000 0.788 0.733961
- ## 302160 0.000 0.677 0.677 0.988400
- ## 353985 0.639 0.000 0.639 0.733961
- ## 516022 0.000 0.561 0.561 0.988400
- ## 181095 0.515 0.000 0.515 0.733961
- ## 590083 0.507 0.000 0.507 0.489307
- ## 470239 0.349 0.000 0.349 0.988400
- ##
- ## Eigenvector centrality (normalized):
- ## group '1' group '2' abs.diff. adj.p-value
- ## 242070 0.469 0.000 0.469 0.9998
- ## 307981 0.334 0.672 0.339 0.9998
- ## 301645 0.332 0.661 0.329 0.9998
- ## 364563 0.079 0.339 0.260 0.9998
- ## 369164 0.201 0.000 0.201 0.9998
- ## 326792 0.130 0.300 0.170 0.9998
- ## 157547 0.632 0.801 0.169 0.9998
- ## 190597 0.146 0.000 0.146 0.9998
- ## 363302 0.153 0.012 0.141 0.9998
- ## 71543 0.910 0.777 0.133 0.9998
- ##
- ## _________________________________________________________
- ## Significance codes: ***: 0.001, **: 0.01, *: 0.05, .: 0.1
-
-The network comparison is now repeated, but this time the stored
-permutation associations are loaded by `netCompare()`. This option is
-useful, for instance, to rerun the function with an alternative multiple
-testing adjustment without having to re-estimate all associations.
-
-``` r
-# Network comparison
-comp_amgut1 <- netCompare(props_amgut, permTest = TRUE, nPerm = 100,
- fileLoadAssoPerm = "assoPerm_comp",
- storeCountsPerm = FALSE,
- seed = 123456)
-```
-
- ## Checking input arguments ... Done.
- ## Calculate network properties ... Done.
- ## Execute permutation tests ...
-
- ## Done.
- ## Calculating p-values ... Done.
- ## Adjust for multiple testing using 'adaptBH' ... Done.
-
-``` r
-# Check whether the second comparison leads to equal results
-all.equal(comp_amgut_orig$properties, comp_amgut1$properties)
-```
-
- ## [1] TRUE
-
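-As mentioned above, a main benefit of reusing the stored associations is
-that the comparison can be repeated with a different multiple testing
-adjustment, which is controlled via the `adjust` argument. A sketch (not
-executed here; the object name and the `"BH"` choice are illustrative):
-
-``` r
-# Sketch: reuse the stored permutation associations with another
-# adjustment method ("BH" is an illustrative choice)
-comp_amgut_bh <- netCompare(props_amgut, permTest = TRUE, nPerm = 100,
-                            adjust = "BH",
-                            fileLoadAssoPerm = "assoPerm_comp",
-                            storeCountsPerm = FALSE,
-                            seed = 123456)
-```
-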
-The stored permutation associations can also be passed to `diffnet()` to
-construct a differential network.
-
-``` r
-# Construct differential network
-diffnet_amgut <- diffnet(net_amgut, diffMethod = "permute", nPerm = 100,
- fileLoadAssoPerm = "assoPerm_comp",
- storeCountsPerm = FALSE)
-```
-
- ## Checking input arguments ... Done.
- ## Execute permutation tests ...
-
- ## Adjust for multiple testing using 'lfdr' ...
- ## Execute fdrtool() ...
-
- ## Step 1... determine cutoff point
- ## Step 2... estimate parameters of null distribution and eta0
- ## Step 3... compute p-values and estimate empirical PDF/CDF
- ## Step 4... compute q-values and local fdr
-
- ## Done.
-
-``` r
-plot(diffnet_amgut)
-```
-
- ## Error in plot.diffnet(diffnet_amgut): Network is empty.
-
-As expected with only 100 permutations, no differential associations
-remain after multiple testing adjustment.
-
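-Before interpreting a differential network, one would therefore rerun
-the permutation test with considerably more permutations. A minimal
-sketch (not executed here; the permutation and core numbers are
-illustrative values and increase the runtime substantially):
-
-``` r
-# Sketch: rerun diffnet() with more permutations (illustrative values)
-diffnet_amgut2 <- diffnet(net_amgut, diffMethod = "permute",
-                          nPerm = 1000, cores = 4, seed = 123456)
-```
-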
-Just to get an impression of what the differential network could look
-like, we plot it based on the non-adjusted p-values. Note that this
-approach is not statistically sound!
-
-``` r
-plot(diffnet_amgut, adjusted = FALSE,
- mar = c(2, 2, 5, 15), legendPos = c(1.2,1.2),
- legendArgs = list(bty = "n"),
- legendGroupnames = c("yes", "no"),
- legendTitle = "Correlations:")
-```
-
-![](figures/tutorial_create/diffnet_2-1.png)
-
-#### Network comparison using createAssoPerm()
-
-This time, the permutation association matrices are generated using
-`createAssoPerm()` and then passed to `netCompare()`.
-
-The output should be assigned to a variable because `createAssoPerm()`
-always returns the matrix with permuted group labels.
-
-``` r
-permGroupMat <- createAssoPerm(props_amgut, nPerm = 100,
- computeAsso = TRUE,
- fileStoreAssoPerm = "assoPerm",
- storeCountsPerm = TRUE,
- append = FALSE, seed = 123456)
-```
-
-Let’s take a look at the permuted group labels. To interpret them
-correctly, it is important to know that within `netConstruct()`, the
-data set is split into two matrices, one per group. For the permutation
-tests, the two matrices are combined by rows, and in each permutation
-the samples are reassigned to one of the two groups while preserving the
-matching design for matched data.
-
-In our case, the `permGroupMat` matrix has 100 rows (`nPerm = 100`) and
-150 columns (our sample size). The first 50 columns belong to the first
-group (group “yes” in our case) and columns 51 to 150 belong to the
-second group.
-
-Since every two samples of group 2 are matched to one sample of group 1,
-we label the columns of the group matrix accordingly. Now we can see
-that the matching design is preserved: since sample 3 is assigned to
-group 1, samples 1 and 2 must be assigned to group 2, and so on (entries
-\[1,1\] and \[1,51:52\] of `permGroupMat`).
-
-``` r
-seq1 <- seq(1,150, by = 3)
-seq2 <- seq(1:150)[!seq(1:150)%in%seq1]
-
-colnames(permGroupMat) <- c(seq1, seq2)
-
-permGroupMat[1:5, 1:10]
-```
-
- ## 1 4 7 10 13 16 19 22 25 28
- ## [1,] 2 2 2 2 1 2 2 1 2 2
- ## [2,] 2 2 2 1 1 2 2 2 2 2
- ## [3,] 2 2 1 1 2 2 2 1 2 2
- ## [4,] 2 2 1 2 1 2 2 2 1 1
- ## [5,] 2 2 2 2 2 1 2 2 2 2
-
-``` r
-permGroupMat[1:5, 51:71]
-```
-
- ## 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29 30 32
- ## [1,] 2 1 1 2 2 1 1 2 2 2 1 2 2 1 2 2 2 1 1 2 1
- ## [2,] 2 1 2 1 2 1 2 2 2 2 1 2 1 2 2 1 2 1 2 1 2
- ## [3,] 2 1 1 2 2 2 2 2 1 2 1 2 2 1 2 2 1 2 2 1 2
- ## [4,] 1 2 1 2 2 2 1 2 2 2 1 2 2 1 1 2 2 2 2 2 1
- ## [5,] 2 1 2 1 1 2 2 1 1 2 2 2 2 1 1 2 1 2 1 2 2
-
-As before, the stored permutation association matrices are passed to
-`netCompare()`.
-
-``` r
-comp_amgut2 <- netCompare(props_amgut, permTest = TRUE, nPerm = 100,
- fileLoadAssoPerm = "assoPerm",
- seed = 123456)
-```
-
- ## Checking input arguments ... Done.
- ## Calculate network properties ... Done.
- ## Execute permutation tests ...
-
- ## Done.
- ## Calculating p-values ... Done.
- ## Adjust for multiple testing using 'adaptBH' ... Done.
-
-``` r
-# Are the network properties equal?
-all.equal(comp_amgut_orig$properties, comp_amgut2$properties)
-```
-
- ## [1] TRUE
-
-Using the `fm.open()` function from the `filematrix` package, we take a
-look at the stored matrices themselves.
-
-``` r
-# Open stored files and check whether they are equal
-assoPerm1 <- filematrix::fm.open(filenamebase = "assoPerm_comp" , readonly = TRUE)
-assoPerm2 <- filematrix::fm.open(filenamebase = "assoPerm" , readonly = TRUE)
-
-identical(as.matrix(assoPerm1), as.matrix(assoPerm2))
-```
-
- ## [1] TRUE
-
-``` r
-dim(as.matrix(assoPerm1))
-```
-
- ## [1] 5000 100
-
-``` r
-dim(as.matrix(assoPerm2))
-```
-
- ## [1] 5000 100
-
-``` r
-# Close files
-filematrix::close(assoPerm1)
-filematrix::close(assoPerm2)
-```
-
-#### Block-wise execution
-
-If computing resources are limited, it can be useful to estimate the
-associations in blocks, that is, for a subset of permutations at a time
-instead of all permutations at once. We’ll now see how to perform such a
-block-wise network comparison using NetCoMi’s functions. Note that in
-this approach the external file is extended in each iteration, which is
-why it cannot be parallelized.
-
-In the first step, `createAssoPerm()` is used to generate the matrix
-with permuted group labels (for all permutations!). Hence, we set the
-`computeAsso` parameter to `FALSE`.
-
-``` r
-permGroupMat <- createAssoPerm(props_amgut, nPerm = 100,
- computeAsso = FALSE, seed = 123456)
-```
-
- ## Create matrix with permuted group labels ... Done.
-
-We now compute the association matrices in blocks of 20 permutations per
-iteration (resulting in 5 iterations).
-
-Note: The `nPerm` argument must be set to the block size.
-
-The external file (containing the association matrices) must be extended
-in each iteration, except for the first one, in which the file is
-created. Thus, `append` is set to `TRUE` for `i >= 2`.
-
-``` r
-nPerm_all <- 100
-blocksize <- 20
-repetitions <- nPerm_all / blocksize
-
-for (i in 1:repetitions) {
- print(i)
- if (i == 1) {
- # Create a new file in the first run
- tmp <- createAssoPerm(props_amgut, nPerm = blocksize,
- permGroupMat = permGroupMat[(i-1) * blocksize + 1:blocksize, ],
- computeAsso = TRUE,
- fileStoreAssoPerm = "assoPerm",
- storeCountsPerm = FALSE, append = FALSE)
- } else {
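-    # Extend the existing file in all subsequent runs (append = TRUE)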
- tmp <- createAssoPerm(props_amgut, nPerm = blocksize,
- permGroupMat = permGroupMat[(i-1) * blocksize + 1:blocksize, ],
- computeAsso = TRUE,
- fileStoreAssoPerm = "assoPerm",
- storeCountsPerm = FALSE, append = TRUE)
- }
-
-}
-```
-
- ## [1] 1
-
- ## Files 'assoPerm.bmat and assoPerm.desc.txt created.
-
- ## Compute permutation associations ...
-
- ## Done.
-
- ## [1] 2
-
- ## Compute permutation associations ...
-
- ## Done.
-
- ## [1] 3
-
- ## Compute permutation associations ...
-
- ## Done.
-
- ## [1] 4
-
- ## Compute permutation associations ...
-
- ## Done.
-
- ## [1] 5
-
- ## Compute permutation associations ...
-
- ## Done.
-
-The stored file, which now contains the associations of all 100
-permutations, can be passed to `netCompare()` as before.
-
-``` r
-comp_amgut3 <- netCompare(props_amgut, permTest = TRUE, nPerm = 100,
- storeAssoPerm = TRUE,
- fileLoadAssoPerm = "assoPerm",
- storeCountsPerm = FALSE, seed = 123456)
-```
-
- ## Checking input arguments ... Done.
- ## Calculate network properties ... Done.
- ## Execute permutation tests ...
-
- ## Done.
- ## Calculating p-values ... Done.
- ## Adjust for multiple testing using 'adaptBH' ... Done.
-
-``` r
-# Are the network properties equal to the first comparison?
-all.equal(comp_amgut_orig$properties, comp_amgut3$properties)
-```
-
- ## [1] TRUE
-
-``` r
-# Open stored files and check whether they are equal
-assoPerm1 <- fm.open(filenamebase = "assoPerm_comp" , readonly = TRUE)
-assoPerm3 <- fm.open(filenamebase = "assoPerm" , readonly = TRUE)
-
-all.equal(as.matrix(assoPerm1), as.matrix(assoPerm3))
-```
-
- ## [1] TRUE
-
-``` r
-dim(as.matrix(assoPerm1))
-```
-
- ## [1] 5000 100
-
-``` r
-dim(as.matrix(assoPerm3))
-```
-
- ## [1] 5000 100
-
-``` r
-# Close files
-close(assoPerm1)
-close(assoPerm3)
-```
-
-#### Block-wise execution (executable in parallel)
-
-If the blocks are to be computed in parallel, extending the `"assoPerm"`
-file in each iteration would not work. To run the blocks in parallel, we
-have to create a separate file in each iteration and combine the files
-at the end.
-
-``` r
-# Create the matrix with permuted group labels (as before)
-permGroupMat <- createAssoPerm(props_amgut, nPerm = 100, computeAsso = FALSE,
- seed = 123456)
-```
-
- ## Create matrix with permuted group labels ... Done.
-
-``` r
-nPerm_all <- 100
-blocksize <- 20
-repetitions <- nPerm_all / blocksize # 5 repetitions
-
-# Execute as standard for-loop:
-for (i in 1:repetitions) {
- tmp <- createAssoPerm(props_amgut, nPerm = blocksize,
- permGroupMat = permGroupMat[(i-1) * blocksize + 1:blocksize, ],
- computeAsso = TRUE,
- fileStoreAssoPerm = paste0("assoPerm", i),
- storeCountsPerm = FALSE, append = FALSE)
-
-}
-```
-
- ## Files 'assoPerm1.bmat and assoPerm1.desc.txt created.
- ## Compute permutation associations ...
-
- ## Done.
- ## Files 'assoPerm2.bmat and assoPerm2.desc.txt created.
- ## Compute permutation associations ...
-
- ## Done.
- ## Files 'assoPerm3.bmat and assoPerm3.desc.txt created.
- ## Compute permutation associations ...
-
- ## Done.
- ## Files 'assoPerm4.bmat and assoPerm4.desc.txt created.
- ## Compute permutation associations ...
-
- ## Done.
- ## Files 'assoPerm5.bmat and assoPerm5.desc.txt created.
- ## Compute permutation associations ...
-
- ## Done.
-
-``` r
-# OR execute in parallel:
-library("foreach")
-
-cores <- 2 # Please choose an appropriate number of cores
-
-cl <- parallel::makeCluster(cores)
-doSNOW::registerDoSNOW(cl)
-
-# Create progress bar:
-pb <- utils::txtProgressBar(0, repetitions, style=3)
-```
-
- ## | | | 0%
-
-``` r
-progress <- function(n) {
- utils::setTxtProgressBar(pb, n)
-}
-
-opts <- list(progress = progress)
-
-tmp <- foreach(i = 1:repetitions,
- .packages = c("NetCoMi"),
- .options.snow = opts) %dopar% {
-
- progress(i)
- NetCoMi::createAssoPerm(props_amgut, nPerm = blocksize,
- permGroupMat = permGroupMat[(i-1) * blocksize + 1:blocksize, ],
- computeAsso = TRUE,
- fileStoreAssoPerm = paste0("assoPerm", i),
- storeCountsPerm = FALSE, append = FALSE)
-}
-```
-
- ## | |============== | 20% | |============================ | 40% | |========================================== | 60% | |======================================================== | 80% | |======================================================================| 100%
-
-``` r
-# Close progress bar
-close(pb)
-```
-
-``` r
-# Stop cluster
-parallel::stopCluster(cl)
-
-
-# Combine the matrices and store them into a new file (because netCompare()
-# needs an external file)
-assoPerm_all <- NULL
-
-for (i in 1:repetitions) {
-
- assoPerm_tmp <- fm.open(filenamebase = paste0("assoPerm", i) , readonly = TRUE)
-
- assoPerm_all <- rbind(assoPerm_all, as.matrix(assoPerm_tmp))
-
- close(assoPerm_tmp)
-}
-
-dim(assoPerm_all)
-```
-
- ## [1] 5000 100
-
-``` r
-# Store the combined permutation association matrix as a file matrix
-fm.create.from.matrix(filenamebase = "assoPerm", mat = assoPerm_all)
-```
-
- ## 5000 x 100 filematrix with 8 byte "double" elements
-
-As a last step, we pass the file containing the combined matrix to
-`netCompare()`.
-
-``` r
-comp_amgut4 <- netCompare(props_amgut, permTest = TRUE, nPerm = 100,
- fileLoadAssoPerm = "assoPerm",
- storeCountsPerm = FALSE, seed = 123456)
-```
-
- ## Checking input arguments ... Done.
- ## Calculate network properties ... Done.
- ## Execute permutation tests ...
-
- ## Done.
- ## Calculating p-values ... Done.
- ## Adjust for multiple testing using 'adaptBH' ... Done.
-
-``` r
-# Are the network properties equal to those of the first comparison?
-all.equal(comp_amgut_orig$properties, comp_amgut4$properties)
-```
-
- ## [1] TRUE
-
-``` r
-# Open stored files and check whether they are equal
-assoPerm1 <- fm.open(filenamebase = "assoPerm_comp" , readonly = TRUE)
-assoPerm4 <- fm.open(filenamebase = "assoPerm" , readonly = TRUE)
-identical(as.matrix(assoPerm1), as.matrix(assoPerm4))
-```
-
- ## [1] TRUE
-
-``` r
-dim(as.matrix(assoPerm1))
-```
-
- ## [1] 5000 100
-
-``` r
-dim(as.matrix(assoPerm4))
-```
-
- ## [1] 5000 100
-
-``` r
-# Close files
-close(assoPerm1)
-close(assoPerm4)
-```
diff --git a/tutorials/TUTORIAL_cross-domain_associations.md b/tutorials/TUTORIAL_cross-domain_associations.md
deleted file mode 100644
index f635cb3..0000000
--- a/tutorials/TUTORIAL_cross-domain_associations.md
+++ /dev/null
@@ -1,268 +0,0 @@
-
-## Tutorial: How to compare cross-domain association networks using NetCoMi and SpiecEasi?
-
-In this tutorial, we use the same data as in the “Cross domain
-interactions” section of the [SpiecEasi
-tutorial](https://github.com/zdk123/SpiecEasi).
-
-The samples are split into two groups and cross-domain associations are
-computed for each group using SpiecEasi. The association matrices are
-then passed to NetCoMi’s netConstruct() function to conduct a network
-comparison between the two groups.
-
-**Note:**
-This tutorial explains how two cross-domain networks are constructed and
-compared. To construct a single network instead, skip the step where the
-data are split into two groups and apply the workflow to a single data
-set, i.e. pass the estimated association matrix to the “data” argument
-of netConstruct() and continue with NetCoMi’s standard pipeline (see the
-sketch below).
-
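-A minimal sketch of this single-network variant (not executed here;
-`assoMat1` stands for an association matrix estimated with SpiecEasi as
-shown further below, and the hub parameter is an example choice):
-
-``` r
-library(NetCoMi)
-
-# Sketch: single cross-domain network from one association matrix
-net_single <- netConstruct(data = assoMat1,
-                           dataType = "condDependence",
-                           sparsMethod = "none")
-
-props_single <- netAnalyze(net_single, hubPar = "eigenvector")
-
-plot(props_single)
-```
-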
-``` r
-library(SpiecEasi)
-library(phyloseq)
-
-data(hmp2)
-
-# Store count matrices (taxa are columns)
-counts_hmp216S <- as.matrix(t(phyloseq::otu_table(hmp216S)@.Data))
-counts_hmp2prot <- as.matrix(t(phyloseq::otu_table(hmp2prot)@.Data))
-
-# Assume the first 23 samples are in one group and the remaining 24 samples in the other group
-group_vec <- c(rep(1, 23), rep(2, 24))
-
-# Split count matrices
-counts_hmp216S_gr1 <- counts_hmp216S[group_vec == 1, ]
-counts_hmp216S_gr2 <- counts_hmp216S[group_vec == 2, ]
-
-counts_hmp2prot_gr1 <- counts_hmp2prot[group_vec == 1, ]
-counts_hmp2prot_gr2 <- counts_hmp2prot[group_vec == 2, ]
-
-set.seed(123456)
-
-# Run SpiecEasi and create association matrix for group 1
-spiec_result_gr1 <- multi.spiec.easi(list(counts_hmp216S_gr1, counts_hmp2prot_gr1),
- method='mb', nlambda=40,
- lambda.min.ratio=1e-2,
- pulsar.params = list(thresh = 0.05))
-```
-
- ## Warning in spiec.easi.list(datalist, method = method, sel.criterion =
- ## sel.criterion, : input list contains data of mixed classes.
-
- ## Applying data transformations...
-
- ## Selecting model with pulsar using stars...
-
- ## Fitting final estimate with mb...
-
- ## done
-
-``` r
-assoMat1 <- SpiecEasi::symBeta(SpiecEasi::getOptBeta(spiec_result_gr1), mode = "ave")
-
-assoMat1 <- as.matrix(assoMat1)
-
-# Run SpiecEasi and create association matrix for group 2
-spiec_result_gr2 <- multi.spiec.easi(list(counts_hmp216S_gr2, counts_hmp2prot_gr2),
- method='mb', nlambda=40,
- lambda.min.ratio=1e-2,
- pulsar.params = list(thresh = 0.05))
-```
-
- ## Warning in spiec.easi.list(datalist, method = method, sel.criterion =
- ## sel.criterion, : input list contains data of mixed classes.
-
- ## Applying data transformations...
-
- ## Selecting model with pulsar using stars...
-
- ## Fitting final estimate with mb...
-
- ## done
-
-``` r
-assoMat2 <- SpiecEasi::symBeta(SpiecEasi::getOptBeta(spiec_result_gr2), mode = "ave")
-
-assoMat2 <- as.matrix(assoMat2)
-
-# Get taxa names
-taxnames <- c(taxa_names(hmp216S), taxa_names(hmp2prot))
-
-colnames(assoMat1) <- rownames(assoMat1) <- taxnames
-diag(assoMat1) <- 1
-
-colnames(assoMat2) <- rownames(assoMat2) <- taxnames
-diag(assoMat2) <- 1
-```
-
-``` r
-# NetCoMi workflow
-
-library(NetCoMi)
-```
-
-``` r
-# Network construction (pass association matrices to netConstruct)
-# - sparsMethod must be set to "none" because sparsification is already included in SpiecEasi
-net_hmp_16S_prot <- netConstruct(data = assoMat1, data2 = assoMat2,
- dataType = "condDependence",
- sparsMethod = "none")
-
-# Network analysis
-netprops_hmp_16S_prot <- netAnalyze(net_hmp_16S_prot, hubPar = "eigenvector")
-```
-
-``` r
-nodeCols <- c(rep("lightblue", ntaxa(hmp216S)), rep("orange", ntaxa(hmp2prot)))
-names(nodeCols) <- taxnames
-
-plot(netprops_hmp_16S_prot,
- sameLayout = TRUE,
- layoutGroup = "union",
- nodeColor = "colorVec",
- colorVec = nodeCols,
- nodeSize = "eigen",
- nodeSizeSpread = 2,
- labelScale = FALSE,
- cexNodes = 2,
- cexLabels = 2,
- cexHubLabels = 2.5,
- cexTitle = 3.8,
- groupNames = c("group1", "group2"))
-
-
-legend(-0.2, 1.2, cex = 3, pt.cex = 4,
- legend = c("HMP2 16S", "HMP2 protein"), col = c("lightblue", "orange"),
- bty = "n", pch = 16)
-```
-
-![](figures/tutorial_cross-domain/network_plot-1.png)
-
-``` r
-# Network comparison
-# - Permutation tests cannot be performed because the association matrices are
-# used for network construction. For permutation tests, however, the count
-# data are needed.
-netcomp_hmp_16S_prot <- netCompare(netprops_hmp_16S_prot, permTest = FALSE)
-
-summary(netcomp_hmp_16S_prot, groupNames = c("group1", "group2"))
-```
-
- ##
- ## Comparison of Network Properties
- ## ----------------------------------
- ## CALL:
- ## netCompare(x = netprops_hmp_16S_prot, permTest = FALSE)
- ##
- ## ______________________________
- ## Global network properties
- ## `````````````````````````
- ## Largest connected component (LCC):
- ## group1 group2 difference
- ## Relative LCC size 0.818 0.614 0.205
- ## Clustering coefficient 0.042 0.128 0.086
- ## Moduarity 0.661 0.637 0.024
- ## Positive edge percentage 62.222 57.576 4.646
- ## Edge density 0.035 0.046 0.011
- ## Natural connectivity 0.017 0.022 0.006
- ## Vertex connectivity 1.000 1.000 0.000
- ## Edge connectivity 1.000 1.000 0.000
- ## Average dissimilarity* 0.989 0.986 0.003
- ## Average path length** 3.848 3.638 0.210
- ##
- ## Whole network:
- ## group1 group2 difference
- ## Number of components 10.000 27.000 17.000
- ## Clustering coefficient 0.041 0.116 0.075
- ## Moduarity 0.695 0.693 0.002
- ## Positive edge percentage 62.887 60.000 2.887
- ## Edge density 0.025 0.020 0.006
- ## Natural connectivity 0.013 0.013 0.000
- ## -----
- ## *: Dissimilarity = 1 - edge weight
- ## **Path length: Units with average dissimilarity
- ##
- ## ______________________________
- ## Jaccard index (similarity betw. sets of most central nodes)
- ## ``````````````````````````````````````````````````````````
- ## Jacc P(<=Jacc) P(>=Jacc)
- ## degree 0.138 0.016100 * 0.995512
- ## betweenness centr. 0.222 0.105480 0.948638
- ## closeness centr. 0.257 0.221235 0.873473
- ## eigenvec. centr. 0.222 0.105480 0.948638
- ## hub taxa 0.000 0.017342 * 1.000000
- ## -----
- ## Jaccard index ranges from 0 (compl. different) to 1 (sets equal)
- ##
- ## ______________________________
- ## Adjusted Rand index (similarity betw. clusterings)
- ## ``````````````````````````````````````````````````
- ## ARI p-value
- ## 0.056 0.005
- ## -----
- ## ARI in [-1,1] with ARI=1: perfect agreement betw. clusterings,
- ## ARI=0: expected for two random clusterings
- ## p-value: two-tailed test with null hypothesis ARI=0
- ##
- ## ______________________________
- ## Centrality measures
- ## - In decreasing order
- ## - Centrality of disconnected components is zero
- ## ````````````````````````````````````````````````
- ## Degree (normalized):
- ## group1 group2 abs.diff.
- ## 5.1.3.3 0.092 0.023 0.069
- ## 6.3.5.5 0.011 0.069 0.057
- ## 1.2.1.12 0.057 0.000 0.057
- ## K02992 0.057 0.000 0.057
- ## 6.3.2.6 0.023 0.069 0.046
- ## 2.6.1.52 0.046 0.000 0.046
- ## K00626 0.046 0.000 0.046
- ## K03043 0.069 0.023 0.046
- ## Unc054vi 0.011 0.046 0.034
- ## Unc00y95 0.011 0.046 0.034
- ##
- ## Betweenness centrality (normalized):
- ## group1 group2 abs.diff.
- ## 5.1.3.3 0.421 0.033 0.388
- ## 6.3.5.5 0.000 0.348 0.348
- ## 2.7.7.6 0.208 0.542 0.334
- ## Unc01c0q 0.028 0.352 0.324
- ## 6.3.2.6 0.082 0.404 0.322
- ## 1.2.1.12 0.308 0.000 0.308
- ## 2.3.1.29 0.028 0.234 0.206
- ## K15633 0.000 0.174 0.174
- ## K01006 0.234 0.074 0.160
- ## 3.6.3.14 0.265 0.125 0.140
- ##
- ## Closeness centrality (normalized):
- ## group1 group2 abs.diff.
- ## 1.2.1.12 0.468 0.000 0.468
- ## K02992 0.463 0.000 0.463
- ## 6.3.4.4 0.000 0.435 0.435
- ## 2.2.1.1 0.000 0.411 0.411
- ## K02935 0.394 0.000 0.394
- ## 4.1.1.49 0.000 0.393 0.393
- ## 2.6.1.52 0.392 0.000 0.392
- ## K10439 0.381 0.000 0.381
- ## K02357 0.365 0.000 0.365
- ## K00074 0.362 0.000 0.362
- ##
- ## Eigenvector centrality (normalized):
- ## group1 group2 abs.diff.
- ## 5.1.3.3 1.000 0.028 0.972
- ## 5.3.1.12 0.816 0.055 0.762
- ## K03043 0.748 0.000 0.747
- ## 6.3.5.5 0.032 0.743 0.712
- ## K02992 0.682 0.000 0.682
- ## 1.2.1.12 0.651 0.000 0.651
- ## 5.3.1.25 0.608 0.014 0.594
- ## Unc054vi 0.001 0.551 0.550
- ## K01812 0.591 0.155 0.436
- ## 2.7.7.8 0.127 0.553 0.426
- ##
- ## _________________________________________________________
- ## Significance codes: ***: 0.001, **: 0.01, *: 0.05, .: 0.1
diff --git a/tutorials/figures/NetCoMi_logo_800x400_300dpi.png b/tutorials/figures/NetCoMi_logo_800x400_300dpi.png
deleted file mode 100644
index 9fa01a2..0000000
Binary files a/tutorials/figures/NetCoMi_logo_800x400_300dpi.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_adapt/hub_labels_only-1.png b/tutorials/figures/tutorial_adapt/hub_labels_only-1.png
deleted file mode 100644
index cb103a0..0000000
Binary files a/tutorials/figures/tutorial_adapt/hub_labels_only-1.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_adapt/method_1-1.png b/tutorials/figures/tutorial_adapt/method_1-1.png
deleted file mode 100644
index 0fdafb8..0000000
Binary files a/tutorials/figures/tutorial_adapt/method_1-1.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_adapt/method_1-2.png b/tutorials/figures/tutorial_adapt/method_1-2.png
deleted file mode 100644
index 946617e..0000000
Binary files a/tutorials/figures/tutorial_adapt/method_1-2.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_adapt/method_2-1.png b/tutorials/figures/tutorial_adapt/method_2-1.png
deleted file mode 100644
index 2dd4e6f..0000000
Binary files a/tutorials/figures/tutorial_adapt/method_2-1.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_create/diffnet_2-1.png b/tutorials/figures/tutorial_create/diffnet_2-1.png
deleted file mode 100644
index f55198a..0000000
Binary files a/tutorials/figures/tutorial_create/diffnet_2-1.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_create/network_construction-1.png b/tutorials/figures/tutorial_create/network_construction-1.png
deleted file mode 100644
index 293b445..0000000
Binary files a/tutorials/figures/tutorial_create/network_construction-1.png and /dev/null differ
diff --git a/tutorials/figures/tutorial_cross-domain/network_plot-1.png b/tutorials/figures/tutorial_cross-domain/network_plot-1.png
deleted file mode 100644
index 3739b3b..0000000
Binary files a/tutorials/figures/tutorial_cross-domain/network_plot-1.png and /dev/null differ
diff --git a/vignettes/NetCoMi.Rmd b/vignettes/NetCoMi.Rmd
index 566fbd7..4d5a76a 100644
--- a/vignettes/NetCoMi.Rmd
+++ b/vignettes/NetCoMi.Rmd
@@ -1,5 +1,5 @@
---
-title: "NetCoMi"
+title: "Get started"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{NetCoMi}
@@ -8,20 +8,20 @@ vignette: >
---
```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
-We use the American Gut data from [`SpiecEasi`](https://github.com/zdk123/SpiecEasi)
-package to look at some examples of how NetCoMi is applied. NetCoMi's main
-functions are `netConstruct()` for network construction, `netAnalyze()` for
-network analysis, and `netCompare()` for network comparison.
-As you will see in the following, these three functions must
-be executed in the aforementioned order. A further function is `diffnet()` for
-constructing a differential association network. `diffnet()` must be applied to
-the object returned by `netConstruct()`.
+The American Gut data provided by the [`SpiecEasi`](https://github.com/zdk123/SpiecEasi) package are used for almost all NetCoMi tutorials.
+
+We begin by constructing a single network, which we analyze using quantitative and graphical methods. Later, we will compare the networks of two groups: individuals with and without seasonal allergies.
+
+NetCoMi's main functions are `netConstruct()` for network construction, `netAnalyze()` for network analysis, and `netCompare()` for network comparison. As the examples below illustrate, these three functions must be executed in this order. A further function, `diffnet()`, constructs a differential association network; it must be applied to the object returned by `netConstruct()`.
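+
+To make the required order explicit, the following schematic sketches the workflow. It is not evaluated, and the object names are placeholders rather than objects created in this vignette:
+
+```{r workflow_sketch, eval=FALSE}
+# Schematic only: placeholder object names, arguments omitted
+net   <- netConstruct(counts)   # 1. construct a network from count data
+props <- netAnalyze(net)        # 2. analyze the constructed network
+comp  <- netCompare(props)      # 3. compare networks (two-group setting)
+diff  <- diffnet(net)           # differential network, applied to the
+                                #    netConstruct() output
+```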
First of all, we load NetCoMi and the data from American Gut Project (provided
by [`SpiecEasi`](https://github.com/zdk123/SpiecEasi), which is automatically
@@ -33,13 +33,9 @@ data("amgut1.filt")
data("amgut2.filt.phy")
```
-### Network with SPRING as association measure
+### Network construction
-#### Network construction and analysis
-
-We firstly construct a single association network using
-[SPRING](https://github.com/GraceYoon/SPRING) for
-estimating associations (conditional dependence) between OTUs.
+We use the [SPRING](https://github.com/GraceYoon/SPRING) package for estimating associations (conditional dependence) between OTUs.
The data are filtered within `netConstruct()` as follows:
@@ -84,7 +80,7 @@ net_spring <- netConstruct(amgut1.filt,
seed = 123456)
```
-#### Analyzing the constructed network
+### Network analysis
NetCoMi's `netAnalyze()` function is used for analyzing the constructed
network(s).
@@ -106,13 +102,13 @@ graphlet correlations in the upper triangle and significance codes resulting
from Student's t-test in the lower triangle).
See `?calcGCM` and `?testGCM` for details.
-
```{r single_spring_2, fig.height=6, fig.width=6}
props_spring <- netAnalyze(net_spring,
centrLCC = TRUE,
clustMethod = "cluster_fast_greedy",
hubPar = "eigenvector",
- weightDeg = FALSE, normDeg = FALSE)
+ weightDeg = FALSE,
+ normDeg = FALSE)
#?summary.microNetProps
summary(props_spring, numbNodes = 5L)
@@ -140,7 +136,7 @@ text(6, -0.2, xpd = NA,
```
-#### Visualizing the network
+### Visualizing the network
We use the determined clusters as node colors and scale
the node sizes according to the node's eigenvector centrality.
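+
+A minimal sketch of such a plot call is shown below (not evaluated); the argument names follow `?plot.microNetProps`, which lists the full set of plotting options:
+
+```{r plot_sketch, eval=FALSE}
+# Sketch: color nodes by cluster and scale node sizes by eigenvector centrality
+plot(props_spring,
+     nodeColor = "cluster",
+     nodeSize = "eigenvector")
+```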
diff --git a/vignettes/asso_as_input.Rmd b/vignettes/asso_as_input.Rmd
index 4a36cd1..dcf5300 100644
--- a/vignettes/asso_as_input.Rmd
+++ b/vignettes/asso_as_input.Rmd
@@ -8,6 +8,9 @@ vignette: >
---
```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
diff --git a/tutorials/TUTORIAL_createAssoPerm.Rmd b/vignettes/create_asso_perm.Rmd
similarity index 94%
rename from tutorials/TUTORIAL_createAssoPerm.Rmd
rename to vignettes/create_asso_perm.Rmd
index 57f7753..455a4d2 100644
--- a/tutorials/TUTORIAL_createAssoPerm.Rmd
+++ b/vignettes/create_asso_perm.Rmd
@@ -1,16 +1,30 @@
---
-output: github_document
+title: "Generate permuted association matrices"
+output: rmarkdown::html_vignette
+vignette: >
+ %\VignetteIndexEntry{createAssoPerm}
+ %\VignetteEngine{knitr::rmarkdown}
+ %\VignetteEncoding{UTF-8}
---
-```{r setup, echo = FALSE}
-knitr::opts_chunk$set(fig.path="figures/tutorial_create/")
+```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
+knitr::opts_chunk$set(
+ collapse = TRUE,
+ comment = "#>"
+)
```
-## Tutorial: How to use createAssoPerm()
+```{r setup, message=FALSE, warning=FALSE}
+library(NetCoMi)
+```
-This tutorial is meant to demonstrate the options for storing and reloading
-count tables, association matrices and network properties of the permuted data
-if **permutation tests** are performed with `netCompare()` or `diffnet()`.
+This tutorial demonstrates how to use the `createAssoPerm()` function
+for storing and reloading count tables, association matrices and network
+properties of the permuted data when **permutation tests** are performed with
+`netCompare()` or `diffnet()`.
NetCoMi (>=1.0.2) includes the function `createAssoPerm()` for creating either
only a matrix with permuted group labels or storing count tables and association
@@ -25,7 +39,7 @@ and passed to `netCompare()` or `diffnet()`.
The tutorial furthermore explains NetCoMi's ability to handle **matched data sets**.
-#### Network construction and analysis
+### Network construction and analysis
We use data from the American Gut Project and conduct a network comparison
between subjects with and without lactose intolerance.
@@ -110,7 +124,7 @@ legend("bottom", title = "estimated correlation:", legend = c("+","-"),
bty = "n", horiz = TRUE)
```
-#### Network comparison via the "classical way"
+### Network comparison via the "classical way"
We conduct a network comparison with permutation tests to examine whether group
differences are significant. In order to reduce the execution time, only 100
@@ -175,7 +189,7 @@ plot(diffnet_amgut, adjusted = FALSE,
```
-#### Network comparison using createAssoPerm()
+### Network comparison using createAssoPerm()
This time, the permutation association matrices are generated using
`createAssoPerm()` and then passed to `netCompare()`.
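+
+As a rough outline of this workflow (not evaluated; `props_amgut` stands for the `netAnalyze()` result created above, and the argument names should be verified against `?createAssoPerm` and `?netCompare`), the permutation association matrices are first written to file and then reloaded by `netCompare()`:
+
+```{r createAssoPerm_sketch, eval=FALSE}
+# Generate the permutation association matrices and store them on disk
+createAssoPerm(props_amgut, nPerm = 100L,
+               computeAsso = TRUE,
+               fileStoreAssoPerm = "assoPerm",
+               append = FALSE, seed = 123456)
+
+# Reuse the stored matrices for the permutation tests
+comp_amgut <- netCompare(props_amgut, permTest = TRUE, nPerm = 100L,
+                         fileLoadAssoPerm = "assoPerm",
+                         seed = 123456)
+```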
@@ -246,7 +260,7 @@ filematrix::close(assoPerm1)
filematrix::close(assoPerm2)
```
-#### Block-wise execution
+### Block-wise execution
Due to limited resources, it might be meaningful to estimate the associations
in blocks, that is, for a subset of permutations instead of all permutations at
@@ -325,7 +339,7 @@ close(assoPerm1)
close(assoPerm3)
```
-#### Block-wise execution (executable in parallel)
+### Block-wise execution (executable in parallel)
If the blocks should be computed in parallel, extending the `"assoPerm"` file
in each iteration would not work. To be able to run the blocks in parallel,
@@ -429,5 +443,3 @@ close(assoPerm1)
close(assoPerm4)
```
-
-
diff --git a/tutorials/TUTORIAL_cross-domain_associations.Rmd b/vignettes/cross_domain_networks.Rmd
similarity index 56%
rename from tutorials/TUTORIAL_cross-domain_associations.Rmd
rename to vignettes/cross_domain_networks.Rmd
index fb16d8c..c296f77 100644
--- a/tutorials/TUTORIAL_cross-domain_associations.Rmd
+++ b/vignettes/cross_domain_networks.Rmd
@@ -1,27 +1,35 @@
---
-output: github_document
+title: "Cross-domain networks"
+output: rmarkdown::html_vignette
+vignette: >
+ %\VignetteIndexEntry{cross_domain_network}
+ %\VignetteEngine{knitr::rmarkdown}
+ %\VignetteEncoding{UTF-8}
---
-```{r setup, echo = FALSE}
-knitr::opts_chunk$set(fig.path="figures/tutorial_cross-domain/")
+```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
+knitr::opts_chunk$set(
+ collapse = TRUE,
+ comment = "#>"
+)
```
-## Tutorial: How to compare cross-domain association networks using NetCoMi and SpiecEasi?
+```{r setup, message=FALSE, warning=FALSE}
+library(NetCoMi)
+```
+
+
+This tutorial shows how to construct a cross-domain network (e.g. a network consisting of bacteria and fungi) using SpiecEasi's ability to estimate cross-domain associations.
-In this tutorial, we use the same data as in the "Cross domain interactions"
-section of the [SpiecEasi tutorial](https://github.com/zdk123/SpiecEasi).
+We use the same data as in the "Cross domain interactions" section of the [SpiecEasi tutorial](https://github.com/zdk123/SpiecEasi).
-The samples are split into two groups and cross-domain associations
-are computed for each group using SpiecEasi. The association matrices are then
-passed to NetCoMi's netConstruct() function to conduct a network comparison
-between the two groups.
+The samples are split into two groups and cross-domain associations are computed for each group using SpiecEasi. The association matrices are then passed to NetCoMi's `netConstruct()` function to conduct a network comparison between the two groups.
**Note:**
-This tutorial explains how two cross-domain networks are constructed and compared.
-For constructing a single network, skip the step where the data are split into
-two groups and perform the framework only for a single data set (i.e. pass the
-estimated association matrix to the "data" argument of netConstruct() and
-continue with NetCoMi's standard pipeline).
+This tutorial explains how two cross-domain networks are constructed and **compared**. For constructing a single network, skip the step where the data are split into two groups and apply the workflow to a single data set (i.e. pass the estimated association matrix to the `data` argument of `netConstruct()` and continue with NetCoMi's standard pipeline).
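+
+For orientation, a minimal sketch of this final step is given below (not evaluated); it uses the association matrices `assoMat1` and `assoMat2` computed in the following code, and the arguments should be checked against `?netConstruct`:
+
+```{r cross_domain_sketch, eval=FALSE}
+# Two-group comparison: pass both association matrices to netConstruct().
+# SpiecEasi output is already sparse, so no additional sparsification is used.
+net_cross <- netConstruct(data = assoMat1, data2 = assoMat2,
+                          dataType = "condDependence",
+                          sparsMethod = "none")
+
+# Single network: pass only one association matrix via the data argument
+# net_single <- netConstruct(data = assoMat1,
+#                            dataType = "condDependence",
+#                            sparsMethod = "none")
+```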
```{r}
@@ -47,20 +55,28 @@ counts_hmp2prot_gr2 <- counts_hmp2prot[group_vec == 2, ]
set.seed(123456)
# Run SpiecEasi and create association matrix for group 1
-spiec_result_gr1 <- multi.spiec.easi(list(counts_hmp216S_gr1, counts_hmp2prot_gr1),
- method='mb', nlambda=40,
- lambda.min.ratio=1e-2,
- pulsar.params = list(thresh = 0.05))
+# Note: Increase nlambda and rep.num for real data sets
+spiec_result_gr1 <- multi.spiec.easi(list(counts_hmp216S_gr1,
+                                          counts_hmp2prot_gr1),
+                                     method = "mb",
+                                     nlambda = 10,
+                                     lambda.min.ratio = 1e-2,
+                                     pulsar.params = list(thresh = 0.05,
+                                                          rep.num = 10))
assoMat1 <- SpiecEasi::symBeta(SpiecEasi::getOptBeta(spiec_result_gr1), mode = "ave")
assoMat1 <- as.matrix(assoMat1)
# Run SpiecEasi and create association matrix for group 2
-spiec_result_gr2 <- multi.spiec.easi(list(counts_hmp216S_gr2, counts_hmp2prot_gr2),
- method='mb', nlambda=40,
- lambda.min.ratio=1e-2,
- pulsar.params = list(thresh = 0.05))
+# Note: Increase nlambda and rep.num for real data sets
+spiec_result_gr2 <- multi.spiec.easi(list(counts_hmp216S_gr2,
+                                          counts_hmp2prot_gr2),
+                                     method = "mb",
+                                     nlambda = 10,
+                                     lambda.min.ratio = 1e-2,
+                                     pulsar.params = list(thresh = 0.05,
+                                                          rep.num = 10))
assoMat2 <- SpiecEasi::symBeta(SpiecEasi::getOptBeta(spiec_result_gr2), mode = "ave")
diff --git a/vignettes/diffnet.Rmd b/vignettes/diffnet.Rmd
index 87a2347..46d1c7e 100644
--- a/vignettes/diffnet.Rmd
+++ b/vignettes/diffnet.Rmd
@@ -8,6 +8,9 @@ vignette: >
---
```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
diff --git a/vignettes/dissimilarity_networks.Rmd b/vignettes/dissimilarity_networks.Rmd
index 0f0d866..56665f9 100644
--- a/vignettes/dissimilarity_networks.Rmd
+++ b/vignettes/dissimilarity_networks.Rmd
@@ -8,6 +8,9 @@ vignette: >
---
```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
diff --git a/vignettes/netcompare.Rmd b/vignettes/net_comparison.Rmd
similarity index 99%
rename from vignettes/netcompare.Rmd
rename to vignettes/net_comparison.Rmd
index fc03110..98064fe 100644
--- a/vignettes/netcompare.Rmd
+++ b/vignettes/net_comparison.Rmd
@@ -8,6 +8,9 @@ vignette: >
---
```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
diff --git a/vignettes/soil_example.Rmd b/vignettes/soil_example.Rmd
index 46cf8b6..91014a0 100644
--- a/vignettes/soil_example.Rmd
+++ b/vignettes/soil_example.Rmd
@@ -8,6 +8,9 @@ vignette: >
---
```{r, include = FALSE}
+# Suppress title check warning
+options(rmarkdown.html_vignette.check_title = FALSE)
+
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"