diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
index 0f6464dc..27f7da4b 100644
--- a/.vscode/c_cpp_properties.json
+++ b/.vscode/c_cpp_properties.json
@@ -11,7 +11,6 @@
"/usr/local/include/umfpack"
],
"defines": [
- "WITH_INTEL_DSS",
"USE_INTEL_MKL"
],
"compilerPath": "/usr/bin/gcc",
@@ -21,4 +20,4 @@
}
],
"version": 4
-}
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 0c2cb93a..37b0ce1f 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -39,7 +39,6 @@
"ifort",
"IIIₛ",
"ᵢⱼₖₗ",
- "inteldss",
"iomp",
"irhs",
"jobvl",
diff --git a/README.md b/README.md
index 4502c031..bfe3248b 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ Next, we recommend looking at the [russell_sparse](https://github.com/cpmech/rus
Available crates:
- [![Crates.io](https://img.shields.io/crates/v/russell_lab.svg)](https://crates.io/crates/russell_lab) [russell_lab](https://github.com/cpmech/russell/tree/main/russell_lab) Matrix-vector laboratory for linear algebra (with OpenBLAS or Intel MKL)
-- [![Crates.io](https://img.shields.io/crates/v/russell_sparse.svg)](https://crates.io/crates/russell_sparse) [russell_sparse](https://github.com/cpmech/russell/tree/main/russell_sparse) Sparse matrix tools and solvers (with MUMPS, UMFPACK, and Intel DSS)
+- [![Crates.io](https://img.shields.io/crates/v/russell_sparse.svg)](https://crates.io/crates/russell_sparse) [russell_sparse](https://github.com/cpmech/russell/tree/main/russell_sparse) Sparse matrix tools and solvers (with MUMPS and UMFPACK)
- [![Crates.io](https://img.shields.io/crates/v/russell_stat.svg)](https://crates.io/crates/russell_stat) [russell_stat](https://github.com/cpmech/russell/tree/main/russell_stat) Statistics calculations, probability distributions, and pseudo random numbers
- [![Crates.io](https://img.shields.io/crates/v/russell_tensor.svg)](https://crates.io/crates/russell_tensor) [russell_tensor](https://github.com/cpmech/russell/tree/main/russell_tensor) Tensor analysis structures and functions for continuum mechanics
@@ -365,7 +365,6 @@ fn main() -> Result<(), StrError> {
- [x] Implement the Compressed Sparse Column format (CSC)
- [x] Implement the Compressed Sparse Row format (CSR)
- [x] Improve the C-interface to UMFPACK and MUMPS
- - [x] Implement the C-interface to Intel DSS
- [ ] Write the conversion from COO to CSC in Rust
- [ ] Possibly re-write (after benchmarking) the conversion from COO to CSR
- [ ] Re-study the possibility of wrapping SuperLU (see deleted branch)
diff --git a/russell_ode/Cargo.toml b/russell_ode/Cargo.toml
index d3227e92..23f08f3f 100644
--- a/russell_ode/Cargo.toml
+++ b/russell_ode/Cargo.toml
@@ -11,6 +11,9 @@ readme = "README.md"
categories = ["mathematics", "science"]
keywords = ["differential equations", "numerical methods", "solver"]
+[features]
+intel_mkl = ["russell_lab/intel_mkl", "russell_sparse/intel_mkl"]
+
[dependencies]
russell_lab = { path = "../russell_lab", version = "0.8" }
russell_sparse = { path = "../russell_sparse", version = "0.8" }
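The new `[features]` table above forwards `intel_mkl` from russell_ode to `russell_lab` and `russell_sparse`. Below is a minimal sketch of how code inside a crate that declares this feature can branch on it at compile time; the helper function is hypothetical and not part of russell_ode:

```rust
// Hypothetical helper showing how the `intel_mkl` cargo feature can be
// detected at compile time; build with `cargo build --features intel_mkl`.
pub fn blas_backend() -> &'static str {
    if cfg!(feature = "intel_mkl") {
        "Intel MKL" // feature enabled: russell_lab/russell_sparse link MKL
    } else {
        "OpenBLAS" // default backend
    }
}
```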
diff --git a/russell_sparse/Cargo.toml b/russell_sparse/Cargo.toml
index 376f3e03..dc3b87fa 100644
--- a/russell_sparse/Cargo.toml
+++ b/russell_sparse/Cargo.toml
@@ -13,7 +13,7 @@ keywords = ["matrix", "sparse", "solver"]
[features]
local_libs = []
-intel_mkl = ["local_libs"]
+intel_mkl = ["local_libs", "russell_lab/intel_mkl"]
[dependencies]
num-complex = { version = "0.4", features = ["serde"] }
diff --git a/russell_sparse/README.md b/russell_sparse/README.md
index 0b6ecf5e..b691a95a 100644
--- a/russell_sparse/README.md
+++ b/russell_sparse/README.md
@@ -14,7 +14,7 @@ _This crate is part of [Russell - Rust Scientific Library](https://github.com/cp
## Introduction
-This crate implements tools for handling sparse matrices and functions to solve large sparse systems using the best libraries out there, such as [UMFPACK (recommended)](https://github.com/DrTimothyAldenDavis/SuiteSparse) and [MUMPS (for very large systems)](https://mumps-solver.org). Optionally, you may want to use the [Intel DSS solver](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/direct-sparse-solver-dss-interface-routines.html).
+This crate implements tools for handling sparse matrices and functions to solve large sparse systems using the best libraries out there, such as [UMFPACK (recommended)](https://github.com/DrTimothyAldenDavis/SuiteSparse) and [MUMPS (for very large systems)](https://mumps-solver.org).
We have three storage formats for sparse matrices:
@@ -26,7 +26,7 @@ Additionally, to unify the handling of the above sparse matrix data structures,
* SparseMatrix: Either a COO, CSC, or CSR matrix
-The COO matrix is the best when we need to update the values of the matrix because it has easy access to the triples (i, j, aij). For instance, the repetitive access is the primary use case for codes based on the finite element method (FEM) for approximating partial differential equations. Moreover, the COO matrix allows storing duplicate entries; for example, the triple `(0, 0, 123.0)` can be stored as two triples `(0, 0, 100.0)` and `(0, 0, 23.0)`. Again, this is the primary need for FEM codes because of the so-called assembly process where elements add to the same positions in the "global stiffness" matrix. Nonetheless, the duplicate entries must be summed up at some stage for the linear solver (e.g., MUMPS, UMFPACK, and Intel DSS). These linear solvers also use the more memory-efficient storage formats CSC and CSR. See the [russell_sparse documentation](https://docs.rs/russell_sparse) for further information.
+The COO matrix is the best when we need to update the values of the matrix because it has easy access to the triples (i, j, aij). For instance, the repetitive access is the primary use case for codes based on the finite element method (FEM) for approximating partial differential equations. Moreover, the COO matrix allows storing duplicate entries; for example, the triple `(0, 0, 123.0)` can be stored as two triples `(0, 0, 100.0)` and `(0, 0, 23.0)`. Again, this is the primary need for FEM codes because of the so-called assembly process where elements add to the same positions in the "global stiffness" matrix. Nonetheless, the duplicate entries must be summed up at some stage for the linear solver (e.g., MUMPS, UMFPACK). These linear solvers also use the more memory-efficient storage formats CSC and CSR. See the [russell_sparse documentation](https://docs.rs/russell_sparse) for further information.
This library also provides functions to read and write Matrix Market files containing (huge) sparse matrices that can be used in performance benchmarking or other studies. The [read_matrix_market()] function reads a Matrix Market file and returns a [CooMatrix]. To write a Matrix Market file, we can use the function [write_matrix_market()], which takes a [SparseMatrix] and, thus, automatically converts COO to CSC or COO to CSR, also performing the sum of duplicates. The `write_matrix_market` function also writes an SMAT file (almost like the Matrix Market format) without the header and with zero-based indices. The SMAT file can be given to the fantastic [Vismatrix](https://github.com/cpmech/vismatrix) tool to visualize the sparse matrix structure and values interactively; see the example below.
@@ -38,7 +38,7 @@ See the documentation for further information:
## Installation
-This crate depends on `russell_lab`, which, in turn, depends on an efficient BLAS library such as [OpenBLAS](https://github.com/OpenMathLib/OpenBLAS) and [Intel MKL](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/overview.html). This crate also depends on [UMFPACK](https://github.com/DrTimothyAldenDavis/SuiteSparse), [MUMPS](https://mumps-solver.org), and, optionally, on [Intel DSS](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/direct-sparse-solver-dss-interface-routines.html).
+This crate depends on `russell_lab`, which, in turn, depends on an efficient BLAS library such as [OpenBLAS](https://github.com/OpenMathLib/OpenBLAS) and [Intel MKL](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/overview.html). This crate also depends on [UMFPACK](https://github.com/DrTimothyAldenDavis/SuiteSparse) and [MUMPS](https://mumps-solver.org).
[The root README file presents the steps to install the required dependencies.](https://github.com/cpmech/russell)
@@ -245,7 +245,7 @@ Also, to reproduce the issue, we need:
## For developers
-* The `c_code` directory contains a thin wrapper to the sparse solvers (MUMPS, UMFPACK, and Intel DSS)
+* The `c_code` directory contains a thin wrapper to the sparse solvers (MUMPS and UMFPACK)
* The `build.rs` file uses the crate `cc` to build the C-wrappers
* The `zscripts` directory also contains the following:
* `memcheck.bash`: Checks for memory leaks on the C-code using Valgrind
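As noted in the README paragraph above, duplicate COO entries are summed when the matrix is converted for the solvers. A minimal sketch of that behavior, using the `CooMatrix` calls that appear elsewhere in this patch (the 1-by-1 matrix is illustrative only):

```rust
use russell_sparse::{CooMatrix, SparseMatrix, StrError};

fn main() -> Result<(), StrError> {
    // FEM-style assembly: two contributions land on the same (0,0) position
    let (nrow, ncol, max_nnz) = (1, 1, 2);
    let mut coo = CooMatrix::new(nrow, ncol, max_nnz, None)?;
    coo.put(0, 0, 100.0)?; // first contribution
    coo.put(0, 0, 23.0)?; // duplicate entry
    // the unified SparseMatrix wrapper converts COO to CSC/CSR on demand,
    // summing the duplicates into a single 123.0 entry for the solver
    let _mat = SparseMatrix::from_coo(coo);
    Ok(())
}
```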
diff --git a/russell_sparse/build.rs b/russell_sparse/build.rs
index 7b69bbe7..8a5e2bc4 100644
--- a/russell_sparse/build.rs
+++ b/russell_sparse/build.rs
@@ -1,6 +1,3 @@
-#[cfg(feature = "intel_mkl")]
-const MKL_VERSION: &str = "2023.2.0";
-
#[cfg(feature = "local_libs")]
fn handle_local_libs() {
// local MUMPS
@@ -42,40 +39,6 @@ fn handle_local_libs() {
println!("cargo:rustc-link-lib=dylib=umfpack");
}
-#[cfg(feature = "intel_mkl")]
-fn handle_intel_mkl() {
- // Find the link libs with: pkg-config --libs mkl-dynamic-lp64-iomp
- cc::Build::new()
- .file("c_code/interface_intel_dss.c")
- .include(format!("/opt/intel/oneapi/mkl/{}/include", MKL_VERSION))
- .define("WITH_INTEL_DSS", None)
- .compile("c_code_interface_intel_dss");
- println!(
- "cargo:rustc-link-search=native=/opt/intel/oneapi/mkl/{}/lib/intel64",
- MKL_VERSION
- );
- println!(
- "cargo:rustc-link-search=native=/opt/intel/oneapi/compiler/{}/linux/compiler/lib/intel64_lin",
- MKL_VERSION
- );
- println!("cargo:rustc-link-lib=mkl_intel_lp64");
- println!("cargo:rustc-link-lib=mkl_intel_thread");
- println!("cargo:rustc-link-lib=mkl_core");
- println!("cargo:rustc-link-lib=pthread");
- println!("cargo:rustc-link-lib=m");
- println!("cargo:rustc-link-lib=dl");
- println!("cargo:rustc-link-lib=iomp5");
- println!("cargo:rustc-cfg=with_intel_dss");
-}
-
-#[cfg(not(feature = "intel_mkl"))]
-fn handle_intel_mkl() {
- cc::Build::new()
- .file("c_code/interface_intel_dss.c")
- .compile("c_code_interface_intel_dss");
-}
-
fn main() {
handle_local_libs();
- handle_intel_mkl();
}
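With the Intel DSS branch removed, `build.rs` keeps only `handle_local_libs`. The deleted code above shows the `cc` pattern this crate uses to compile its C wrappers; a hedged minimal sketch of that pattern follows (the file name, define, and library are placeholders, not the actual build script):

```rust
// build.rs sketch of the `cc` pattern used for the C wrappers
// (file name, define, and library are illustrative placeholders)
fn main() {
    cc::Build::new()
        .file("c_code/interface_example.c") // hypothetical wrapper source
        .define("EXAMPLE_FLAG", None) // passes -DEXAMPLE_FLAG to the compiler
        .compile("c_code_interface_example"); // produces a static library
    // then tell rustc which native library to link against
    println!("cargo:rustc-link-lib=dylib=umfpack");
}
```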
diff --git a/russell_sparse/c_code/interface_intel_dss.c b/russell_sparse/c_code/interface_intel_dss.c
deleted file mode 100644
index f77e6348..00000000
--- a/russell_sparse/c_code/interface_intel_dss.c
+++ /dev/null
@@ -1,240 +0,0 @@
-#include <inttypes.h>
-#include <stdlib.h>
-
-#ifdef WITH_INTEL_DSS
-#include "mkl_dss.h"
-#else
-#define MKL_INT int
-#define _MKL_DSS_HANDLE_t void *
-#define UNUSED(var) (void)var
-#endif
-
-#include "constants.h"
-
-/// @brief Wraps the Intel DSS solver
-/// @note The DSS uses a row-major UPPER triangular storage format.
-/// @note The matrix is compressed row-by-row.
-/// @note For symmetric matrices only non-zero elements in the UPPER triangular half of the matrix are stored.
-/// @note For symmetric matrices, the zero diagonal entries must be stored as well.
-struct InterfaceIntelDSS {
- /// @brief Holds the Intel DSS options
- MKL_INT dss_opt;
-
- /// @brief Holds the Intel DSS sym option
- MKL_INT dss_sym;
-
- /// @brief Holds the Intel DSS type option
- MKL_INT dss_type;
-
- /// @brief Holds the Intel DSS handle (allocated in analyze)
- _MKL_DSS_HANDLE_t handle;
-
- /// @brief indicates that the initialization has been completed
- C_BOOL initialization_completed;
-
- /// @brief Indicates that the factorization (at least once) has been completed
- C_BOOL factorization_completed;
-};
-
-/// @brief Allocates a new Intel DSS interface
-/// @return A pointer to the solver interface
-struct InterfaceIntelDSS *solver_intel_dss_new() {
-#ifdef WITH_INTEL_DSS
- struct InterfaceIntelDSS *solver = (struct InterfaceIntelDSS *)malloc(sizeof(struct InterfaceIntelDSS));
-
- if (solver == NULL) {
- return NULL;
- }
-
- solver->handle = NULL;
- solver->initialization_completed = C_FALSE;
- solver->factorization_completed = C_FALSE;
-
- return solver;
-#else
- return NULL;
-#endif
-}
-
-/// @brief Deallocates the Intel DSS interface
-/// @param solver Is a pointer to the solver interface
-void solver_intel_dss_drop(struct InterfaceIntelDSS *solver) {
-#ifdef WITH_INTEL_DSS
- if (solver == NULL) {
- return;
- }
-
- if (solver->handle != NULL) {
- dss_delete(solver->handle, solver->dss_opt);
- }
-
- free(solver);
-#else
- UNUSED(solver);
-#endif
-}
-
-/// @brief Defines the structure and reorder analysis
-/// @return A success or fail code
-int32_t solver_intel_dss_initialize(struct InterfaceIntelDSS *solver,
- C_BOOL general_symmetric,
- C_BOOL positive_definite,
- int32_t ndim,
- const int32_t *row_pointers,
- const int32_t *col_indices) {
-#ifdef WITH_INTEL_DSS
-
- if (solver == NULL) {
- return ERROR_NULL_POINTER;
- }
-
- if (solver->initialization_completed == C_TRUE) {
- return ERROR_ALREADY_INITIALIZED;
- }
-
- solver->dss_opt = MKL_DSS_MSG_LVL_WARNING + MKL_DSS_TERM_LVL_ERROR + MKL_DSS_ZERO_BASED_INDEXING;
-
- solver->dss_sym = MKL_DSS_NON_SYMMETRIC;
- if (general_symmetric == C_TRUE) {
- solver->dss_sym = MKL_DSS_SYMMETRIC;
- }
-
- solver->dss_type = MKL_DSS_INDEFINITE;
- if (positive_definite == C_TRUE) {
- solver->dss_type = MKL_DSS_POSITIVE_DEFINITE;
- }
-
- MKL_INT status = dss_create(solver->handle, solver->dss_opt);
-
- if (status != MKL_DSS_SUCCESS) {
- return status;
- }
-
- if (solver->handle == NULL) {
- return ERROR_NULL_POINTER;
- }
-
- // define the non-zero structure of the matrix
- MKL_INT nnz = row_pointers[ndim];
- status = dss_define_structure(solver->handle,
- solver->dss_sym,
- row_pointers,
- ndim,
- ndim,
- col_indices,
- nnz);
- if (status != MKL_DSS_SUCCESS) {
- return status;
- }
-
- // reorder the matrix (NOTE: we cannot call reorder again)
- status = dss_reorder(solver->handle, solver->dss_opt, 0);
- if (status != MKL_DSS_SUCCESS) {
- return status;
- }
-
- solver->initialization_completed = C_TRUE;
-
- return SUCCESSFUL_EXIT;
-
-#else
- UNUSED(solver);
- UNUSED(general_symmetric);
- UNUSED(positive_definite);
- UNUSED(ndim);
- UNUSED(row_pointers);
- UNUSED(col_indices);
- return ERROR_NOT_AVAILABLE;
-#endif
-}
-
-/// @brief Performs the factorization
-/// @return A success or fail code
-int32_t solver_intel_dss_factorize(struct InterfaceIntelDSS *solver,
- double *determinant_coefficient,
- double *determinant_exponent,
- C_BOOL compute_determinant,
- const double *values) {
-#ifdef WITH_INTEL_DSS
-
- if (solver == NULL) {
- return ERROR_NULL_POINTER;
- }
-
- if (solver->initialization_completed == C_FALSE) {
- return ERROR_NEED_INITIALIZATION;
- }
-
- // factor the matrix
- MKL_INT status = dss_factor_real(solver->handle, solver->dss_type, values);
-
- // compute determinant
- *determinant_coefficient = 0.0;
- *determinant_exponent = 0.0;
- if (compute_determinant == C_TRUE) {
- _CHARACTER_t stat_in[] = "determinant";
- _DOUBLE_PRECISION_t stat_out[5];
- status = dss_statistics(solver->handle, solver->dss_opt, stat_in, stat_out);
- if (status != MKL_DSS_SUCCESS) {
- return status;
- }
- *determinant_exponent = stat_out[0];
- *determinant_coefficient = stat_out[1];
- } else {
- *determinant_coefficient = 0.0;
- *determinant_exponent = 0.0;
- }
-
- solver->factorization_completed = C_TRUE;
-
- // done
- return status;
-
-#else
- UNUSED(solver);
- UNUSED(determinant_coefficient);
- UNUSED(determinant_exponent);
- UNUSED(compute_determinant);
- UNUSED(values);
- return ERROR_NOT_AVAILABLE;
-#endif
-}
-
-/// @brief Computes the solution of the linear system
-/// @param solver Is a pointer to the solver interface
-/// @param x Is the left-hand side vector (unknowns)
-/// @param rhs Is the right-hand side vector
-/// @return A success or fail code
-int32_t solver_intel_dss_solve(struct InterfaceIntelDSS *solver,
- double *x,
- const double *rhs) {
-#ifdef WITH_INTEL_DSS
-
- if (solver == NULL) {
- return ERROR_NULL_POINTER;
- }
-
- if (solver->handle == NULL) {
- return ERROR_NULL_POINTER;
- }
-
- if (solver->factorization_completed == C_FALSE) {
- return ERROR_NEED_FACTORIZATION;
- }
-
- // get the solution vector
- MKL_INT n_rhs = 1;
- MKL_INT status = dss_solve_real(solver->handle,
- solver->dss_opt,
- rhs,
- n_rhs,
- x);
- return status;
-
-#else
- UNUSED(solver);
- UNUSED(x);
- UNUSED(rhs);
- return ERROR_NOT_AVAILABLE;
-#endif
-}
diff --git a/russell_sparse/examples/nonlinear_system_4eqs.rs b/russell_sparse/examples/nonlinear_system_4eqs.rs
index 270a9461..49c26e15 100644
--- a/russell_sparse/examples/nonlinear_system_4eqs.rs
+++ b/russell_sparse/examples/nonlinear_system_4eqs.rs
@@ -22,7 +22,6 @@ fn main() -> Result<(), StrError> {
let genie = match opt.genie.to_lowercase().as_str() {
"mumps" => Genie::Mumps,
"umfpack" => Genie::Umfpack,
- "dss" => Genie::IntelDss,
_ => Genie::Umfpack,
};
println!("... solving problem with {:?} ...", genie);
diff --git a/russell_sparse/src/bin/mem_check.rs b/russell_sparse/src/bin/mem_check.rs
index 6bc776a4..249522c5 100644
--- a/russell_sparse/src/bin/mem_check.rs
+++ b/russell_sparse/src/bin/mem_check.rs
@@ -9,7 +9,6 @@ fn test_solver(genie: Genie) {
match genie {
Genie::Mumps => println!("Testing MUMPS solver\n"),
Genie::Umfpack => println!("Testing UMFPACK solver\n"),
- Genie::IntelDss => println!("Testing Intel DSS solver\n"),
}
let mut solver = match LinSolver::new(genie) {
@@ -60,7 +59,6 @@ fn test_complex_solver(genie: Genie) {
match genie {
Genie::Mumps => println!("Testing Complex MUMPS solver\n"),
Genie::Umfpack => println!("Testing Complex UMFPACK solver\n"),
- Genie::IntelDss => println!("Testing Complex Intel DSS solver\n"),
}
let mut solver = match ComplexLinSolver::new(genie) {
@@ -74,7 +72,6 @@ fn test_complex_solver(genie: Genie) {
let coo = match genie {
Genie::Mumps => Samples::complex_symmetric_3x3_lower().0,
Genie::Umfpack => Samples::complex_symmetric_3x3_full().0,
- Genie::IntelDss => panic!("TODO"),
};
let mut mat = ComplexSparseMatrix::from_coo(coo);
@@ -115,7 +112,6 @@ fn test_solver_singular(genie: Genie) {
match genie {
Genie::Mumps => println!("Testing MUMPS solver (singular matrix)\n"),
Genie::Umfpack => println!("Testing UMFPACK solver (singular matrix)\n"),
- Genie::IntelDss => println!("Testing Intel DSS solver (singular matrix)\n"),
}
let (ndim, nnz) = (2, 2);
@@ -148,9 +144,6 @@ fn main() {
// real
test_solver(Genie::Mumps);
test_solver(Genie::Umfpack);
- if cfg!(with_intel_dss) {
- test_solver(Genie::IntelDss);
- }
// complex
test_complex_solver(Genie::Mumps);
@@ -159,7 +152,6 @@ fn main() {
// singular real
test_solver_singular(Genie::Mumps);
test_solver_singular(Genie::Umfpack);
- // Note: Intel DSS cannot handle singular matrices
println!("----------------------------------------------------------------------\n");
}
diff --git a/russell_sparse/src/bin/solve_matrix_market.rs b/russell_sparse/src/bin/solve_matrix_market.rs
index de0d2208..59b54c4d 100644
--- a/russell_sparse/src/bin/solve_matrix_market.rs
+++ b/russell_sparse/src/bin/solve_matrix_market.rs
@@ -73,7 +73,6 @@ fn main() -> Result<(), StrError> {
let handling = match genie {
Genie::Mumps => MMsymOption::LeaveAsLower,
Genie::Umfpack => MMsymOption::MakeItFull,
- Genie::IntelDss => MMsymOption::SwapToUpper,
};
// configuration parameters
@@ -140,7 +139,6 @@ fn main() -> Result<(), StrError> {
let tolerance = match genie {
Genie::Mumps => 1e-10,
Genie::Umfpack => 1e-10,
- Genie::IntelDss => 1e-10,
};
let correct_x = get_bfwb62_correct_x();
for i in 0..nrow {
diff --git a/russell_sparse/src/complex_lin_solver.rs b/russell_sparse/src/complex_lin_solver.rs
index 2db88d93..9a60fe3d 100644
--- a/russell_sparse/src/complex_lin_solver.rs
+++ b/russell_sparse/src/complex_lin_solver.rs
@@ -70,7 +70,6 @@ impl<'a> ComplexLinSolver<'a> {
let actual: Box<dyn ComplexLinSolTrait> = match genie {
Genie::Mumps => Box::new(ComplexSolverMUMPS::new()?),
Genie::Umfpack => Box::new(ComplexSolverUMFPACK::new()?),
- Genie::IntelDss => panic!("TODO"),
};
Ok(ComplexLinSolver { actual })
}
@@ -99,7 +98,6 @@ impl<'a> ComplexLinSolver<'a> {
///
/// 1. For symmetric matrices, `MUMPS` requires that the symmetry/storage be Lower or Full.
/// 2. For symmetric matrices, `UMFPACK` requires that the symmetry/storage be Full.
- /// 3. For symmetric matrices, `IntelDSS` requires that the symmetry/storage be Upper.
/// 4. This function calls the actual implementation (genie) via the functions `factorize`, and `solve`.
/// 5. This function is best for a **single-use**, whereas the actual
/// solver should be considered for a recurrent use (e.g., inside a loop).
diff --git a/russell_sparse/src/enums.rs b/russell_sparse/src/enums.rs
index 4eafff13..9c82de93 100644
--- a/russell_sparse/src/enums.rs
+++ b/russell_sparse/src/enums.rs
@@ -13,11 +13,6 @@ pub enum Genie {
///
/// Reference:
Umfpack,
-
- /// Selects Intel DSS (direct sparse solver)
- ///
- /// Reference:
- IntelDss,
}
/// Specifies how the matrix components are stored
@@ -26,7 +21,7 @@ pub enum Storage {
/// Lower triangular storage for symmetric matrix (e.g., for MUMPS)
Lower,
- /// Upper triangular storage for symmetric matrix (e.g., for Intel DSS)
+ /// Upper triangular storage for symmetric matrix
Upper,
/// Full matrix storage for symmetric or unsymmetric matrix (e.g., for UMFPACK)
@@ -63,8 +58,6 @@ pub enum MMsymOption {
///
/// **Note:** Since lower triangular is standard in MatrixMarket,
/// this option will swap the lower triangle to the upper triangle.
- ///
- /// This option is useful for the Intel DSS solver.
SwapToUpper,
/// Make the matrix full (if symmetric)
@@ -147,14 +140,12 @@ impl Genie {
/// ```text
/// "mumps" => Genie::Mumps,
/// "umfpack" => Genie::Umfpack,
- /// "inteldss" => Genie::IntelDss,
/// _ => Genie::Umfpack,
/// ```
pub fn from(genie: &str) -> Self {
match genie.to_lowercase().as_str() {
"mumps" => Genie::Mumps,
"umfpack" => Genie::Umfpack,
- "inteldss" => Genie::IntelDss,
_ => Genie::Umfpack,
}
}
@@ -163,13 +154,11 @@ impl Genie {
/// ```text
/// Genie::Mumps => "mumps"
/// Genie::Umfpack => "umfpack"
- /// Genie::IntelDss => "inteldss"
/// ```
pub fn to_string(&self) -> String {
match self {
Genie::Mumps => "mumps".to_string(),
Genie::Umfpack => "umfpack".to_string(),
- Genie::IntelDss => "inteldss".to_string(),
}
}
@@ -178,13 +167,11 @@ impl Genie {
/// ```text
/// MUMPS : Storage::Lower
/// UMFPACK : Storage::Full
- /// Intel DSS : Storage::Upper
/// ````
pub fn storage(&self) -> Storage {
match self {
Genie::Mumps => Storage::Lower,
Genie::Umfpack => Storage::Full,
- Genie::IntelDss => Storage::Upper,
}
}
@@ -497,23 +484,18 @@ mod tests {
fn genie_functions_work() {
assert_eq!(Genie::from("mumps"), Genie::Mumps);
assert_eq!(Genie::from("umfpack"), Genie::Umfpack);
- assert_eq!(Genie::from("inteldss"), Genie::IntelDss);
assert_eq!(Genie::from("blah-blah-blah"), Genie::Umfpack);
assert_eq!(Genie::from("Mumps"), Genie::Mumps);
assert_eq!(Genie::from("Umfpack"), Genie::Umfpack);
- assert_eq!(Genie::from("IntelDss"), Genie::IntelDss);
let l = Storage::Lower;
- let u = Storage::Upper;
let f = Storage::Full;
let gl = Symmetry::General(l);
- let gu = Symmetry::General(u);
let gf = Symmetry::General(f);
let pl = Symmetry::PositiveDefinite(l);
- let pu = Symmetry::PositiveDefinite(u);
let pf = Symmetry::PositiveDefinite(f);
let genie = Genie::Mumps;
@@ -531,14 +513,6 @@ mod tests {
assert_eq!(genie.symmetry(true, false), gf);
assert_eq!(genie.symmetry(false, true), pf);
assert_eq!(genie.symmetry(true, true), pf);
-
- let genie = Genie::IntelDss;
- assert_eq!(genie.to_string(), "inteldss");
- assert_eq!(genie.storage(), u);
- assert_eq!(genie.symmetry(false, false), Symmetry::No);
- assert_eq!(genie.symmetry(true, false), gu);
- assert_eq!(genie.symmetry(false, true), pu);
- assert_eq!(genie.symmetry(true, true), pu);
}
#[test]
diff --git a/russell_sparse/src/lib.rs b/russell_sparse/src/lib.rs
index ba282ab8..d62765f9 100644
--- a/russell_sparse/src/lib.rs
+++ b/russell_sparse/src/lib.rs
@@ -21,11 +21,10 @@
//! * [CooMatrix], [CscMatrix], [CsrMatrix], [SparseMatrix] -- For real numbers represented by `f64`
//! * [ComplexCooMatrix], [ComplexCscMatrix], [ComplexCsrMatrix], [ComplexSparseMatrix] -- For complex numbers represented by [num_complex::Complex64]
//!
-//! The COO matrix is the best when we need to update the values of the matrix because it has easy access to the triples (i, j, aij). For instance, the repetitive access is the primary use case for codes based on the finite element method (FEM) for approximating partial differential equations. Moreover, the COO matrix allows storing duplicate entries; for example, the triple `(0, 0, 123.0)` can be stored as two triples `(0, 0, 100.0)` and `(0, 0, 23.0)`. Again, this is the primary need for FEM codes because of the so-called assembly process where elements add to the same positions in the "global stiffness" matrix. Nonetheless, the duplicate entries must be summed up at some stage for the linear solver (e.g., MUMPS, UMFPACK, and Intel DSS). These linear solvers also use the more memory-efficient storage formats CSC and CSR. The following is the default input for these solvers:
+//! The COO matrix is the best when we need to update the values of the matrix because it has easy access to the triples (i, j, aij). For instance, the repetitive access is the primary use case for codes based on the finite element method (FEM) for approximating partial differential equations. Moreover, the COO matrix allows storing duplicate entries; for example, the triple `(0, 0, 123.0)` can be stored as two triples `(0, 0, 100.0)` and `(0, 0, 23.0)`. Again, this is the primary need for FEM codes because of the so-called assembly process where elements add to the same positions in the "global stiffness" matrix. Nonetheless, the duplicate entries must be summed up at some stage for the linear solver (e.g., MUMPS, UMFPACK). These linear solvers also use the more memory-efficient storage formats CSC and CSR. The following is the default input for these solvers:
//!
//! * [MUMPS](https://mumps-solver.org) -- requires a COO matrix as input internally
//! * [UMFPACK](https://github.com/DrTimothyAldenDavis/SuiteSparse) -- requires a CSC matrix as input internally
-//! * [Intel DSS](https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/direct-sparse-solver-dss-interface-routines.html) -- requires a CSR matrix as input internally
//!
//! Nonetheless, the implemented interface to the above linear solvers takes a [SparseMatrix] as input, which will automatically be converted from COO to CSC or COO to CSR, as appropriate.
//!
@@ -39,7 +38,6 @@
//!
//! * [SolverMUMPS] -- thin wrapper to the MUMPS solver
//! * [SolverUMFPACK] -- thin wrapper to the UMFPACK solver
-//! * [SolverIntelDSS] -- thin wrapper to the Intel DSS solver
//!
//! This library also provides a unifying Trait called [LinSolTrait], which the above structures implement. In addition, the [LinSolver] structure holds a "pointer" to one of the above structures and is a more convenient way to use the linear solvers in generic codes when we need to switch from solver to solver (e.g., for benchmarking). After allocating a [LinSolver], if needed, we can access the actual implementations (interfaces/thin wrappers) via the [LinSolver::actual] data member.
//!
@@ -78,8 +76,8 @@
//! // allocate a square matrix and store as COO matrix
//! // ┌ ┐
//! // │ 1 0 2 │
-//! // │ 0 0 3 │ << the diagonal 0 entry is optional,
-//! // │ 4 5 6 │ but should be saved for Intel DSS
+//! // │ 0 0 3 │
+//! // │ 4 5 6 │
//! // └ ┘
//! let (nrow, ncol, nnz) = (3, 3, 6);
//! let mut coo = CooMatrix::new(nrow, ncol, nnz, None)?;
@@ -254,7 +252,6 @@ mod lin_solver;
pub mod prelude;
mod read_matrix_market;
mod samples;
-mod solver_intel_dss;
mod solver_mumps;
mod solver_umfpack;
mod sparse_matrix;
@@ -274,7 +271,6 @@ pub use crate::lin_sol_params::*;
pub use crate::lin_solver::*;
pub use crate::read_matrix_market::*;
pub use crate::samples::*;
-pub use crate::solver_intel_dss::*;
pub use crate::solver_mumps::*;
pub use crate::solver_umfpack::*;
pub use crate::sparse_matrix::*;
diff --git a/russell_sparse/src/lin_solver.rs b/russell_sparse/src/lin_solver.rs
index f0d6a8db..69e14238 100644
--- a/russell_sparse/src/lin_solver.rs
+++ b/russell_sparse/src/lin_solver.rs
@@ -1,4 +1,4 @@
-use super::{Genie, LinSolParams, SolverIntelDSS, SolverMUMPS, SolverUMFPACK, SparseMatrix, StatsLinSol};
+use super::{Genie, LinSolParams, SolverMUMPS, SolverUMFPACK, SparseMatrix, StatsLinSol};
use crate::StrError;
use russell_lab::Vector;
@@ -64,7 +64,6 @@ impl<'a> LinSolver<'a> {
let actual: Box<dyn LinSolTrait> = match genie {
Genie::Mumps => Box::new(SolverMUMPS::new()?),
Genie::Umfpack => Box::new(SolverUMFPACK::new()?),
- Genie::IntelDss => Box::new(SolverIntelDSS::new()?),
};
Ok(LinSolver { actual })
}
@@ -93,7 +92,6 @@ impl<'a> LinSolver<'a> {
///
/// 1. For symmetric matrices, `MUMPS` requires that the symmetry/storage be Lower or Full.
/// 2. For symmetric matrices, `UMFPACK` requires that the symmetry/storage be Full.
- /// 3. For symmetric matrices, `IntelDSS` requires that the symmetry/storage be Upper.
/// 4. This function calls the actual implementation (genie) via the functions `factorize`, and `solve`.
/// 5. This function is best for a **single-use**, whereas the actual
/// solver should be considered for a recurrent use (e.g., inside a loop).
@@ -166,9 +164,6 @@ mod tests {
fn lin_solver_new_works() {
LinSolver::new(Genie::Mumps).unwrap();
LinSolver::new(Genie::Umfpack).unwrap();
- if cfg!(with_intel_dss) {
- LinSolver::new(Genie::IntelDss).unwrap();
- }
}
#[test]
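For reference, a minimal end-to-end sketch of the `LinSolver`/`Genie` workflow described above, modeled on the deleted `solve_works` test earlier in this patch; it assumes `LinSolver::actual`, `LinSolTrait`, and `Samples` are publicly exported, as the module docs and prelude indicate:

```rust
use russell_lab::Vector;
use russell_sparse::{Genie, LinSolTrait, LinSolver, Samples, SparseMatrix, StrError};

fn main() -> Result<(), StrError> {
    // select the solver at run time through the Genie enum
    let mut solver = LinSolver::new(Genie::Umfpack)?;
    // 5x5 unsymmetric sample matrix (also used by the deleted tests above)
    let (coo, _, _, _) = Samples::umfpack_unsymmetric_5x5();
    let mut mat = SparseMatrix::from_coo(coo);
    // factorize A, then solve A · x = rhs
    solver.actual.factorize(&mut mat, None)?;
    let mut x = Vector::new(5);
    let rhs = Vector::from(&[8.0, 45.0, -3.0, 3.0, 19.0]);
    solver.actual.solve(&mut x, &mat, &rhs, false)?;
    println!("x =\n{}", x);
    Ok(())
}
```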
diff --git a/russell_sparse/src/prelude.rs b/russell_sparse/src/prelude.rs
index 5328c48d..c8131bda 100644
--- a/russell_sparse/src/prelude.rs
+++ b/russell_sparse/src/prelude.rs
@@ -14,7 +14,6 @@ pub use crate::enums::*;
pub use crate::lin_sol_params::LinSolParams;
pub use crate::lin_solver::*;
pub use crate::read_matrix_market;
-pub use crate::solver_intel_dss::SolverIntelDSS;
pub use crate::solver_mumps::SolverMUMPS;
pub use crate::solver_umfpack::SolverUMFPACK;
pub use crate::sparse_matrix::NumSparseMatrix;
diff --git a/russell_sparse/src/read_matrix_market.rs b/russell_sparse/src/read_matrix_market.rs
index f277433c..9eb99d19 100644
--- a/russell_sparse/src/read_matrix_market.rs
+++ b/russell_sparse/src/read_matrix_market.rs
@@ -179,7 +179,7 @@ impl MatrixMarketData {
/// may be used to:
///
/// 1. Leave the data as it is, i.e., return a Lower Triangular CooMatrix (e.g., for MUMPS solver)
-/// 2. Swap the lower triangle with the upper triangle, i.e., return an Upper Triangular CooMatrix (e.g., for Intel DSS solver)
+/// 2. Swap the lower triangle with the upper triangle, i.e., return an Upper Triangular CooMatrix
/// 3. Duplicate the data to make a full matrix, i.e., return a Full CooMatrix (e.g., for UMFPACK solver)
///
/// # Panics
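A minimal usage sketch of the symmetric-handling options listed above, assuming a two-argument `read_matrix_market(path, handling)` signature (the exact signature is not shown in this patch) and the bfwb62.mtx file used by the scripts further down:

```rust
use russell_sparse::{read_matrix_market, MMsymOption, StrError};

fn main() -> Result<(), StrError> {
    // UMFPACK wants full storage for symmetric matrices (see
    // solve_matrix_market.rs above), so duplicate the lower triangle
    let handling = MMsymOption::MakeItFull;
    // assumed signature: (path, symmetric_handling) -> Result<CooMatrix, _>
    let _coo = read_matrix_market("data/matrix_market/bfwb62.mtx", handling)?;
    Ok(())
}
```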
diff --git a/russell_sparse/src/samples.rs b/russell_sparse/src/samples.rs
index 146c8eca..ecf0e952 100644
--- a/russell_sparse/src/samples.rs
+++ b/russell_sparse/src/samples.rs
@@ -279,7 +279,7 @@ impl Samples {
///
/// ```text
/// 1 . 2
- /// . 0 3 << the zero diagonal value is required for Intel DSS
+ /// . 0 3
/// 4 5 6
/// ```
///
@@ -316,14 +316,14 @@ impl Samples {
coo.put(2, 0, 4.0).unwrap();
coo.put(0, 0, 1.0).unwrap();
coo.put(2, 2, 2.0).unwrap(); // << duplicate
- coo.put(1, 1, 0.0).unwrap(); // << needed for Intel DSS
+ coo.put(1, 1, 0.0).unwrap(); // << notice that 0.0 may be specified
coo.put(2, 2, 2.0).unwrap(); // << duplicate
} else {
coo.put(2, 0, 4.0).unwrap();
coo.put(0, 0, 1.0).unwrap();
coo.put(2, 2, 6.0).unwrap();
coo.put(0, 2, 2.0).unwrap();
- coo.put(1, 1, 0.0).unwrap(); // << needed for Intel DSS
+ coo.put(1, 1, 0.0).unwrap(); // << notice that 0.0 may be specified
coo.put(2, 1, 5.0).unwrap();
coo.put(1, 2, 3.0).unwrap();
}
@@ -331,7 +331,7 @@ impl Samples {
if duplicate_coo_entries {
coo.put(0, 0, 1.0).unwrap();
coo.put(0, 2, 2.0).unwrap();
- coo.put(1, 1, 0.0).unwrap(); // << needed for Intel DSS
+ coo.put(1, 1, 0.0).unwrap(); // << notice that 0.0 may be specified
coo.put(1, 2, 3.0).unwrap();
coo.put(2, 0, 4.0).unwrap();
coo.put(2, 1, 5.0).unwrap();
@@ -341,7 +341,7 @@ impl Samples {
} else {
coo.put(0, 0, 1.0).unwrap();
coo.put(0, 2, 2.0).unwrap();
- coo.put(1, 1, 0.0).unwrap(); // << needed for Intel DSS
+ coo.put(1, 1, 0.0).unwrap(); // << notice that 0.0 may be specified
coo.put(1, 2, 3.0).unwrap();
coo.put(2, 0, 4.0).unwrap();
coo.put(2, 1, 5.0).unwrap();
diff --git a/russell_sparse/src/solver_intel_dss.rs b/russell_sparse/src/solver_intel_dss.rs
deleted file mode 100644
index 02c188cd..00000000
--- a/russell_sparse/src/solver_intel_dss.rs
+++ /dev/null
@@ -1,601 +0,0 @@
-use super::{LinSolParams, LinSolTrait, SparseMatrix, StatsLinSol, Symmetry};
-use crate::auxiliary_and_constants::*;
-use crate::StrError;
-use russell_lab::{Stopwatch, Vector};
-
-/// Opaque struct holding a C-pointer to InterfaceIntelDSS
-///
-/// Reference:
-#[repr(C)]
-struct InterfaceIntelDSS {
- _data: [u8; 0],
- _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
-}
-
-/// Enforce Send on the C structure
-///
-///
-unsafe impl Send for InterfaceIntelDSS {}
-
-/// Enforce Send on the Rust structure
-///
-///
-unsafe impl Send for SolverIntelDSS {}
-
-extern "C" {
- fn solver_intel_dss_new() -> *mut InterfaceIntelDSS;
- fn solver_intel_dss_drop(solver: *mut InterfaceIntelDSS);
- fn solver_intel_dss_initialize(
- solver: *mut InterfaceIntelDSS,
- general_symmetric: CcBool,
- positive_definite: CcBool,
- ndim: i32,
- row_pointers: *const i32,
- col_indices: *const i32,
- ) -> i32;
- fn solver_intel_dss_factorize(
- solver: *mut InterfaceIntelDSS,
- determinant_coefficient: *mut f64,
- determinant_exponent: *mut f64,
- compute_determinant: CcBool,
- values: *const f64,
- ) -> i32;
- fn solver_intel_dss_solve(solver: *mut InterfaceIntelDSS, x: *mut f64, rhs: *const f64) -> i32;
-}
-
-/// Wraps the IntelDSS solver for sparse linear systems
-///
-/// **Warning:** This solver does not check whether the matrix is singular or not;
-/// thus it may return **incorrect results** if a singular matrix is given to factorize.
-///
-/// **Warning:** This solver may fail with large matrices (e.g., ATandT/pre2) and
-/// may return **incorrect results**.
-pub struct SolverIntelDSS {
- /// Holds a pointer to the C interface to IntelDSS
- solver: *mut InterfaceIntelDSS,
-
- /// Indicates whether the solver has been initialized or not (just once)
- initialized: bool,
-
- /// Indicates whether the sparse matrix has been factorized or not
- factorized: bool,
-
- /// Holds the symmetry type used in initialize
- initialized_symmetry: Symmetry,
-
- /// Holds the matrix dimension saved in initialize
- initialized_ndim: usize,
-
- /// Holds the number of non-zeros saved in initialize
- initialized_nnz: usize,
-
- /// Holds the determinant coefficient: det = coefficient * pow(10, exponent)
- determinant_coefficient: f64,
-
- /// Holds the determinant exponent: det = coefficient * pow(10, exponent)
- determinant_exponent: f64,
-
- /// Stopwatch to measure computation times
- stopwatch: Stopwatch,
-
- /// Time spent on initialize in nanoseconds
- time_initialize_ns: u128,
-
- /// Time spent on factorize in nanoseconds
- time_factorize_ns: u128,
-
- /// Time spent on solve in nanoseconds
- time_solve_ns: u128,
-}
-
-impl Drop for SolverIntelDSS {
- /// Tells the c-code to release memory
- fn drop(&mut self) {
- unsafe {
- solver_intel_dss_drop(self.solver);
- }
- }
-}
-
-impl SolverIntelDSS {
- /// Allocates a new instance
- ///
- /// **Warning:** This solver does not check whether the matrix is singular or not;
- /// thus it may return **incorrect results** if a singular matrix is given to factorize.
- ///
- /// **Warning:** This solver may fail with large matrices (e.g., ATandT/pre2) and
- /// may return **incorrect results**.
- pub fn new() -> Result<Self, StrError> {
- if !cfg!(with_intel_dss) {
- return Err("This code has not been compiled with Intel DSS");
- }
- unsafe {
- let solver = solver_intel_dss_new();
- if solver.is_null() {
- return Err("c-code failed to allocate the IntelDSS solver");
- }
- Ok(SolverIntelDSS {
- solver,
- initialized: false,
- factorized: false,
- initialized_symmetry: Symmetry::No,
- initialized_ndim: 0,
- initialized_nnz: 0,
- determinant_coefficient: 0.0,
- determinant_exponent: 0.0,
- stopwatch: Stopwatch::new(),
- time_initialize_ns: 0,
- time_factorize_ns: 0,
- time_solve_ns: 0,
- })
- }
- }
-}
-
-impl LinSolTrait for SolverIntelDSS {
- /// Performs the factorization (and analysis/initialization if needed)
- ///
- /// # Input
- ///
- /// * `mat` -- the coefficient matrix A (**COO** or **CSR**, but not CSC).
- /// Also, the matrix must be square (`nrow = ncol`) and, if symmetric,
- /// the symmetry/storage must be [crate::Storage::Upper].
- /// * `params` -- configuration parameters; None => use default
- ///
- /// # Notes
- ///
- /// 1. The structure of the matrix (nrow, ncol, nnz, symmetry) must be
- /// exactly the same among multiple calls to `factorize`. The values may differ
- /// from call to call, nonetheless.
- /// 2. The first call to `factorize` will define the structure which must be
- /// kept the same for the next calls.
- /// 3. If the structure of the matrix needs to be changed, the solver must
- /// be "dropped" and a new solver allocated.
- /// 4. For symmetric matrices, `DSS` requires that the symmetry/storage be [crate::Storage::Upper].
- ///
- /// **Warning:** This solver does not check whether the matrix is singular or not;
- /// thus it may return **incorrect results** if a singular matrix is given to factorize.
- ///
- /// **Warning:** This solver may fail with large matrices (e.g., ATandT/pre2) and
- /// may return **incorrect results**.
- fn factorize(&mut self, mat: &mut SparseMatrix, params: Option<LinSolParams>) -> Result<(), StrError> {
- // get CSR matrix
- // (or convert from COO if CSR is not available and COO is available)
- let csr = mat.get_csr_or_from_coo()?;
-
- // check CSR matrix
- if csr.nrow != csr.ncol {
- return Err("the matrix must be square");
- }
-
- // check already initialized data
- if self.initialized {
- if csr.symmetry != self.initialized_symmetry {
- return Err("subsequent factorizations must use the same matrix (symmetry differs)");
- }
- if csr.nrow != self.initialized_ndim {
- return Err("subsequent factorizations must use the same matrix (ndim differs)");
- }
- if (csr.row_pointers[csr.nrow] as usize) != self.initialized_nnz {
- return Err("subsequent factorizations must use the same matrix (nnz differs)");
- }
- } else {
- self.initialized_symmetry = csr.symmetry;
- self.initialized_ndim = csr.nrow;
- self.initialized_nnz = csr.row_pointers[csr.nrow] as usize;
- if self.initialized_nnz < self.initialized_ndim {
- return Err("for Intel DSS, nnz = row_pointers[nrow] must be ≥ nrow");
- }
- }
-
- // configuration parameters
- let par = if let Some(p) = params { p } else { LinSolParams::new() };
-
- // requests
- let calc_det = if par.compute_determinant { 1 } else { 0 };
-
- // extract the symmetry flags and check the storage type
- let (general_symmetric, positive_definite) = csr.symmetry.status(false, true)?;
-
- // matrix config
- let ndim = to_i32(csr.nrow);
- let nnz = self.initialized_nnz;
-
- // call initialize just once
- if !self.initialized {
- self.stopwatch.reset();
- unsafe {
- let status = solver_intel_dss_initialize(
- self.solver,
- general_symmetric,
- positive_definite,
- ndim,
- csr.row_pointers.as_ptr(),
- csr.col_indices[0..nnz].as_ptr(),
- );
- if status != SUCCESSFUL_EXIT {
- return Err(handle_intel_dss_error_code(status));
- }
- }
- self.time_initialize_ns = self.stopwatch.stop();
- self.initialized = true;
- }
-
- // call factorize
- self.stopwatch.reset();
- unsafe {
- let status = solver_intel_dss_factorize(
- self.solver,
- &mut self.determinant_coefficient,
- &mut self.determinant_exponent,
- calc_det,
- csr.values[0..nnz].as_ptr(),
- );
- if status != SUCCESSFUL_EXIT {
- return Err(handle_intel_dss_error_code(status));
- }
- }
- self.time_factorize_ns = self.stopwatch.stop();
-
- // done
- self.factorized = true;
- Ok(())
- }
-
- /// Computes the solution of the linear system
- ///
- /// Solves the linear system:
- ///
- /// ```text
- /// A · x = rhs
- /// (m,m) (m) (m)
- /// ```
- ///
- /// # Output
- ///
- /// * `x` -- the vector of unknown values with dimension equal to mat.nrow
- ///
- /// # Input
- ///
- /// * `mat` -- the coefficient matrix A; must be square and, if symmetric, [crate::Storage::Upper].
- /// * `rhs` -- the right-hand side vector with known values and dimension equal to mat.nrow
- /// * `_verbose` -- not used
- ///
- /// **Warning:** the matrix must be same one used in `factorize`.
- fn solve(&mut self, x: &mut Vector, mat: &SparseMatrix, rhs: &Vector, _verbose: bool) -> Result<(), StrError> {
- // check
- if !self.factorized {
- return Err("the function factorize must be called before solve");
- }
-
- // access CSR matrix
- // (possibly already converted from COO, because factorize was (should have been) called)
- let csr = mat.get_csr()?;
-
- // check already factorized data
- let (nrow, ncol, nnz, symmetry) = csr.get_info();
- if symmetry != self.initialized_symmetry {
- return Err("solve must use the same matrix (symmetry differs)");
- }
- if nrow != self.initialized_ndim || ncol != self.initialized_ndim {
- return Err("solve must use the same matrix (ndim differs)");
- }
- if nnz != self.initialized_nnz {
- return Err("solve must use the same matrix (nnz differs)");
- }
-
- // check vectors
- if x.dim() != self.initialized_ndim {
- return Err("the dimension of the vector of unknown values x is incorrect");
- }
- if rhs.dim() != self.initialized_ndim {
- return Err("the dimension of the right-hand side vector is incorrect");
- }
-
- // call Intel DSS solve
- self.stopwatch.reset();
- unsafe {
- let status = solver_intel_dss_solve(self.solver, x.as_mut_data().as_mut_ptr(), rhs.as_data().as_ptr());
- if status != SUCCESSFUL_EXIT {
- return Err(handle_intel_dss_error_code(status));
- }
- }
- self.time_solve_ns = self.stopwatch.stop();
-
- // done
- Ok(())
- }
-
- /// Updates the stats structure (should be called after solve)
- fn update_stats(&self, stats: &mut StatsLinSol) {
- stats.main.solver = if cfg!(with_intel_dss) {
- "IntelDSS".to_string()
- } else {
- "INTEL_DSS_IS_NOT_AVAILABLE".to_string()
- };
- stats.determinant.mantissa_real = self.determinant_coefficient;
- stats.determinant.mantissa_imag = 0.0;
- stats.determinant.base = 10.0;
- stats.determinant.exponent = self.determinant_exponent;
- stats.time_nanoseconds.initialize = self.time_initialize_ns;
- stats.time_nanoseconds.factorize = self.time_factorize_ns;
- stats.time_nanoseconds.solve = self.time_solve_ns;
- }
-}
-
-/// Handles Intel DSS error code
-pub(crate) fn handle_intel_dss_error_code(err: i32) -> StrError {
- match err {
- -1 => return "MKL_DSS_ZERO_PIVOT",
- -2 => return "MKL_DSS_OUT_OF_MEMORY",
- -3 => return "MKL_DSS_FAILURE",
- -4 => return "MKL_DSS_ROW_ERR",
- -5 => return "MKL_DSS_COL_ERR",
- -6 => return "MKL_DSS_TOO_FEW_VALUES",
- -7 => return "MKL_DSS_TOO_MANY_VALUES",
- -8 => return "MKL_DSS_NOT_SQUARE",
- -9 => return "MKL_DSS_STATE_ERR",
- -10 => return "MKL_DSS_INVALID_OPTION",
- -11 => return "MKL_DSS_OPTION_CONFLICT",
- -12 => return "MKL_DSS_MSG_LVL_ERR",
- -13 => return "MKL_DSS_TERM_LVL_ERR",
- -14 => return "MKL_DSS_STRUCTURE_ERR",
- -15 => return "MKL_DSS_REORDER_ERR",
- -16 => return "MKL_DSS_VALUES_ERR",
- 17 => return "MKL_DSS_STATISTICS_INVALID_MATRIX",
- 18 => return "MKL_DSS_STATISTICS_INVALID_STATE",
- 19 => return "MKL_DSS_STATISTICS_INVALID_STRING",
- 20 => return "MKL_DSS_REORDER1_ERR",
- 21 => return "MKL_DSS_PREORDER_ERR",
- 22 => return "MKL_DSS_DIAG_ERR",
- 23 => return "MKL_DSS_I32BIT_ERR",
- 24 => return "MKL_DSS_OOC_MEM_ERR",
- 25 => return "MKL_DSS_OOC_OC_ERR",
- 26 => return "MKL_DSS_OOC_RW_ERR",
- ERROR_NULL_POINTER => return "Intel DSS failed due to NULL POINTER error",
- ERROR_MALLOC => return "Intel DSS failed due to MALLOC error",
- ERROR_VERSION => return "Intel DSS failed due to VERSION error",
- ERROR_NOT_AVAILABLE => return "Intel DSS is not AVAILABLE",
- ERROR_NEED_INITIALIZATION => return "Intel DSS failed because INITIALIZATION is needed",
- ERROR_NEED_FACTORIZATION => return "Intel DSS failed because FACTORIZATION is needed",
- ERROR_ALREADY_INITIALIZED => return "Intel DSS failed because INITIALIZATION has been completed already",
- _ => return "Error: unknown error returned by c-code (IntelDSS)",
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-#[cfg(test)]
-#[cfg(with_intel_dss)]
-mod tests {
- use super::*;
- use crate::{CooMatrix, LinSolParams, LinSolTrait, Samples, SparseMatrix, StatsLinSol, Storage, Symmetry};
- use russell_lab::{approx_eq, vec_approx_eq, Vector};
-
- #[test]
- fn new_and_drop_work() {
- // you may debug into the C-code to see that drop is working
- let solver = SolverIntelDSS::new().unwrap();
- assert!(!solver.factorized);
- }
-
- #[test]
- fn factorize_handles_errors() {
- let mut solver = SolverIntelDSS::new().unwrap();
- assert!(!solver.factorized);
-
- // COO to CSR errors
- let coo = CooMatrix::new(1, 1, 1, None).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- assert_eq!(
- solver.factorize(&mut mat, None).err(),
- Some("COO to CSR requires nnz > 0")
- );
-
- // check CSR matrix
- let (coo, _, _, _) = Samples::rectangular_1x7();
- let mut mat = SparseMatrix::from_coo(coo);
- assert_eq!(
- solver.factorize(&mut mat, None).err(),
- Some("the matrix must be square")
- );
- let (coo, _, _, _) = Samples::mkl_symmetric_5x5_lower(false, false);
- let mut mat = SparseMatrix::from_coo(coo);
- assert_eq!(
- solver.factorize(&mut mat, None).err(),
- Some("if the matrix is general symmetric, the required storage is upper triangular")
- );
-
- // check already factorized data
- let mut coo = CooMatrix::new(2, 2, 2, None).unwrap();
- coo.put(0, 0, 1.0).unwrap();
- coo.put(1, 1, 2.0).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- // ... factorize once => OK
- solver.factorize(&mut mat, None).unwrap();
- // ... change matrix (symmetry)
- let mut coo = CooMatrix::new(2, 2, 2, Some(Symmetry::General(Storage::Full))).unwrap();
- coo.put(0, 0, 1.0).unwrap();
- coo.put(1, 1, 2.0).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- assert_eq!(
- solver.factorize(&mut mat, None).err(),
- Some("subsequent factorizations must use the same matrix (symmetry differs)")
- );
- // ... change matrix (ndim)
- let mut coo = CooMatrix::new(1, 1, 1, None).unwrap();
- coo.put(0, 0, 1.0).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- assert_eq!(
- solver.factorize(&mut mat, None).err(),
- Some("subsequent factorizations must use the same matrix (ndim differs)")
- );
- // ... change matrix (nnz)
- let mut coo = CooMatrix::new(2, 2, 1, None).unwrap();
- coo.put(0, 0, 1.0).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- assert_eq!(
- solver.factorize(&mut mat, None).err(),
- Some("subsequent factorizations must use the same matrix (nnz differs)")
- );
- }
-
- #[test]
- fn factorize_works() {
- let mut solver = SolverIntelDSS::new().unwrap();
- assert!(!solver.factorized);
- let (coo, _, _, _) = Samples::umfpack_unsymmetric_5x5();
- let mut mat = SparseMatrix::from_coo(coo);
- let mut params = LinSolParams::new();
-
- params.compute_determinant = true;
-
- solver.factorize(&mut mat, Some(params)).unwrap();
- assert!(solver.factorized);
- let det = solver.determinant_coefficient * f64::powf(10.0, solver.determinant_exponent);
- approx_eq(det, 114.0, 1e-13);
-
- // calling factorize again works
- solver.factorize(&mut mat, Some(params)).unwrap();
- let det = solver.determinant_coefficient * f64::powf(10.0, solver.determinant_exponent);
- approx_eq(det, 114.0, 1e-13);
- }
-
- #[test]
- fn factorize_fails_on_singular_matrix() {
- let mut solver = SolverIntelDSS::new().unwrap();
- let mut coo = CooMatrix::new(2, 2, 2, None).unwrap();
- coo.put(0, 0, 1.0).unwrap();
- coo.put(1, 1, 0.0).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- println!("Warning: Intel DSS does not detect singular matrices");
- assert_eq!(solver.factorize(&mut mat, None).err(), None);
- }
-
- #[test]
- fn solve_handles_errors() {
- let mut coo = CooMatrix::new(2, 2, 2, None).unwrap();
- coo.put(0, 0, 123.0).unwrap();
- coo.put(1, 1, 456.0).unwrap();
- let mut mat = SparseMatrix::from_coo(coo);
- let mut solver = SolverIntelDSS::new().unwrap();
- assert!(!solver.factorized);
- let mut x = Vector::new(2);
- let rhs = Vector::new(2);
- assert_eq!(
- solver.solve(&mut x, &mut mat, &rhs, false),
- Err("the function factorize must be called before solve")
- );
- let mut x = Vector::new(1);
- solver.factorize(&mut mat, None).unwrap();
- assert_eq!(
- solver.solve(&mut x, &mut mat, &rhs, false),
- Err("the dimension of the vector of unknown values x is incorrect")
- );
- let mut x = Vector::new(2);
- let rhs = Vector::new(1);
- assert_eq!(
- solver.solve(&mut x, &mut mat, &rhs, false),
- Err("the dimension of the right-hand side vector is incorrect")
- );
- // wrong symmetry
- let rhs = Vector::new(2);
- let mut coo_wrong = CooMatrix::new(2, 2, 2, Some(Symmetry::General(Storage::Full))).unwrap();
- coo_wrong.put(0, 0, 123.0).unwrap();
- coo_wrong.put(1, 1, 456.0).unwrap();
- let mut mat_wrong = SparseMatrix::from_coo(coo_wrong);
- mat_wrong.get_csr_or_from_coo().unwrap(); // make sure to convert to CSR (because we're not calling factorize on this wrong matrix)
- assert_eq!(
- solver.solve(&mut x, &mut mat_wrong, &rhs, false),
- Err("solve must use the same matrix (symmetry differs)")
- );
- // wrong ndim
- let mut coo_wrong = CooMatrix::new(1, 1, 1, None).unwrap();
- coo_wrong.put(0, 0, 123.0).unwrap();
- let mut mat_wrong = SparseMatrix::from_coo(coo_wrong);
- mat_wrong.get_csr_or_from_coo().unwrap(); // make sure to convert to CSR (because we're not calling factorize on this wrong matrix)
- assert_eq!(
- solver.solve(&mut x, &mut mat_wrong, &rhs, false),
- Err("solve must use the same matrix (ndim differs)")
- );
- // wrong nnz
- let mut coo_wrong = CooMatrix::new(2, 2, 3, None).unwrap();
- coo_wrong.put(0, 0, 123.0).unwrap();
- coo_wrong.put(1, 1, 123.0).unwrap();
- coo_wrong.put(0, 1, 100.0).unwrap();
- let mut mat_wrong = SparseMatrix::from_coo(coo_wrong);
- mat_wrong.get_csr_or_from_coo().unwrap(); // make sure to convert to CSR (because we're not calling factorize on this wrong matrix)
- assert_eq!(
- solver.solve(&mut x, &mut mat_wrong, &rhs, false),
- Err("solve must use the same matrix (nnz differs)")
- );
- }
-
- #[test]
- fn solve_works() {
- let mut solver = SolverIntelDSS::new().unwrap();
- let (coo, _, _, _) = Samples::umfpack_unsymmetric_5x5();
- let mut mat = SparseMatrix::from_coo(coo);
- let mut x = Vector::new(5);
- let rhs = Vector::from(&[8.0, 45.0, -3.0, 3.0, 19.0]);
- let x_correct = &[1.0, 2.0, 3.0, 4.0, 5.0];
- solver.factorize(&mut mat, None).unwrap();
- solver.solve(&mut x, &mut mat, &rhs, false).unwrap();
- vec_approx_eq(x.as_data(), x_correct, 1e-14);
-
- // calling solve again works
- let mut x_again = Vector::new(5);
- solver.solve(&mut x_again, &mut mat, &rhs, false).unwrap();
- vec_approx_eq(x_again.as_data(), x_correct, 1e-14);
-
- // update stats
- let mut stats = StatsLinSol::new();
- solver.update_stats(&mut stats);
- assert_eq!(stats.output.effective_ordering, "Unknown");
- assert_eq!(stats.output.effective_scaling, "Unknown");
- }
-
- #[test]
- fn handle_intel_dss_error_code_works() {
- let default = "Error: unknown error returned by c-code (IntelDSS)";
- for i in 1..17 {
- let res = handle_intel_dss_error_code(-i);
- assert!(res.len() > 0);
- assert_ne!(res, default);
- }
- for i in 17..27 {
- let res = handle_intel_dss_error_code(i);
- assert!(res.len() > 0);
- assert_ne!(res, default);
- }
- assert_eq!(
- handle_intel_dss_error_code(ERROR_NULL_POINTER),
- "Intel DSS failed due to NULL POINTER error"
- );
- assert_eq!(
- handle_intel_dss_error_code(ERROR_MALLOC),
- "Intel DSS failed due to MALLOC error"
- );
- assert_eq!(
- handle_intel_dss_error_code(ERROR_VERSION),
- "Intel DSS failed due to VERSION error"
- );
- assert_eq!(
- handle_intel_dss_error_code(ERROR_NOT_AVAILABLE),
- "Intel DSS is not AVAILABLE"
- );
- assert_eq!(
- handle_intel_dss_error_code(ERROR_NEED_INITIALIZATION),
- "Intel DSS failed because INITIALIZATION is needed"
- );
- assert_eq!(
- handle_intel_dss_error_code(ERROR_NEED_FACTORIZATION),
- "Intel DSS failed because FACTORIZATION is needed"
- );
- assert_eq!(
- handle_intel_dss_error_code(ERROR_ALREADY_INITIALIZED),
- "Intel DSS failed because INITIALIZATION has been completed already"
- );
- assert_eq!(handle_intel_dss_error_code(123), default);
- }
-}
diff --git a/russell_sparse/tests/test_nonlinear_system.rs b/russell_sparse/tests/test_nonlinear_system.rs
index 7312072d..39d60289 100644
--- a/russell_sparse/tests/test_nonlinear_system.rs
+++ b/russell_sparse/tests/test_nonlinear_system.rs
@@ -133,9 +133,3 @@ fn test_nonlinear_system_mumps() -> Result<(), StrError> {
fn test_nonlinear_system_umfpack() -> Result<(), StrError> {
solve_nonlinear_system(Genie::Umfpack)
}
-
-#[cfg(with_intel_dss)]
-#[test]
-fn test_nonlinear_system_intel_dss() -> Result<(), StrError> {
- solve_nonlinear_system(Genie::IntelDss)
-}
diff --git a/russell_sparse/zscripts/memcheck.bash b/russell_sparse/zscripts/memcheck.bash
index adcddfff..ebbaea16 100755
--- a/russell_sparse/zscripts/memcheck.bash
+++ b/russell_sparse/zscripts/memcheck.bash
@@ -16,10 +16,6 @@ $VALGRIND --bin mem_check
$VALGRIND --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx -d
$VALGRIND --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx -d -g mumps
-if [[ "$WITH_DSS" == "1" ]]; then
- $VALGRIND --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx -d --genie dss
-fi
$VALGRIND --example nonlinear_system_4eqs --
$VALGRIND --example nonlinear_system_4eqs -- -g mumps
-$VALGRIND --example nonlinear_system_4eqs -- -g dss
diff --git a/russell_sparse/zscripts/run-solve-matrix-market.bash b/russell_sparse/zscripts/run-solve-matrix-market.bash
index 0c1b2f9d..c0ba481c 100755
--- a/russell_sparse/zscripts/run-solve-matrix-market.bash
+++ b/russell_sparse/zscripts/run-solve-matrix-market.bash
@@ -1,23 +1,14 @@
#!/bin/bash
-if [[ -z "${RUSSELL_SPARSE_WITH_INTEL_DSS}" ]]; then
- WITH_DSS="0"
-else
- WITH_DSS="${RUSSELL_SPARSE_WITH_INTEL_DSS}"
-fi
-
-cargo build --release
-
-cargo run --release --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx
-cargo run --release --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx --genie mumps
+# the first (optional) argument may be "mkl" to enable the Intel MKL feature
+BLAS_LIB=${1:-""}
-if [[ "$WITH_DSS" == "1" ]]; then
- cargo run --release --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx --genie dss
+FEAT=""
+if [ "${BLAS_LIB}" = "mkl" ]; then
+ FEAT="--features intel_mkl"
fi
-#cargo run --release --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx > data/results/solve-matrix-market-umfpack-bfwb62.json
-#cargo run --release --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx --genie mumps > data/results/solve-matrix-market-mumps-bfwb62.json
+cargo build --release $FEAT
-#if [[ "$WITH_DSS" == "1" ]]; then
- #cargo run --release --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx --genie dss > data/results/solve-matrix-market-dss-bfwb62.json
-#fi
+cargo run --release $FEAT --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx
+cargo run --release $FEAT --bin solve_matrix_market -- data/matrix_market/bfwb62.mtx --genie mumps
diff --git a/russell_stat/Cargo.toml b/russell_stat/Cargo.toml
index 2df4b79d..d7079174 100644
--- a/russell_stat/Cargo.toml
+++ b/russell_stat/Cargo.toml
@@ -11,6 +11,9 @@ readme = "README.md"
categories = ["mathematics", "science"]
keywords = ["statistics", "probability", "random", "numerical"]
+[features]
+intel_mkl = ["russell_lab/intel_mkl"]
+
[dependencies]
russell_lab = { path = "../russell_lab", version = "0.8.0" }
num-traits = "0.2"
diff --git a/russell_tensor/Cargo.toml b/russell_tensor/Cargo.toml
index 2db244e1..c0b6b02a 100644
--- a/russell_tensor/Cargo.toml
+++ b/russell_tensor/Cargo.toml
@@ -11,6 +11,9 @@ readme = "README.md"
categories = ["mathematics", "science"]
keywords = ["tensor", "Voigt", "Mandel", "continuum", "mechanics"]
+[features]
+intel_mkl = ["russell_lab/intel_mkl"]
+
[dependencies]
russell_lab = { path = "../russell_lab", version = "0.8.0" }
serde = { version = "1.0", features = ["derive"] }