Remove memory leaks in matrix and vector unit tests (#149)
* Fix CPU bug in Gram-Schmidt test.

* Remove memory leaks in matrix/vector unit tests.

* Clean up GS unit tests.
pelesh authored Apr 5, 2024
1 parent 06dc229 commit d913854
Showing 8 changed files with 217 additions and 325 deletions.
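
The recurring fix in this commit is an ownership change: the unit tests previously heap-allocated a workspace inside a factory helper plus a handler wrapping it, then deleted only the handler, leaking the workspace. The test drivers now construct both with automatic storage and pass the handler to the test by reference. A minimal, self-contained sketch of that pattern with hypothetical Workspace/Handler stand-ins (not the actual ReSolve classes):

#include <iostream>

// Hypothetical stand-ins for the ReSolve workspace/handler classes, used only
// to illustrate the ownership change made in this commit.
struct Workspace {
  void initializeHandles() { /* acquire backend handles here */ }
};

struct Handler {
  explicit Handler(Workspace* w) : workspace(w) {}
  Workspace* workspace;
};

// Before: a factory like this leaked the workspace, because callers only
// deleted the handler it returned.
Handler* createHandlerLeaky() {
  Workspace* w = new Workspace();  // never freed by the caller
  w->initializeHandles();
  return new Handler(w);
}

int main() {
  // After: the driver owns both objects with automatic storage, so there is
  // nothing to delete by hand and nothing left to leak.
  Workspace workspace;
  workspace.initializeHandles();
  Handler handler(&workspace);
  std::cout << "handler ready: " << (handler.workspace != nullptr) << "\n";
  return 0;
}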
6 changes: 4 additions & 2 deletions resolve/workspace/LinAlgWorkspaceCUDA.cpp
@@ -23,7 +23,9 @@ namespace ReSolve
cusparseDestroy(handle_cusparse_);
cusolverSpDestroy(handle_cusolversp_);
cublasDestroy(handle_cublas_);
cusparseDestroySpMat(mat_A_);
if (matvec_setup_done_) {
cusparseDestroySpMat(mat_A_);
}
}

void* LinAlgWorkspaceCUDA::getSpmvBuffer()
@@ -53,7 +55,7 @@ namespace ReSolve

void LinAlgWorkspaceCUDA::setNormBufferState(bool r)
{
norm_buffer_ready_ = r;;
norm_buffer_ready_ = r;
}

cusparseHandle_t LinAlgWorkspaceCUDA::getCusparseHandle()
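The destructor change in this file follows a flag-guarded teardown pattern: the cuSPARSE matrix descriptor is created lazily during matvec setup, so the destructor should destroy it only when that setup actually ran. A minimal sketch of the idea (member names follow the diff; the surrounding class is hypothetical and the CUDA toolkit headers are assumed to be available):

#include <cusparse.h>

class WorkspaceSketch {
public:
  ~WorkspaceSketch()
  {
    // Destroy the descriptor only if matvec setup created it; calling
    // cusparseDestroySpMat on an uninitialized handle is what the commit avoids.
    if (matvec_setup_done_) {
      cusparseDestroySpMat(mat_A_);
    }
  }

  void matvecSetup(cusparseSpMatDescr_t descriptor)
  {
    // Hypothetical setup hook: record the descriptor and remember that it exists.
    mat_A_ = descriptor;
    matvec_setup_done_ = true;
  }

private:
  cusparseSpMatDescr_t mat_A_{nullptr};
  bool matvec_setup_done_{false};
};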
4 changes: 3 additions & 1 deletion resolve/workspace/LinAlgWorkspaceHIP.cpp
@@ -18,7 +18,9 @@ namespace ReSolve
{
rocsparse_destroy_handle(handle_rocsparse_);
rocblas_destroy_handle(handle_rocblas_);
rocsparse_destroy_mat_descr(mat_A_);
if (matvec_setup_done_) {
rocsparse_destroy_mat_descr(mat_A_);
}
if (d_r_size_ != 0) mem_.deleteOnDevice(d_r_);
if (norm_buffer_ready_ == true) mem_.deleteOnDevice(norm_buffer_);
}
87 changes: 29 additions & 58 deletions tests/unit/matrix/MatrixHandlerTests.hpp
@@ -18,8 +18,15 @@ namespace ReSolve { namespace tests {
class MatrixHandlerTests : TestBase
{
public:
MatrixHandlerTests(std::string memspace) : memspace_(memspace)
{}
MatrixHandlerTests(ReSolve::MatrixHandler& handler) : handler_(handler)
{
if (handler_.getIsCudaEnabled() || handler_.getIsHipEnabled()) {
memspace_ = memory::DEVICE;
} else {
memspace_ = memory::HOST;
}
}

virtual ~MatrixHandlerTests()
{}

@@ -34,88 +41,52 @@ class MatrixHandlerTests : TestBase
TestOutcome matrixInfNorm(index_type N)
{
TestStatus status;
ReSolve::memory::MemorySpace ms;
if (memspace_ == "cpu")
ms = memory::HOST;
else
ms = memory::DEVICE;

ReSolve::MatrixHandler* handler = createMatrixHandler();

matrix::Csr* A = createCsrMatrix(N, memspace_);
matrix::Csr* A = createCsrMatrix(N);
real_type norm;
handler->matrixInfNorm(A, &norm, ms);
handler_.matrixInfNorm(A, &norm, memspace_);
status *= (norm == 30.0);

delete handler;
delete A;

return status.report(__func__);
}

TestOutcome matVec(index_type N)
{
TestStatus status;
ReSolve::memory::MemorySpace ms;
if (memspace_ == "cpu")
ms = memory::HOST;
else
ms = memory::DEVICE;

ReSolve::MatrixHandler* handler = createMatrixHandler();

matrix::Csr* A = createCsrMatrix(N, memspace_);
matrix::Csr* A = createCsrMatrix(N);
vector::Vector x(N);
vector::Vector y(N);
x.allocate(ms);
if (x.getData(ms) == NULL) printf("oups we have an issue \n");
y.allocate(ms);
x.allocate(memspace_);
if (x.getData(memspace_) == NULL)
std::cout << "Oups we have an issue \n";
y.allocate(memspace_);

x.setToConst(1.0, ms);
y.setToConst(1.0, ms);
x.setToConst(1.0, memspace_);
y.setToConst(1.0, memspace_);

real_type alpha = 2.0/30.0;
real_type beta = 2.0;
handler->setValuesChanged(true, ms);
handler->matvec(A, &x, &y, &alpha, &beta, "csr", ms);
handler_.setValuesChanged(true, memspace_);
handler_.matvec(A, &x, &y, &alpha, &beta, "csr", memspace_);

status *= verifyAnswer(y, 4.0, memspace_);
status *= verifyAnswer(y, 4.0);

delete handler;
delete A;

return status.report(__func__);
}

private:
std::string memspace_{"cpu"};

ReSolve::MatrixHandler* createMatrixHandler()
{
if (memspace_ == "cpu") {
LinAlgWorkspaceCpu* workspace = new LinAlgWorkspaceCpu();
return new MatrixHandler(workspace);
#ifdef RESOLVE_USE_CUDA
} else if (memspace_ == "cuda") {
LinAlgWorkspaceCUDA* workspace = new LinAlgWorkspaceCUDA();
workspace->initializeHandles();
return new MatrixHandler(workspace);
#endif
#ifdef RESOLVE_USE_HIP
} else if (memspace_ == "hip") {
LinAlgWorkspaceHIP* workspace = new LinAlgWorkspaceHIP();
workspace->initializeHandles();
return new MatrixHandler(workspace);
#endif
} else {
std::cout << "ReSolve not built with support for memory space " << memspace_ << "\n";
}
return nullptr;
}
ReSolve::MatrixHandler& handler_;
memory::MemorySpace memspace_{memory::HOST};

bool verifyAnswer(vector::Vector& x, real_type answer, std::string memspace)
bool verifyAnswer(vector::Vector& x, real_type answer)
{
bool status = true;
if (memspace != "cpu") {
if (memspace_ == memory::DEVICE) {
x.copyData(memory::DEVICE, memory::HOST);
}

@@ -131,7 +102,7 @@ class MatrixHandlerTests : TestBase
return status;
}

matrix::Csr* createCsrMatrix(const index_type N, std::string memspace)
matrix::Csr* createCsrMatrix(const index_type N)
{
std::vector<real_type> r1 = {1., 5., 7., 8., 3., 2., 4.}; // sum 30
std::vector<real_type> r2 = {1., 3., 2., 2., 1., 6., 7., 3., 2., 3.}; // sum 30
@@ -177,8 +148,8 @@ class MatrixHandlerTests : TestBase
}
A->setUpdated(memory::HOST);

if ((memspace == "cuda") || (memspace == "hip")) {
A->copyData(memory::DEVICE);
if (memspace_ == memory::DEVICE) {
A->copyData(memspace_);
}

return A;
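The new MatrixHandlerTests constructor in this file also replaces the old string-based memory space ("cpu"/"cuda"/"hip") with a value derived from the handler's backend flags. A small self-contained mirror of that selection logic, with a hypothetical handler stub in place of ReSolve::MatrixHandler; only the two query methods named in the diff are assumed:

#include <iostream>

// Hypothetical mirror of the constructor logic shown above.
enum class MemorySpace { HOST, DEVICE };

struct HandlerStub {
  bool cudaEnabled = false;
  bool hipEnabled = false;
  bool getIsCudaEnabled() const { return cudaEnabled; }
  bool getIsHipEnabled() const { return hipEnabled; }
};

MemorySpace pickMemorySpace(const HandlerStub& handler)
{
  // DEVICE when any GPU backend is enabled, HOST otherwise.
  return (handler.getIsCudaEnabled() || handler.getIsHipEnabled())
             ? MemorySpace::DEVICE
             : MemorySpace::HOST;
}

int main()
{
  HandlerStub cpuOnly;
  std::cout << ((pickMemorySpace(cpuOnly) == MemorySpace::HOST) ? "HOST" : "DEVICE")
            << "\n";
  return 0;
}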
18 changes: 14 additions & 4 deletions tests/unit/matrix/runMatrixHandlerTests.cpp
@@ -11,8 +11,12 @@ int main(int, char**)

{
std::cout << "Running tests on CPU:\n";
ReSolve::tests::MatrixHandlerTests test("cpu");


ReSolve::LinAlgWorkspaceCpu workspace;
workspace.initializeHandles();
ReSolve::MatrixHandler handler(&workspace);

ReSolve::tests::MatrixHandlerTests test(handler);
result += test.matrixHandlerConstructor();
result += test.matrixInfNorm(10000);
result += test.matVec(50);
@@ -23,8 +27,11 @@ int main(int, char**)
#ifdef RESOLVE_USE_CUDA
{
std::cout << "Running tests with CUDA backend:\n";
ReSolve::tests::MatrixHandlerTests test("cuda");
ReSolve::LinAlgWorkspaceCUDA workspace;
workspace.initializeHandles();
ReSolve::MatrixHandler handler(&workspace);

ReSolve::tests::MatrixHandlerTests test(handler);
result += test.matrixHandlerConstructor();
result += test.matrixInfNorm(1000000);
result += test.matVec(50);
@@ -36,8 +43,11 @@ int main(int, char**)
#ifdef RESOLVE_USE_HIP
{
std::cout << "Running tests with HIP backend:\n";
ReSolve::tests::MatrixHandlerTests test("hip");
ReSolve::LinAlgWorkspaceHIP workspace;
workspace.initializeHandles();
ReSolve::MatrixHandler handler(&workspace);

ReSolve::tests::MatrixHandlerTests test(handler);
result += test.matrixHandlerConstructor();
result += test.matrixInfNorm(1000000);
result += test.matVec(50);
73 changes: 33 additions & 40 deletions tests/unit/vector/GramSchmidtTests.hpp
Expand Up @@ -18,8 +18,12 @@ namespace ReSolve
class GramSchmidtTests : TestBase
{
public:
GramSchmidtTests(ReSolve::VectorHandler* handler) : handler_(handler)
GramSchmidtTests(ReSolve::VectorHandler& handler) : handler_(handler)
{
if (handler_.getIsCudaEnabled() || handler_.getIsHipEnabled())
memspace_ = memory::DEVICE;
else
memspace_ = memory::HOST;
}

virtual ~GramSchmidtTests()
@@ -62,99 +66,88 @@ namespace ReSolve
break;
}

ReSolve::memory::MemorySpace ms;
if (handler_->getIsCudaEnabled() || handler_->getIsHipEnabled())
ms = memory::DEVICE;
else
ms = memory::HOST;

vector::Vector* V = new vector::Vector(N, 3); // we will be using a space of 3 vectors
vector::Vector V(N, 3); // we will be using a space of 3 vectors
real_type* H = new real_type[9]; // In this case, Hessenberg matrix is NOT 3 x 2 ???
real_type* aux_data = nullptr; // needed for setup

V->allocate(memory::DEVICE);
V->allocate(memory::HOST);
V.allocate(memspace_);
if (memspace_ == memory::DEVICE) {
V.allocate(memory::HOST);
}

ReSolve::GramSchmidt* GS = new ReSolve::GramSchmidt(handler_, var);
GS->setup(N, 3);
ReSolve::GramSchmidt GS(&handler_, var);
GS.setup(N, 3);

//fill 2nd and 3rd vector with values
aux_data = V->getVectorData(1, memory::HOST);
aux_data = V.getVectorData(1, memory::HOST);
for (int i = 0; i < N; ++i) {
if ( i % 2 == 0) {
aux_data[i] = constants::ONE;
} else {
aux_data[i] = var1;
}
}
aux_data = V->getVectorData(2, memory::HOST);
aux_data = V.getVectorData(2, memory::HOST);
for (int i = 0; i < N; ++i) {
if ( i % 3 > 0) {
aux_data[i] = constants::ZERO;
} else {
aux_data[i] = var2;
}
}
V->setDataUpdated(memory::HOST);
V->copyData(memory::HOST, ms);
V.setDataUpdated(memory::HOST);
V.copyData(memory::HOST, memspace_);

//set the first vector to all 1s, normalize
V->setToConst(0, 1.0, ms);
real_type nrm = handler_->dot(V, V, ms);
V.setToConst(0, 1.0, memspace_);
real_type nrm = handler_.dot(&V, &V, memspace_);
nrm = sqrt(nrm);
nrm = 1.0 / nrm;
handler_->scal(&nrm, V, ms);
handler_.scal(&nrm, &V, memspace_);

GS->orthogonalize(N, V, H, 0);
GS->orthogonalize(N, V, H, 1);
GS.orthogonalize(N, &V, H, 0);
GS.orthogonalize(N, &V, H, 1);
status *= verifyAnswer(V, 3);

delete [] H;
delete V;
delete GS;

return status.report(testname.c_str());
}

private:
ReSolve::VectorHandler* handler_{nullptr};
ReSolve::VectorHandler& handler_;
ReSolve::memory::MemorySpace memspace_;

// x is a multivector containing K vectors
bool verifyAnswer(vector::Vector* x, index_type K)
bool verifyAnswer(vector::Vector& x, index_type K)
{
ReSolve::memory::MemorySpace ms;
if (handler_->getIsCudaEnabled() || handler_->getIsHipEnabled())
ms = memory::DEVICE;
else
ms = memory::HOST;

vector::Vector* a = new vector::Vector(x->getSize());
vector::Vector* b = new vector::Vector(x->getSize());
vector::Vector a(x.getSize());
vector::Vector b(x.getSize());

real_type ip;
bool status = true;

for (index_type i = 0; i < K; ++i) {
for (index_type j = 0; j < K; ++j) {
a->update(x->getVectorData(i, ms), ms, memory::HOST);
b->update(x->getVectorData(j, ms), ms, memory::HOST);
ip = handler_->dot(a, b, memory::HOST);
a.update(x.getVectorData(i, memspace_), memspace_, memory::HOST);
b.update(x.getVectorData(j, memspace_), memspace_, memory::HOST);
ip = handler_.dot(&a, &b, memory::HOST);
if ( (i != j) && !isEqual(ip, 0.0)) {
status = false;
std::cout << "Vectors " << i << " and " << j << " are not orthogonal!"
<< " Inner product computed: " << ip << ", expected: " << 0.0 << "\n";
<< " Inner product computed: " << ip << ", expected: " << 0.0 << "\n";
break;
}
if ( (i == j) && !isEqual(sqrt(ip), 1.0)) {
status = false;
std::cout << std::setprecision(16);
std::cout << "Vector " << i << " has norm: " << sqrt(ip) << " expected: "<< 1.0 <<"\n";
std::cout << "Vector " << i << " has norm: " << sqrt(ip)
<< " expected: " << 1.0 << "\n";
break;
}
}
}
delete a;
delete b;

return status;
}
}; // class
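The verifyAnswer routine in this file reduces to confirming that the multivector's columns are orthonormal: pairwise inner products should be 0 for distinct vectors and 1 on the diagonal. A tiny self-contained version of that check using plain std::vector instead of the ReSolve vector classes:

#include <cmath>
#include <iostream>
#include <vector>

// Returns true if the K vectors in V (each of length N) are orthonormal:
// dot(v_i, v_j) ~ 0 for i != j and ~ 1 for i == j.
bool isOrthonormal(const std::vector<std::vector<double>>& V, double tol = 1e-12)
{
  for (std::size_t i = 0; i < V.size(); ++i) {
    for (std::size_t j = 0; j < V.size(); ++j) {
      double ip = 0.0;
      for (std::size_t k = 0; k < V[i].size(); ++k) {
        ip += V[i][k] * V[j][k];
      }
      const double expected = (i == j) ? 1.0 : 0.0;
      if (std::fabs(ip - expected) > tol) {
        return false;
      }
    }
  }
  return true;
}

int main()
{
  const std::vector<std::vector<double>> V = {{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}};
  std::cout << (isOrthonormal(V) ? "orthonormal" : "not orthonormal") << "\n";
  return 0;
}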
(Diffs for the remaining 3 changed files were not loaded.)
