Change coding style: for internal functions, use underscore and lowercase.
hunghaoti committed Feb 25, 2024
1 parent b6cbeaf commit afd9463
Showing 2 changed files with 41 additions and 40 deletions.
53 changes: 27 additions & 26 deletions src/linalg/Arnoldi.cpp
@@ -13,12 +13,12 @@ namespace cytnx {
namespace linalg {
typedef Accessor ac;
// <A|B>
- static Scalar Dot(const UniTensor &A, const UniTensor &B) {
+ static Scalar _Dot(const UniTensor &A, const UniTensor &B) {

kaihsin (Member) commented on Feb 25, 2024:

We have linalg.Dot already, right?

hunghaoti (Author, Collaborator) replied on Feb 25, 2024:

The inputs of linalg.Dot are Tensor objects, not UniTensor:

cytnx::linalg::Dot(const Tensor &Tl, const Tensor &Tr)

and it only supports 1D*1D, 2D*2D, and ND*1D. Currently I think we have no Dot function for UniTensor.

return Contract(A.Dagger(), B).item();
}
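For context on the thread above: the new helper is small enough to sketch in isolation. A minimal sketch, assuming cytnx.hpp is the umbrella header and using a hypothetical free-function name; the body is exactly the pattern this diff introduces:

```cpp
#include "cytnx.hpp"

// Minimal sketch of a UniTensor inner product <A|B>. linalg.Dot cannot be
// used here because it only accepts Tensor arguments. Assumes A and B carry
// matching labels so the contraction reduces to a rank-0 result.
static cytnx::Scalar dot_unitensor(const cytnx::UniTensor &A, const cytnx::UniTensor &B) {
  // Dagger() conjugate-transposes A, Contract() sums over all common labels,
  // and item() extracts the scalar from the rank-0 result.
  return cytnx::Contract(A.Dagger(), B).item();
}
```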

// resize the matrix (2-rank tensor)
- static Tensor ResizeMat(const Tensor &src, const cytnx_uint64 r, const cytnx_uint64 c) {
+ static Tensor _resize_mat(const Tensor &src, const cytnx_uint64 r, const cytnx_uint64 c) {
const auto min_r = std::min(r, src.shape()[0]);
const auto min_c = std::min(c, src.shape()[1]);
// Tensor dst = src[{ac::range(0,min_r),ac::range(0,min_c)}];
@@ -36,8 +36,9 @@ namespace cytnx {
}

// Get the indices of the first few order element
- std::vector<cytnx_int32> GetFstFewOrderElemIdices(const Tensor &tens, const std::string &which,
-                                                   const cytnx_int64 k) {
+ std::vector<cytnx_int32> _get_fst_few_order_elem_indices(const Tensor &tens,
+                                                          const std::string &which,
+                                                          const cytnx_int64 k) {
char large_or_small = which[0]; //'S' or 'L'
char metric_type = which[1]; //'M', 'R' or 'I'
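Since the selection body is elided above, here is a hypothetical standalone sketch of the "LM" (largest-magnitude) case on plain std::complex values, to make the which convention concrete; names and types are illustrative, not the cytnx implementation:

```cpp
#include <algorithm>
#include <cmath>
#include <complex>
#include <numeric>
#include <vector>

// Sketch: indices of the k eigenvalues with the largest magnitude ("LM"),
// mirroring what _get_fst_few_order_elem_indices is expected to return for
// which = "LM". Assumes k <= eigvals.size().
std::vector<int> k_largest_magnitude(const std::vector<std::complex<double>> &eigvals, int k) {
  std::vector<int> indices(eigvals.size());
  std::iota(indices.begin(), indices.end(), 0);  // 0, 1, 2, ...
  // Move the k largest-|lambda| indices to the front, in descending order.
  std::partial_sort(indices.begin(), indices.begin() + k, indices.end(),
                    [&](int a, int b) { return std::abs(eigvals[a]) > std::abs(eigvals[b]); });
  indices.resize(k);
  return indices;
}
```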

@@ -68,8 +69,8 @@ namespace cytnx {
return indices;
}

- bool IsEigvalCvg(const std::vector<Scalar> &eigvals, const std::vector<Scalar> &eigvals_old,
-                  const double cvg_crit) {
+ bool _is_eigval_cvg(const std::vector<Scalar> &eigvals, const std::vector<Scalar> &eigvals_old,
+                     const double cvg_crit) {
for (cytnx_int32 i = 0; i < eigvals.size(); ++i) {
auto err = abs(eigvals[i] - eigvals_old[i]);
if (err >= cvg_crit) return false;
@@ -78,8 +79,8 @@ namespace cytnx {
}

// check the residule |Mv - ev| is converged.
- bool IsResiduleSmallEnough(LinOp *Hop, const std::vector<Tensor> &eigvecs,
-                            const std::vector<Scalar> &eigvals, const double cvg_crit) {
+ bool _is_residule_small_enough(LinOp *Hop, const std::vector<Tensor> &eigvecs,
+                                const std::vector<Scalar> &eigvals, const double cvg_crit) {
for (cytnx_int32 i = 0; i < eigvals.size(); ++i) {
auto eigvec = eigvecs[i];
auto eigval = eigvals[i];
@@ -89,8 +90,8 @@ namespace cytnx {
return true;
}

- bool IsResiduleSmallEnough(LinOp *Hop, const std::vector<UniTensor> &eigvecs,
-                            const std::vector<Scalar> &eigvals, const double cvg_crit) {
+ bool _is_residule_small_enough(LinOp *Hop, const std::vector<UniTensor> &eigvecs,
+                                const std::vector<Scalar> &eigvals, const double cvg_crit) {
for (cytnx_int32 i = 0; i < eigvals.size(); ++i) {
auto eigvec = eigvecs[i];
auto eigval = eigvals[i];
@@ -100,8 +101,8 @@ namespace cytnx {
return true;
}

- std::vector<Tensor> GetEigTens(const std::vector<Tensor> &qs, Tensor eigvec_in_kryv,
-                                std::vector<cytnx_int32> max_indices) {
+ std::vector<Tensor> _get_eig_tens(const std::vector<Tensor> &qs, Tensor eigvec_in_kryv,
+                                   std::vector<cytnx_int32> max_indices) {
auto k = max_indices.size();
cytnx_int64 krydim = eigvec_in_kryv.shape()[0];
auto P_inv = InvM(eigvec_in_kryv).Conj();
@@ -118,8 +119,8 @@ namespace cytnx {
return eigTens_s;
}

- std::vector<UniTensor> GetEigTens(const std::vector<UniTensor> &qs, Tensor eigvec_in_kryv,
-                                   std::vector<cytnx_int32> max_indices) {
+ std::vector<UniTensor> _get_eig_tens(const std::vector<UniTensor> &qs, Tensor eigvec_in_kryv,
+                                      std::vector<cytnx_int32> max_indices) {
auto k = max_indices.size();
cytnx_int64 krydim = eigvec_in_kryv.shape()[0];
auto P_inv = InvM(eigvec_in_kryv).Conj();
@@ -167,22 +168,22 @@ namespace cytnx {
auto h = buffer[i].Norm().item();
kry_mat_buffer[{i - 1, i}] = h;
buffer[i] /= h;
- Tensor kry_mat = ResizeMat(kry_mat_buffer, krydim, krydim);
+ Tensor kry_mat = _resize_mat(kry_mat_buffer, krydim, krydim);

// call Eig to get eigenvalues
auto eigs = Eig(kry_mat, true, true);
// get first few order of eigenvlues
- std::vector<cytnx_int32> maxIndices = GetFstFewOrderElemIdices(eigs[0], which, k);
+ std::vector<cytnx_int32> maxIndices = _get_fst_few_order_elem_indices(eigs[0], which, k);
for (cytnx_int32 ik = 0; ik < k; ++ik) {
auto maxIdx = maxIndices[ik];
eigvals[ik] = eigs[0].storage().at(maxIdx);
}

// check converged
- bool is_eigval_cvg = IsEigvalCvg(eigvals, eigvals_old, CvgCrit);
+ bool is_eigval_cvg = _is_eigval_cvg(eigvals, eigvals_old, CvgCrit);
if (is_eigval_cvg || i == imp_maxiter - 1) {
- eigTens_s = GetEigTens(buffer, eigs[1], maxIndices);
- bool is_res_small_enough = IsResiduleSmallEnough(Hop, eigTens_s, eigvals, CvgCrit);
+ eigTens_s = _get_eig_tens(buffer, eigs[1], maxIndices);
+ bool is_res_small_enough = _is_residule_small_enough(Hop, eigTens_s, eigvals, CvgCrit);
if (is_res_small_enough) {
is_cvg = true;
break;
@@ -231,29 +232,29 @@ namespace cytnx {
auto nextTens = Hop->matvec(buffer[i - 1]).astype(Hop->dtype());
buffer.push_back(nextTens);
for (cytnx_uint32 j = 0; j < krydim; j++) {
- auto h = Dot(buffer[i], buffer[j]).conj();
+ auto h = _Dot(buffer[i], buffer[j]).conj();
kry_mat_buffer[{i - 1, j}] = h;
buffer[i] -= h * buffer[j];
}
- auto h = std::sqrt(static_cast<double>(Dot(buffer[i], buffer[i]).real()));
+ auto h = std::sqrt(static_cast<double>(_Dot(buffer[i], buffer[i]).real()));
kry_mat_buffer[{i - 1, i}] = h;
buffer[i] /= h;
- Tensor kry_mat = ResizeMat(kry_mat_buffer, krydim, krydim);
+ Tensor kry_mat = _resize_mat(kry_mat_buffer, krydim, krydim);

// call Eig to get eigenvalues
auto eigs = Eig(kry_mat, true, true);
// get first few order of eigenvlues
- std::vector<cytnx_int32> maxIndices = GetFstFewOrderElemIdices(eigs[0], which, k);
+ std::vector<cytnx_int32> maxIndices = _get_fst_few_order_elem_indices(eigs[0], which, k);
for (cytnx_int32 ik = 0; ik < k; ++ik) {
auto maxIdx = maxIndices[ik];
eigvals[ik] = eigs[0].storage().at(maxIdx);
}

// check converged
- bool is_eigval_cvg = IsEigvalCvg(eigvals, eigvals_old, CvgCrit);
+ bool is_eigval_cvg = _is_eigval_cvg(eigvals, eigvals_old, CvgCrit);
if (is_eigval_cvg || i == imp_maxiter - 1) {
- eigTens_s = GetEigTens(buffer, eigs[1], maxIndices);
- bool is_res_small_enough = IsResiduleSmallEnough(Hop, eigTens_s, eigvals, CvgCrit);
+ eigTens_s = _get_eig_tens(buffer, eigs[1], maxIndices);
+ bool is_res_small_enough = _is_residule_small_enough(Hop, eigTens_s, eigvals, CvgCrit);
if (is_res_small_enough) {
is_cvg = true;
break;
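The hunks above implement the standard Arnoldi recurrence. A sketch of the textbook relations (my notation, not quoted from the source), with q_i the Krylov vectors held in buffer and H the matrix accumulated in kry_mat_buffer:

$$
h_{j,i} = \langle q_j,\; A q_i \rangle, \qquad
\tilde{q}_{i+1} = A q_i - \sum_{j=1}^{i} h_{j,i}\, q_j, \qquad
h_{i+1,i} = \lVert \tilde{q}_{i+1} \rVert, \qquad
q_{i+1} = \tilde{q}_{i+1} / h_{i+1,i}.
$$

The resized krydim-by-krydim kry_mat is then the projection of Hop onto the Krylov basis, and the Eig call yields Ritz values that approximate the extremal eigenvalues of Hop.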
28 changes: 14 additions & 14 deletions src/linalg/Lanczos_Exp.cpp
@@ -20,44 +20,44 @@ namespace cytnx {
using namespace std;

// <A|B>
- static Scalar Dot(const UniTensor &A, const UniTensor &B) {
+ static Scalar _Dot(const UniTensor &A, const UniTensor &B) {

kaihsin (Member) commented on Feb 25, 2024:

same here

return Contract(A.Dagger(), B).item();
}

// BiCGSTAB method to solve the linear equation
// ref: https://en.wikipedia.org/wiki/Biconjugate_gradient_stabilized_method
- UniTensor Invert_BiCGSTAB(LinOp *Hop, const UniTensor &b, const UniTensor &Tin, const int &k,
-                           const double &CvgCrit = 1.0e-12,
-                           const unsigned int &Maxiter = 10000) {
+ UniTensor _invert_biCGSTAB(LinOp *Hop, const UniTensor &b, const UniTensor &Tin, const int &k,
+                            const double &CvgCrit = 1.0e-12,
+                            const unsigned int &Maxiter = 10000) {
auto I_plus_A_Op = [&](UniTensor A) {
return ((Hop->matvec(A)) / k + A).relabels_(b.labels());
};
auto r = (b - I_plus_A_Op(Tin)).relabels_(b.labels());
auto r0 = r;
auto x = Tin;
- auto p = Dot(r0, r);
+ auto p = _Dot(r0, r);
auto pv = r;
auto p_old = p;
auto pv_old = pv;
auto x_old = x;
auto r_old = r;
for (int i = 1; i < Maxiter; ++i) {
auto v = I_plus_A_Op(pv_old);
- auto a = p_old / Dot(r0, v);
+ auto a = p_old / _Dot(r0, v);
auto h = (x_old + a * pv_old).relabels_(b.labels());
auto s = (r_old - a * v).relabels_(b.labels());
- if (abs(Dot(s, s)) < CvgCrit) {
+ if (abs(_Dot(s, s)) < CvgCrit) {
x = h;
break;
}
auto t = I_plus_A_Op(s);
- auto w = Dot(t, s) / Dot(t, t);
+ auto w = _Dot(t, s) / _Dot(t, t);
x = (h + w * s).relabels_(b.labels());
r = (s - w * t).relabels_(b.labels());
- if (abs(Dot(r, r)) < CvgCrit) {
+ if (abs(_Dot(r, r)) < CvgCrit) {
break;
}
- auto p = Dot(r0, r);
+ auto p = _Dot(r0, r);
auto beta = (p / p_old) * (a / w);
pv = (r + beta * (pv_old - w * v)).relabels_(b.labels());
pv_old = pv;
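As a companion to this hunk, a minimal self-contained sketch of the same BiCGSTAB variant (the Wikipedia formulation the comment cites) on dense real vectors, with hypothetical helper names and no preconditioning; the cytnx version replaces the vectors with UniTensor, A with the (I + A/k) operator built from Hop->matvec, and dot with _Dot:

```cpp
#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

using Vec = std::vector<double>;

static double dot(const Vec &a, const Vec &b) {
  double s = 0.0;
  for (std::size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
  return s;
}

// x += c * y, elementwise.
static void axpy(Vec &x, double c, const Vec &y) {
  for (std::size_t i = 0; i < x.size(); ++i) x[i] += c * y[i];
}

// Hypothetical sketch: solve A x = b with unpreconditioned BiCGSTAB,
// mirroring the update sequence of _invert_biCGSTAB above.
Vec bicgstab(const std::function<Vec(const Vec &)> &A, const Vec &b, Vec x,
             double tol = 1.0e-12, int maxiter = 10000) {
  Vec r = b;
  Vec Ax = A(x);
  axpy(r, -1.0, Ax);         // r = b - A x
  const Vec r0 = r;          // shadow residual, fixed throughout
  double rho_old = dot(r0, r);
  Vec pv = r;                // search direction
  for (int i = 1; i < maxiter; ++i) {
    Vec v = A(pv);
    double alpha = rho_old / dot(r0, v);
    Vec h = x; axpy(h, alpha, pv);   // h = x + alpha * pv
    Vec s = r; axpy(s, -alpha, v);   // s = r - alpha * v
    if (std::abs(dot(s, s)) < tol) { x = h; break; }
    Vec t = A(s);
    double w = dot(t, s) / dot(t, t);
    x = h; axpy(x, w, s);            // x = h + w * s
    r = s; axpy(r, -w, t);           // r = s - w * t
    if (std::abs(dot(r, r)) < tol) break;
    double rho = dot(r0, r);
    double beta = (rho / rho_old) * (alpha / w);
    Vec pv_new = r;                  // pv = r + beta * (pv - w * v)
    axpy(pv_new, beta, pv);
    axpy(pv_new, -beta * w, v);
    pv = pv_new;
    rho_old = rho;
  }
  return x;
}
```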
@@ -95,21 +95,21 @@ namespace cytnx {
// Call the procedure Invert_A (v[i], k, eps1). The procedure returns a vector w[i], such
// that,
// |(I + A / k )^(−1) v[i] − w[i]| ≤ eps1 |v[i]| .
- auto w = Invert_BiCGSTAB(Hop, v, v, k, eps1);
+ auto w = _invert_biCGSTAB(Hop, v, v, k, eps1);
// auto resi = ((Hop->matvec(w))/k + w).relabels_(v.labels()) - v;

// For j = 0 to i
for (int j = 0; j <= i; ++j) {
// Let a[j,i] = <v[j], w[i]>
- as.at({j, i}) = Dot(Vs.at(j), w);
+ as.at({j, i}) = _Dot(Vs.at(j), w);
}
// Define wp[i] = w[i] - \sum_{j=0}^{j=i} {a[j,i]v[j]}
auto wp = w;
for (int j = 0; j <= i; j++) {
wp -= as.at({j, i}) * Vs.at(j);
}
// Let a[i+1, i] = |wp[i]|, v[i+1]=wp[i] / a[i+1, i]
- auto b = std::sqrt(double(Dot(wp, wp).real()));
+ auto b = std::sqrt(double(_Dot(wp, wp).real()));
if (i < k) {
as.at({i + 1, i}) = b;
v = wp / b;
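Restating the loop body in equation form (these relations come directly from the comments in this hunk):

$$
a_{j,i} = \langle v_j,\, w_i \rangle, \qquad
w_i' = w_i - \sum_{j=0}^{i} a_{j,i}\, v_j, \qquad
a_{i+1,i} = \lVert w_i' \rVert, \qquad
v_{i+1} = w_i' / a_{i+1,i},
$$

where each w_i approximates (I + A/k)^{-1} v_i to relative accuracy eps1, as returned by _invert_biCGSTAB.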
@@ -222,7 +222,7 @@ namespace cytnx {

return out;

- } // Lanczos_Gnd entry point
+ } // Lanczos_Exp entry point

} // namespace linalg
} // namespace cytnx
