diff --git a/include/Tensor.hpp b/include/Tensor.hpp index f18f6aec..46cf3e32 100644 --- a/include/Tensor.hpp +++ b/include/Tensor.hpp @@ -521,7 +521,7 @@ namespace cytnx { "[ERROR] Attempt to convert dtype %d (%s) to pointer of type %s", this->dtype(), Type_class::getname(this->dtype()).c_str(), Type_class::getname(Type_class::cy_typeid_v>).c_str()); - return static_cast(this->_impl->_storage._impl->Mem); + return static_cast(this->_impl->_storage._impl->data()); } #ifdef UNI_GPU @@ -542,7 +542,7 @@ namespace cytnx { "[ERROR] Attempt to convert dtype %d (%s) to GPU pointer of type %s", this->dtype(), Type_class::getname(this->dtype()).c_str(), Type_class::getname(Type_class::cy_typeid_gpu_v>).c_str()); - return static_cast(this->_impl->_storage._impl->Mem); + return static_cast(this->_impl->_storage._impl->data()); } #endif @@ -1517,7 +1517,7 @@ namespace cytnx { this->_impl->_storage.resize(oldsize + in.size()); memcpy(((char *)this->_impl->_storage.data()) + oldsize * Type.typeSize(this->dtype()) / sizeof(char), - in._impl->Mem, Type.typeSize(in.dtype()) * in.size()); + in._impl->data(), Type.typeSize(in.dtype()) * in.size()); } /* void append(const Tensor &rhs){ diff --git a/include/backend/Scalar.hpp b/include/backend/Scalar.hpp index 724ef83d..8cdbcea3 100644 --- a/include/backend/Scalar.hpp +++ b/include/backend/Scalar.hpp @@ -393,6 +393,9 @@ namespace cytnx { return nullptr; } + /** + * @deprecated This method is no longer in use. + */ virtual void *get_raw_address() const { cytnx_error_msg(true, "[ERROR] Void Type Scalar cannot have operation!!%s", "\n"); return nullptr; diff --git a/include/backend/Storage.hpp b/include/backend/Storage.hpp index 6a4b3722..c44e81ff 100644 --- a/include/backend/Storage.hpp +++ b/include/backend/Storage.hpp @@ -26,15 +26,7 @@ namespace cytnx { ///@cond class Storage_base : public intrusive_ptr_base { public: - void *Mem; - // std::vector shape; - - unsigned long long len; // default 0 - unsigned long long cap; // default 0 - unsigned int dtype; // default 0, Void - int device; // default -1, on cpu - - Storage_base() : cap(0), len(0), Mem(NULL), dtype(0), device(-1){}; + Storage_base() = default; // Storage_base(const std::initializer_list &init_shape); // Storage_base(const std::vector &init_shape); Storage_base(const unsigned long long &len_in, const int &device, const bool &init_zero = true); @@ -46,9 +38,13 @@ namespace cytnx { // void Init(const std::initializer_list &init_shape); std::string dtype_str() const; std::string device_str() const; - const unsigned long long &capacity() const { return this->cap; } - const unsigned long long &size() const { return this->len; } - ~Storage_base(); + virtual const unsigned long long capacity() const { + cytnx_error_msg(true, "Not implemented.%s", ""); + } + virtual const unsigned long long size() const { + cytnx_error_msg(true, "Not implemented.%s", ""); + } + virtual ~Storage_base(); template T &at(const cytnx_uint64 &idx) const; @@ -59,7 +55,11 @@ template T *data() const; - void *data() const { return this->Mem; } + // `Storage_base` can be instantiated directly. Its destructor calls `data()`, so we cannot + // throw a runtime error here. 
+ virtual void *data() const { return nullptr; } + virtual int dtype() const { return Type.Void; } + virtual int device() const { return Device.cpu; } void _cpy_bool(void *ptr, const std::vector &vin); @@ -87,37 +87,37 @@ namespace cytnx { template void _Init_byptr_safe(T *rawptr, const unsigned long long &len_in) { // check: - if (this->dtype == Type.Float) { + if (this->dtype() == Type.Float) { cytnx_error_msg(typeid(T) != typeid(cytnx_float), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Double) { + } else if (this->dtype() == Type.Double) { cytnx_error_msg(typeid(T) != typeid(cytnx_double), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Uint64) { + } else if (this->dtype() == Type.Uint64) { cytnx_error_msg(typeid(T) != typeid(cytnx_uint64), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Uint32) { + } else if (this->dtype() == Type.Uint32) { cytnx_error_msg(typeid(T) != typeid(cytnx_uint32), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Int64) { + } else if (this->dtype() == Type.Int64) { cytnx_error_msg(typeid(T) != typeid(cytnx_int64), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Int32) { + } else if (this->dtype() == Type.Int32) { cytnx_error_msg(typeid(T) != typeid(cytnx_int32), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.ComplexDouble) { + } else if (this->dtype() == Type.ComplexDouble) { cytnx_error_msg(typeid(T) != typeid(cytnx_complex128), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.ComplexFloat) { + } else if (this->dtype() == Type.ComplexFloat) { cytnx_error_msg(typeid(T) != typeid(cytnx_complex64), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Int16) { + } else if (this->dtype() == Type.Int16) { cytnx_error_msg(typeid(T) != typeid(cytnx_int16), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Uint16) { + } else if (this->dtype() == Type.Uint16) { cytnx_error_msg(typeid(T) != typeid(cytnx_uint16), "%s", "[ERROR _Init_byptr_safe type not match]"); - } else if (this->dtype == Type.Bool) { + } else if (this->dtype() == Type.Bool) { cytnx_error_msg(typeid(T) != typeid(cytnx_bool), "%s", "[ERROR _Init_byptr_safe type not match]"); } else { @@ -146,6 +146,18 @@ namespace cytnx { const std::vector &shape, const std::vector> &locators, const cytnx_uint64 &Nunit, const bool &is_scalar); + + /** + * @brief Drop the ownership of the underlying contiguous memory. + * + * The caller MUST take the ownership before calling this method. Any operation following this + * method causes undefined behavior. + * + * @return The pointer referencing the underlying storage. + * @deprecated This method may be removed without any notification. 
+ */ + virtual void *release() noexcept { return nullptr; } + // these is the one that do the work, and customize with Storage_base // virtual void Init(const std::vector &init_shape); virtual void Init(const unsigned long long &len_in, const int &device = -1, @@ -219,9 +231,6 @@ namespace cytnx { virtual void set_item(const cytnx_uint64 &idx, const cytnx_int16 &val); virtual void set_item(const cytnx_uint64 &idx, const cytnx_uint16 &val); virtual void set_item(const cytnx_uint64 &idx, const cytnx_bool &val); - - // virtual bool approx_eq(const boost::intrusive_ptr &rhs, - // const cytnx_double tol = 1e-8); }; ///@endcond @@ -229,7 +238,8 @@ namespace cytnx { template class StorageImplementation : public Storage_base { public: - StorageImplementation() { this->dtype = Type.cy_typeid(DType()); }; + StorageImplementation() + : capacity_(0), size_(0), start_(nullptr), dtype_(Type.cy_typeid(DType())), device_(-1){}; void Init(const unsigned long long &len_in, const int &device = -1, const bool &init_zero = true); void _Init_byptr(void *rawptr, const unsigned long long &len_in, const int &device = -1, @@ -251,6 +261,29 @@ namespace cytnx { boost::intrusive_ptr real(); boost::intrusive_ptr imag(); + const unsigned long long capacity() const override { return capacity_; } + const unsigned long long size() const override { return size_; } + void *data() const override { return start_; } + int dtype() const override { return dtype_; } + int device() const override { return device_; } + + /** + * @brief Drop the ownership of the underlying contiguous memory. + * + * The caller MUST take the ownership before calling this method. Any operation following this + * method causes undefined behavior. + * + * @return The pointer referencing the underlying storage. + * @deprecated This method may be removed without any notification. + */ + void *release() noexcept override { + void *original_start = start_; + start_ = nullptr; + size_ = 0; + capacity_ = 0; + return original_start; + }; + // generators: void fill(const cytnx_complex128 &val); void fill(const cytnx_complex64 &val); @@ -304,6 +337,12 @@ namespace cytnx { template void SetItem(cytnx_uint64 index, const OtherDType &value); void SetItem(cytnx_uint64 index, const Scalar &value); + + void *start_; + unsigned long long size_; + unsigned long long capacity_; + unsigned int dtype_; + int device_; }; ///@endcond @@ -583,7 +622,7 @@ namespace cytnx { @brief the dtype-id of current Storage, see cytnx::Type for more details. @return [cytnx_uint64] the dtype-id. */ - const unsigned int &dtype() const { return this->_impl->dtype; } + unsigned int dtype() const { return this->_impl->dtype(); } /** @brief the dtype (std::string) of current Storage, see cytnx::Type for more details. @@ -597,7 +636,7 @@ namespace cytnx { @brief the device-id of current Storage, see cytnx::Device for more details. @return [cytnx_int64] the device-id. */ - const int &device() const { return this->_impl->device; } + int device() const { return this->_impl->device(); } /** @brief the device (std::string) of current Storage, see cytnx::Device for more details. @@ -699,7 +738,7 @@ namespace cytnx { @return [cytnx_uint64] */ - const unsigned long long &size() const { return this->_impl->len; } + unsigned long long size() const { return this->_impl->size(); } /** @brief the capacity in the Storage. 
@@ -708,7 +747,18 @@ namespace cytnx { @return [cytnx_uint64] */ - const unsigned long long &capacity() const { return this->_impl->cap; } + unsigned long long capacity() const { return this->_impl->capacity(); } + + /** + * @brief Drop the ownership of the underlying contiguous memory. + * + * The caller MUST take the ownership before calling this method. Any operation following this + * method causes undefined behavior. + * + * @return The pointer referencing the underlying storage. + * @deprecated This method may be removed without any notification. + */ + void *release() noexcept { return this->_impl->release(); } /** @brief print the info of the Storage, including the device, dtype and size. @@ -818,64 +868,64 @@ namespace cytnx { // check: cytnx_error_msg(1, "[FATAL] ERROR unsupport type%s", "\n"); // this->_impl->Init(vin.size(),device); - // memcpy(this->_impl->Mem,&vin[0],sizeof(T)*vin.size()); + // memcpy(this->_impl->data(),&vin[0],sizeof(T)*vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.ComplexDouble](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_complex128) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_complex128) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.ComplexFloat](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_complex64) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_complex64) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Double](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_double) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_double) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Float](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_float) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_float) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Uint64](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_uint64) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_uint64) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Int64](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_int64) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_int64) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Uint32](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_uint32) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_uint32) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Int32](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_int32) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_int32) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Uint16](); this->_impl->Init(vin.size(), device); - 
memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_uint16) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_uint16) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Int16](); this->_impl->Init(vin.size(), device); - memcpy(this->_impl->Mem, &vin[0], sizeof(cytnx_int16) * vin.size()); + memcpy(this->_impl->data(), &vin[0], sizeof(cytnx_int16) * vin.size()); } void _from_vector(const std::vector &vin, const int device = -1) { this->_impl = __SII.USIInit[Type.Bool](); this->_impl->Init(vin.size(), device); - this->_impl->_cpy_bool(this->_impl->Mem, vin); - // memcpy(this->_impl->Mem,vin.data(),sizeof(cytnx_bool)*vin.size()); + this->_impl->_cpy_bool(this->_impl->data(), vin); + // memcpy(this->_impl->data(),vin.data(),sizeof(cytnx_bool)*vin.size()); } /// @endcond diff --git a/pybind/generator_py.cpp b/pybind/generator_py.cpp index 983848e2..06b26d9e 100644 --- a/pybind/generator_py.cpp +++ b/pybind/generator_py.cpp @@ -140,7 +140,7 @@ void generator_binding(py::module &m) { Tensor m; m.Init(shape, dtype); - memcpy(m.storage()._impl->Mem, info.ptr, Totbytes); + memcpy(m.storage()._impl->data(), info.ptr, Totbytes); return m; }); } diff --git a/pybind/storage_py.cpp b/pybind/storage_py.cpp index b4d82aff..2808b667 100644 --- a/pybind/storage_py.cpp +++ b/pybind/storage_py.cpp @@ -1,6 +1,7 @@ -#include #include #include +#include +#include #include #include @@ -34,9 +35,9 @@ void storage_binding(py::module &m) { } // calculate stride: - std::vector stride(1, Type.typeSize(tmpIN.dtype())); + size_t type_size = Type.typeSize(tmpIN.dtype()); + std::vector stride(1, type_size); std::vector shape(1, tmpIN.size()); - // ssize_t accu = tmpIN.size(); py::buffer_info npbuf; std::string chr_dtype; @@ -63,19 +64,11 @@ void storage_binding(py::module &m) { "\n"); } - npbuf = py::buffer_info(tmpIN._impl->Mem, // ptr - Type.typeSize(tmpIN.dtype()), // size of elem + // Call `.release()` to avoid the memory passed to numpy being freed. + npbuf = py::buffer_info(tmpIN.release(), type_size, chr_dtype, // pss format - 1, // rank - shape, // shape - stride // stride - ); - py::array out(npbuf); - // delegate numpy array with it's ptr, and swap a auxiliary ptr for intrusive_ptr to - // free. - void *pswap = malloc(sizeof(bool)); - tmpIN._impl->Mem = pswap; - return out; + /* rank= */ 1, shape, stride); + return py::array(npbuf); }) // construction diff --git a/pybind/tensor_py.cpp b/pybind/tensor_py.cpp index c77c5b64..27031d86 100644 --- a/pybind/tensor_py.cpp +++ b/pybind/tensor_py.cpp @@ -129,21 +129,20 @@ void tensor_binding(py::module &m) { cytnx_error_msg(true, "[ERROR] Void Type Tensor cannot convert to numpy ndarray%s", "\n"); } - npbuf = py::buffer_info(tmpIN.storage()._impl->Mem, // ptr + npbuf = py::buffer_info(tmpIN.storage()._impl->data(), // ptr Type.typeSize(tmpIN.dtype()), // size of elem chr_dtype, // pss format tmpIN.rank(), // rank shape, // shape stride // stride ); - py::array out(npbuf); - // delegate numpy array with it's ptr, and swap a auxiliary ptr for intrusive_ptr to - // free. - if (share_mem == false) { - void *pswap = malloc(sizeof(bool)); - tmpIN.storage()._impl->Mem = pswap; + + if (!share_mem) { + // Avoid the memory passed to numpy being freed. 
+ tmpIN.storage().release(); } - return out; + + return py::array(npbuf); }, py::arg("share_mem") = false) // construction diff --git a/pybind/unitensor_py.cpp b/pybind/unitensor_py.cpp index e87c6f7d..66a893f8 100644 --- a/pybind/unitensor_py.cpp +++ b/pybind/unitensor_py.cpp @@ -33,7 +33,7 @@ class cHclass { } bool exists() const { return this->proxy.exists(); } - int dtype() const { return this->proxy._insimpl->dtype; } + int dtype() const { return this->proxy._insimpl->dtype(); } cytnx_double get_elem_d() const { return cytnx_double(Scalar(this->proxy)); } cytnx_float get_elem_f() const { return cytnx_float(Scalar(this->proxy)); } diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 17f2198d..0b8f2079 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -1104,9 +1104,9 @@ namespace cytnx { ms[binx] = this->_blocks[a].shape()[0]; ns[binx] = tmp_Rtn->_blocks[b].shape()[1]; ks[binx] = comm_dim; - LMems[binx] = this->_blocks[a].storage()._impl->Mem; - RMems[binx] = tmp_Rtn->_blocks[b].storage()._impl->Mem; - CMems[binx] = tmp->_blocks[targ_b].storage()._impl->Mem; + LMems[binx] = this->_blocks[a].storage()._impl->data(); + RMems[binx] = tmp_Rtn->_blocks[b].storage()._impl->data(); + CMems[binx] = tmp->_blocks[targ_b].storage()._impl->data(); } else { tmp->_blocks[targ_b] += linalg::Matmul(this->_blocks[a], tmp_Rtn->_blocks[b]) .reshape(tmp->_blocks[targ_b].shape()); diff --git a/src/Tensor.cpp b/src/Tensor.cpp index 526d41b8..ae85b1a7 100644 --- a/src/Tensor.cpp +++ b/src/Tensor.cpp @@ -76,7 +76,7 @@ namespace cytnx { cytnx_error_msg(this->dtype() == 0, "[ERROR] operation not allowed for empty (void) Tensor.%s", "\n"); // dtype()-1 here because we have removed void from the variant - return void_ptr_to_variant_impl(this->_impl->_storage._impl->Mem, this->dtype() - 1, + return void_ptr_to_variant_impl(this->_impl->_storage._impl->data(), this->dtype() - 1, std::make_index_sequence>{}); } @@ -103,7 +103,7 @@ namespace cytnx { "\n"); // dtype()-1 here because we have removed void from the variant return gpu_void_ptr_to_variant_impl( - this->_impl->_storage._impl->Mem, this->dtype() - 1, + this->_impl->_storage._impl->data(), this->dtype() - 1, std::make_index_sequence>{}); } #endif // UNI_GPU diff --git a/src/backend/Scalar.cpp b/src/backend/Scalar.cpp index 8371dd5a..ec060b86 100644 --- a/src/backend/Scalar.cpp +++ b/src/backend/Scalar.cpp @@ -124,7 +124,7 @@ namespace cytnx { return *this; } - bool Scalar::Sproxy::exists() const { return this->_insimpl->dtype != Type.Void; }; + bool Scalar::Sproxy::exists() const { return this->_insimpl->dtype() != Type.Void; }; Scalar Scalar::Sproxy::real() { return Scalar(*this).real(); } Scalar Scalar::Sproxy::imag() { return Scalar(*this).imag(); } diff --git a/src/backend/Storage.cpp b/src/backend/Storage.cpp index e5b6c37a..7f9c60ce 100644 --- a/src/backend/Storage.cpp +++ b/src/backend/Storage.cpp @@ -136,18 +136,21 @@ namespace cytnx { unsigned int IDDs = 999; f.write((char *)&IDDs, sizeof(unsigned int)); - f.write((char *)&this->size(), sizeof(unsigned long long)); - f.write((char *)&this->dtype(), sizeof(unsigned int)); - f.write((char *)&this->device(), sizeof(int)); + auto write_number = [&f](auto number) { + f.write(reinterpret_cast(&number), sizeof(number)); + }; + write_number(this->size()); + write_number(this->dtype()); + write_number(this->device()); // data: if (this->device() == Device.cpu) { - f.write((char *)this->_impl->Mem, Type.typeSize(this->dtype()) * this->size()); + f.write((char 
*)this->_impl->data(), Type.typeSize(this->dtype()) * this->size()); } else { #ifdef UNI_GPU checkCudaErrors(cudaSetDevice(this->device())); void *htmp = malloc(Type.typeSize(this->dtype()) * this->size()); - checkCudaErrors(cudaMemcpy(htmp, this->_impl->Mem, + checkCudaErrors(cudaMemcpy(htmp, this->_impl->data(), Type.typeSize(this->dtype()) * this->size(), cudaMemcpyDeviceToHost)); f.write((char *)htmp, Type.typeSize(this->dtype()) * this->size()); @@ -165,12 +168,12 @@ namespace cytnx { // data: if (this->device() == Device.cpu) { - f.write((char *)this->_impl->Mem, Type.typeSize(this->dtype()) * this->size()); + f.write((char *)this->_impl->data(), Type.typeSize(this->dtype()) * this->size()); } else { #ifdef UNI_GPU checkCudaErrors(cudaSetDevice(this->device())); void *htmp = malloc(Type.typeSize(this->dtype()) * this->size()); - checkCudaErrors(cudaMemcpy(htmp, this->_impl->Mem, + checkCudaErrors(cudaMemcpy(htmp, this->_impl->data(), Type.typeSize(this->dtype()) * this->size(), cudaMemcpyDeviceToHost)); f.write((char *)htmp, Type.typeSize(this->dtype()) * this->size()); @@ -284,14 +287,14 @@ namespace cytnx { // data: if (dv == Device.cpu) { - f.read((char *)this->_impl->Mem, Type.typeSize(dt) * sz); + f.read((char *)this->_impl->data(), Type.typeSize(dt) * sz); } else { #ifdef UNI_GPU checkCudaErrors(cudaSetDevice(dv)); void *htmp = malloc(Type.typeSize(dt) * sz); f.read((char *)htmp, Type.typeSize(dt) * sz); checkCudaErrors( - cudaMemcpy(this->_impl->Mem, htmp, Type.typeSize(dt) * sz, cudaMemcpyHostToDevice)); + cudaMemcpy(this->_impl->data(), htmp, Type.typeSize(dt) * sz, cudaMemcpyHostToDevice)); free(htmp); #else @@ -311,7 +314,7 @@ namespace cytnx { this->_impl = __SII.USIInit[dtype](); this->_impl->Init(Nelem, Device.cpu); - f.read((char *)this->_impl->Mem, Type.typeSize(dtype) * Nelem); + f.read((char *)this->_impl->data(), Type.typeSize(dtype) * Nelem); } Scalar::Sproxy Storage::operator()(const cytnx_uint64 &idx) { diff --git a/src/backend/StorageImplementation.cpp b/src/backend/StorageImplementation.cpp index 683a551c..914d0cb3 100644 --- a/src/backend/StorageImplementation.cpp +++ b/src/backend/StorageImplementation.cpp @@ -64,34 +64,35 @@ namespace cytnx { template void StorageImplementation::Init(const unsigned long long &len_in, const int &device, const bool &init_zero) { - this->len = len_in; + this->size_ = len_in; // check: // cytnx_error_msg(len_in < 1, "%s", "[ERROR] cannot init a Storage with zero element"); - this->dtype = Type.cy_typeid(DType()); + dtype_ = Type.cy_typeid(DType()); - if (this->len % STORAGE_DEFT_SZ) { - this->cap = ((unsigned long long)((this->len) / STORAGE_DEFT_SZ) + 1) * STORAGE_DEFT_SZ; + if (this->size_ % STORAGE_DEFT_SZ) { + this->capacity_ = + ((unsigned long long)((this->size_) / STORAGE_DEFT_SZ) + 1) * STORAGE_DEFT_SZ; } else { - this->cap = this->len; + this->capacity_ = this->size_; } if (device == Device.cpu) { if (init_zero) - this->Mem = utils_internal::Calloc_cpu(this->cap, sizeof(DType)); + this->start_ = utils_internal::Calloc_cpu(this->capacity_, sizeof(DType)); else - this->Mem = utils_internal::Malloc_cpu(this->cap * sizeof(DType)); + this->start_ = utils_internal::Malloc_cpu(this->capacity_ * sizeof(DType)); } else { #ifdef UNI_GPU cytnx_error_msg(device >= Device.Ngpus, "%s", "[ERROR] invalid device."); checkCudaErrors(cudaSetDevice(device)); - this->Mem = utils_internal::cuCalloc_gpu(this->cap, sizeof(DType)); + this->start_ = utils_internal::cuCalloc_gpu(this->capacity_, sizeof(DType)); #else cytnx_error_msg(1, "%s", 
"[ERROR] cannot init a Storage on gpu without CUDA support."); #endif } - this->device = device; + this->device_ = device; } template @@ -101,24 +102,24 @@ namespace cytnx { //[note], this is an internal function, the device should match the device_id that allocate the // pointer if the pointer is on GPU device. - this->Mem = rawptr; - this->len = len_in; + this->start_ = rawptr; + this->size_ = len_in; if (iscap) { - this->cap = cap_in; + this->capacity_ = cap_in; } else { - this->cap = len_in; + this->capacity_ = len_in; } - cytnx_error_msg(this->cap % STORAGE_DEFT_SZ != 0, + cytnx_error_msg(this->capacity_ % STORAGE_DEFT_SZ != 0, "[ERROR] _Init_by_ptr cannot have not %dx cap_in.", STORAGE_DEFT_SZ); #ifdef UNI_DEBUG cytnx_error_msg(len_in < 1, "%s", "[ERROR] _Init_by_ptr cannot have len_in < 1."); - cytnx_error_msg(this->cap < this->len, "%s", + cytnx_error_msg(this->capacity_ < this->size_, "%s", "[ERROR] _Init_by_ptr cannot have capacity < size."); #endif - this->dtype = Type.cy_typeid(DType()); - this->device = device; + this->dtype_ = Type.cy_typeid(DType()); + this->device_ = device; } template @@ -130,14 +131,14 @@ namespace cytnx { template boost::intrusive_ptr StorageImplementation::clone() { boost::intrusive_ptr out(new StorageImplementation()); - out->Init(this->len, this->device); - if (this->device == Device.cpu) { - memcpy(out->Mem, this->Mem, sizeof(DType) * this->len); + out->Init(this->size_, this->device_); + if (this->device_ == Device.cpu) { + memcpy(out->data(), this->start_, sizeof(DType) * this->size_); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); - checkCudaErrors( - cudaMemcpy(out->Mem, this->Mem, sizeof(DType) * this->len, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaSetDevice(this->device_)); + checkCudaErrors(cudaMemcpy(out->data(), this->start_, sizeof(DType) * this->size_, + cudaMemcpyDeviceToDevice)); #else cytnx_error_msg(1, "%s", "[ERROR] cannot clone a Storage on gpu without CUDA support."); #endif @@ -150,11 +151,11 @@ namespace cytnx { const std::vector &mapper, const std::vector &invmapper) { boost::intrusive_ptr tmp(this); - if (this->device == Device.cpu) { + if (this->device_ == Device.cpu) { utils_internal::MoveMemoryCpu(tmp, old_shape, mapper, invmapper, 1); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); + checkCudaErrors(cudaSetDevice(this->device_)); utils_internal::MoveMemoryGpu(tmp, old_shape, mapper, invmapper, 1); #else cytnx_error_msg(1, "%s", "[ERROR][Internal] try to call GPU section without CUDA support"); @@ -167,11 +168,11 @@ namespace cytnx { const std::vector &old_shape, const std::vector &mapper, const std::vector &invmapper) { boost::intrusive_ptr tmp(this); - if (this->device == Device.cpu) { + if (this->device_ == Device.cpu) { return utils_internal::MoveMemoryCpu(tmp, old_shape, mapper, invmapper, 0); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); + checkCudaErrors(cudaSetDevice(this->device_)); return utils_internal::MoveMemoryGpu(tmp, old_shape, mapper, invmapper, 0); #else cytnx_error_msg(1, "%s", "[ERROR][Internal] try to call GPU section without CUDA support"); @@ -182,18 +183,18 @@ namespace cytnx { template void StorageImplementation::to_(const int &device) { - if (this->device != device) { - if (this->device == Device.cpu) { + if (this->device_ != device) { + if (this->device_ == Device.cpu) { // here, cpu->gpu with gid=device #ifdef UNI_GPU cytnx_error_msg(device >= Device.Ngpus, "%s", "[ERROR] invalid device."); cudaSetDevice(device); - 
void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->cap); + void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->capacity_); checkCudaErrors( - cudaMemcpy(dtmp, this->Mem, sizeof(DType) * this->len, cudaMemcpyHostToDevice)); - free(this->Mem); - this->Mem = dtmp; - this->device = device; + cudaMemcpy(dtmp, this->start_, sizeof(DType) * this->size_, cudaMemcpyHostToDevice)); + free(this->start_); + this->start_ = dtmp; + this->device_ = device; #else cytnx_error_msg(1, "%s", "[ERROR] try to move from cpu(Host) to gpu without CUDA support."); #endif @@ -201,23 +202,23 @@ namespace cytnx { #ifdef UNI_GPU if (device == Device.cpu) { // here, gpu->cpu - cudaSetDevice(this->device); - void *htmp = malloc(sizeof(DType) * this->cap); + cudaSetDevice(this->device_); + void *htmp = malloc(sizeof(DType) * this->capacity_); checkCudaErrors( - cudaMemcpy(htmp, this->Mem, sizeof(DType) * this->len, cudaMemcpyDeviceToHost)); - cudaFree(this->Mem); - this->Mem = htmp; - this->device = device; + cudaMemcpy(htmp, this->start_, sizeof(DType) * this->size_, cudaMemcpyDeviceToHost)); + cudaFree(this->start_); + this->start_ = htmp; + this->device_ = device; } else { // here, gpu->gpu cytnx_error_msg(device >= Device.Ngpus, "%s", "[ERROR] invalid device."); cudaSetDevice(device); - void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->cap); + void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->capacity_); checkCudaErrors( - cudaMemcpyPeer(dtmp, device, this->Mem, this->device, sizeof(DType) * this->len)); - cudaFree(this->Mem); - this->Mem = dtmp; - this->device = device; + cudaMemcpyPeer(dtmp, device, this->start_, this->device_, sizeof(DType) * this->size_)); + cudaFree(this->start_); + this->start_ = dtmp; + this->device_ = device; } #else cytnx_error_msg( @@ -229,21 +230,21 @@ namespace cytnx { } template boost::intrusive_ptr StorageImplementation::to(const int &device) { - // Here, we follow pytorch scheme. if the device is the same as this->device, then return this + // Here, we follow pytorch scheme. if the device is the same as this->device_, then return this // (python self) otherwise, return a clone on different device. 
- if (this->device == device) { + if (this->device_ == device) { return this; } else { - if (this->device == Device.cpu) { + if (this->device_ == Device.cpu) { // here, cpu->gpu with gid=device #ifdef UNI_GPU cytnx_error_msg(device >= Device.Ngpus, "%s", "[ERROR] invalid device."); cudaSetDevice(device); - void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->cap); + void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->capacity_); checkCudaErrors( - cudaMemcpy(dtmp, this->Mem, sizeof(DType) * this->len, cudaMemcpyHostToDevice)); + cudaMemcpy(dtmp, this->start_, sizeof(DType) * this->size_, cudaMemcpyHostToDevice)); boost::intrusive_ptr out(new StorageImplementation()); - out->_Init_byptr(dtmp, this->len, device, true, this->cap); + out->_Init_byptr(dtmp, this->size_, device, true, this->capacity_); return out; #else cytnx_error_msg(1, "%s", "[ERROR] try to move from cpu(Host) to gpu without CUDA support."); @@ -253,22 +254,22 @@ namespace cytnx { #ifdef UNI_GPU if (device == Device.cpu) { // here, gpu->cpu - cudaSetDevice(this->device); - void *htmp = malloc(sizeof(DType) * this->cap); + cudaSetDevice(this->device_); + void *htmp = malloc(sizeof(DType) * this->capacity_); checkCudaErrors( - cudaMemcpy(htmp, this->Mem, sizeof(DType) * this->len, cudaMemcpyDeviceToHost)); + cudaMemcpy(htmp, this->start_, sizeof(DType) * this->size_, cudaMemcpyDeviceToHost)); boost::intrusive_ptr out(new StorageImplementation()); - out->_Init_byptr(htmp, this->len, device, true, this->cap); + out->_Init_byptr(htmp, this->size_, device, true, this->capacity_); return out; } else { // here, gpu->gpu cytnx_error_msg(device >= Device.Ngpus, "%s", "[ERROR] invalid device."); cudaSetDevice(device); - void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->cap); + void *dtmp = utils_internal::cuMalloc_gpu(sizeof(DType) * this->capacity_); checkCudaErrors( - cudaMemcpyPeer(dtmp, device, this->Mem, this->device, sizeof(DType) * this->len)); + cudaMemcpyPeer(dtmp, device, this->start_, this->device_, sizeof(DType) * this->size_)); boost::intrusive_ptr out(new StorageImplementation()); - out->_Init_byptr(dtmp, this->len, device, true, this->cap); + out->_Init_byptr(dtmp, this->size_, device, true, this->capacity_); return out; } #else @@ -291,22 +292,22 @@ namespace cytnx { for (cytnx_uint64 i = 0; i < shape.size(); i++) { Ne *= shape[i]; } - if (Ne != this->len) { + if (Ne != this->size_) { cytnx_error_msg(1, "%s", "PrintElem_byShape, the number of shape not match with the No. of elements."); } - if (len == 0) { + if (size_ == 0) { os << "[ "; os << "\nThe Storage has not been allocated or linked.\n"; os << "]\n"; } else { - os << std::endl << "Total elem: " << this->len << "\n"; + os << std::endl << "Total elem: " << this->size_ << "\n"; - os << "type : " << Type.getname(this->dtype) << std::endl; + os << "type : " << Type.getname(this->dtype_) << std::endl; - int atDevice = this->device; - os << Device.getname(this->device) << std::endl; + int atDevice = this->device_; + os << Device.getname(this->device_) << std::endl; sprintf(buffer, "%s", "Shape :"); os << std::string(buffer); @@ -319,14 +320,14 @@ namespace cytnx { os << ")" << std::endl; // temporary move to cpu for printing. 
- if (this->device != Device.cpu) { + if (this->device_ != Device.cpu) { this->to_(Device.cpu); } std::vector stk(shape.size(), 0), stk2; cytnx_uint64 s; - DType *elem_ptr_ = reinterpret_cast(this->Mem); + DType *elem_ptr_ = reinterpret_cast(this->start_); if (mapper.empty()) { cytnx_uint64 cnt = 0; @@ -445,9 +446,9 @@ namespace cytnx { template void StorageImplementation::print_elems() { - DType *elem_ptr_ = reinterpret_cast(this->Mem); + DType *elem_ptr_ = reinterpret_cast(this->start_); cout << "[ "; - for (unsigned long long cnt = 0; cnt < this->len; cnt++) { + for (unsigned long long cnt = 0; cnt < this->size_; cnt++) { PrintValueAndSpace(std::cout, elem_ptr_[cnt]); } std::cout << "]" << std::endl; @@ -500,12 +501,12 @@ namespace cytnx { template void StorageImplementation::set_zeros() { - if (this->device == Device.cpu) { - utils_internal::SetZeros(this->Mem, sizeof(DType) * this->len); + if (this->device_ == Device.cpu) { + utils_internal::SetZeros(this->start_, sizeof(DType) * this->size_); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); - utils_internal::cuSetZeros(this->Mem, sizeof(DType) * this->len); + checkCudaErrors(cudaSetDevice(this->device_)); + utils_internal::cuSetZeros(this->start_, sizeof(DType) * this->size_); #else cytnx_error_msg(1, "[ERROR][set_zeros] fatal, the storage is on gpu without CUDA support.%s", "\n"); @@ -517,26 +518,26 @@ namespace cytnx { void StorageImplementation::resize(const cytnx_uint64 &newsize) { // cytnx_error_msg(newsize < 1,"[ERROR]resize should have size > 0%s","\n"); - if (newsize > this->cap) { + if (newsize > this->capacity_) { if (newsize % STORAGE_DEFT_SZ) { - this->cap = ((unsigned long long)((newsize) / STORAGE_DEFT_SZ) + 1) * STORAGE_DEFT_SZ; + this->capacity_ = ((unsigned long long)((newsize) / STORAGE_DEFT_SZ) + 1) * STORAGE_DEFT_SZ; } else { - this->cap = newsize; + this->capacity_ = newsize; } - if (this->device == Device.cpu) { - void *htmp = calloc(this->cap, sizeof(DType)); - memcpy(htmp, this->Mem, sizeof(DType) * this->len); - free(this->Mem); - this->Mem = htmp; + if (this->device_ == Device.cpu) { + void *htmp = calloc(this->capacity_, sizeof(DType)); + memcpy(htmp, this->start_, sizeof(DType) * this->size_); + free(this->start_); + this->start_ = htmp; } else { #ifdef UNI_GPU - cytnx_error_msg(device >= Device.Ngpus, "%s", "[ERROR] invalid device."); - cudaSetDevice(device); - void *dtmp = utils_internal::cuCalloc_gpu(this->cap, sizeof(DType)); + cytnx_error_msg(device_ >= Device.Ngpus, "%s", "[ERROR] invalid device."); + cudaSetDevice(device_); + void *dtmp = utils_internal::cuCalloc_gpu(this->capacity_, sizeof(DType)); checkCudaErrors( - cudaMemcpyPeer(dtmp, device, this->Mem, this->device, sizeof(DType) * this->len)); - cudaFree(this->Mem); - this->Mem = dtmp; + cudaMemcpyPeer(dtmp, device_, this->start_, this->device_, sizeof(DType) * this->size_)); + cudaFree(this->start_); + this->start_ = dtmp; #else cytnx_error_msg( 1, "%s", @@ -544,7 +545,7 @@ namespace cytnx { #endif } } - this->len = newsize; + this->size_ = newsize; } template @@ -603,29 +604,29 @@ namespace cytnx { return nullptr; } else { using ValueType = typename DType::value_type; - if (this->device == Device.cpu) { + if (this->device_ == Device.cpu) { boost::intrusive_ptr out(new StorageImplementation()); - void *dtmp = malloc(sizeof(ValueType) * this->cap); + void *dtmp = malloc(sizeof(ValueType) * this->capacity_); if constexpr (std::is_same_v) { - utils_internal::Complexmem_cpu_cdtd(dtmp, this->Mem, this->len, true); + 
utils_internal::Complexmem_cpu_cdtd(dtmp, this->start_, this->size_, true); } else { // std::is_same_v - utils_internal::Complexmem_cpu_cftf(dtmp, this->Mem, this->len, true); + utils_internal::Complexmem_cpu_cftf(dtmp, this->start_, this->size_, true); } - out->_Init_byptr(dtmp, this->len, this->device, true, this->cap); + out->_Init_byptr(dtmp, this->size_, this->device_, true, this->capacity_); return out; } else { #ifdef UNI_GPU boost::intrusive_ptr out(new StorageImplementation()); - cudaSetDevice(device); - void *dtmp = utils_internal::cuMalloc_gpu(sizeof(ValueType) * this->cap); + cudaSetDevice(device_); + void *dtmp = utils_internal::cuMalloc_gpu(sizeof(ValueType) * this->capacity_); if constexpr (std::is_same_v) { - utils_internal::cuComplexmem_gpu_cdtd(dtmp, this->Mem, this->len, true); + utils_internal::cuComplexmem_gpu_cdtd(dtmp, this->start_, this->size_, true); } else { // std::is_same_v - utils_internal::cuComplexmem_gpu_cftf(dtmp, this->Mem, this->len, true); + utils_internal::cuComplexmem_gpu_cftf(dtmp, this->start_, this->size_, true); } - out->_Init_byptr(dtmp, this->len, this->device, true, this->cap); + out->_Init_byptr(dtmp, this->size_, this->device_, true, this->capacity_); return out; #else cytnx_error_msg( @@ -644,29 +645,29 @@ namespace cytnx { return nullptr; } else { using ValueType = typename DType::value_type; - if (this->device == Device.cpu) { + if (this->device_ == Device.cpu) { boost::intrusive_ptr out(new StorageImplementation()); - void *dtmp = malloc(sizeof(ValueType) * this->cap); + void *dtmp = malloc(sizeof(ValueType) * this->capacity_); if constexpr (std::is_same_v) { - utils_internal::Complexmem_cpu_cdtd(dtmp, this->Mem, this->len, false); + utils_internal::Complexmem_cpu_cdtd(dtmp, this->start_, this->size_, false); } else { // std::is_same_v - utils_internal::Complexmem_cpu_cftf(dtmp, this->Mem, this->len, false); + utils_internal::Complexmem_cpu_cftf(dtmp, this->start_, this->size_, false); } - out->_Init_byptr(dtmp, this->len, this->device, true, this->cap); + out->_Init_byptr(dtmp, this->size_, this->device_, true, this->capacity_); return out; } else { #ifdef UNI_GPU boost::intrusive_ptr out(new StorageImplementation()); - cudaSetDevice(device); - void *dtmp = utils_internal::cuMalloc_gpu(sizeof(ValueType) * this->cap); + cudaSetDevice(device_); + void *dtmp = utils_internal::cuMalloc_gpu(sizeof(ValueType) * this->capacity_); if constexpr (std::is_same_v) { - utils_internal::cuComplexmem_gpu_cdtd(dtmp, this->Mem, this->len, false); + utils_internal::cuComplexmem_gpu_cdtd(dtmp, this->start_, this->size_, false); } else { // std::is_same_v - utils_internal::cuComplexmem_gpu_cftf(dtmp, this->Mem, this->len, false); + utils_internal::cuComplexmem_gpu_cftf(dtmp, this->start_, this->size_, false); } - out->_Init_byptr(dtmp, this->len, this->device, true, this->cap); + out->_Init_byptr(dtmp, this->size_, this->device_, true, this->capacity_); return out; #else cytnx_error_msg( @@ -742,12 +743,12 @@ namespace cytnx { Type.getname(Type.cy_typeid(DType())).c_str()); return; } else { - if (this->device == Device.cpu) { - utils_internal::FillCpu(this->Mem, static_cast(value), this->len); + if (this->device_ == Device.cpu) { + utils_internal::FillCpu(this->start_, static_cast(value), this->size_); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); - utils_internal::FillGpu(this->Mem, static_cast(value), this->len); + checkCudaErrors(cudaSetDevice(this->device_)); + utils_internal::FillGpu(this->start_, static_cast(value), 
this->size_); #else cytnx_error_msg(true, "[ERROR][fill] fatal internal, %s", "storage is on gpu without CUDA support\n"); @@ -760,12 +761,12 @@ namespace cytnx { template void StorageImplementation::Append(const OtherDType &value) { if constexpr (std::is_constructible_v) { - if (this->len == this->cap) { - this->resize(this->len + 1); + if (this->size_ == this->capacity_) { + this->resize(this->size_ + 1); } else { - ++this->len; + ++this->size_; } - this->at(this->len - 1) = value; + this->at(this->size_ - 1) = value; } else { cytnx_error_msg(true, "[ERROR] cannot append %s value into %s container", Type.getname(Type.cy_typeid(OtherDType())).c_str(), @@ -775,17 +776,17 @@ namespace cytnx { template void StorageImplementation::Append(const Scalar &value) { - if (this->len == this->cap) { - this->resize(this->len + 1); + if (this->size_ == this->capacity_) { + this->resize(this->size_ + 1); } else { - ++this->len; + ++this->size_; } if constexpr (is_same_v) { - this->at(this->len - 1) = complex128(value); + this->at(this->size_ - 1) = complex128(value); } else if constexpr (is_same_v) { - this->at(this->len - 1) = complex64(value); + this->at(this->size_ - 1) = complex64(value); } else { - this->at(this->len - 1) = static_cast(value); + this->at(this->size_ - 1) = static_cast(value); } } diff --git a/src/backend/Storage_base.cpp b/src/backend/Storage_base.cpp index da2f69ff..2e1c49d3 100644 --- a/src/backend/Storage_base.cpp +++ b/src/backend/Storage_base.cpp @@ -95,21 +95,22 @@ namespace cytnx { boost::intrusive_ptr Storage_base::astype(const unsigned int &dtype) { boost::intrusive_ptr out(new Storage_base()); - if (dtype == this->dtype) return boost::intrusive_ptr(this); + if (dtype == this->dtype()) return boost::intrusive_ptr(this); - if (this->device == Device.cpu) { - if (utils_internal::uii.ElemCast[this->dtype][dtype] == NULL) { + if (this->device() == Device.cpu) { + if (utils_internal::uii.ElemCast[this->dtype()][dtype] == NULL) { cytnx_error_msg(1, "[ERROR] not support type with dtype=%d", dtype); } else { - utils_internal::uii.ElemCast[this->dtype][dtype](this, out, this->len, 1); + utils_internal::uii.ElemCast[this->dtype()][dtype](this, out, this->size(), 1); } } else { #ifdef UNI_GPU - if (utils_internal::uii.cuElemCast[this->dtype][dtype] == NULL) { + if (utils_internal::uii.cuElemCast[this->dtype()][dtype] == NULL) { cytnx_error_msg(1, "[ERROR] not support type with dtype=%d", dtype); } else { - // std::cout << this->device << std::endl; - utils_internal::uii.cuElemCast[this->dtype][dtype](this, out, this->len, this->device); + // std::cout << this->device() << std::endl; + utils_internal::uii.cuElemCast[this->dtype()][dtype](this, out, this->size(), + this->device()); } #else cytnx_error_msg( @@ -130,8 +131,8 @@ namespace cytnx { return out; } - string Storage_base::dtype_str() const { return Type.getname(this->dtype); } - string Storage_base::device_str() const { return Device.getname(this->device); } + string Storage_base::dtype_str() const { return Type.getname(this->dtype()); } + string Storage_base::device_str() const { return Device.getname(this->device()); } void Storage_base::_Init_byptr(void *rawptr, const unsigned long long &len_in, const int &device, const bool &iscap, const unsigned long long &cap_in) { cytnx_error_msg(1, "%s", "[ERROR] call _Init_byptr in base"); @@ -139,12 +140,12 @@ namespace cytnx { Storage_base::~Storage_base() { // cout << "delet" << endl; - if (Mem != NULL) { - if (this->device == Device.cpu) { - free(Mem); + if (this->data() != NULL) { 
+ if (this->device() == Device.cpu) { + free(this->data()); } else { #ifdef UNI_GPU - checkCudaErrors(cudaFree(Mem)); + checkCudaErrors(cudaFree(this->data())); #else cytnx_error_msg(1, "%s", "[ERROR] trying to free an GPU memory without CUDA install"); #endif @@ -181,8 +182,8 @@ namespace cytnx { void Storage_base::print_info() { cout << "dtype : " << this->dtype_str() << endl; - cout << "device: " << Device.getname(this->device) << endl; - cout << "size : " << this->len << endl; + cout << "device: " << Device.getname(this->device()) << endl; + cout << "size : " << this->size() << endl; } void Storage_base::print_elems() { cytnx_error_msg(1, "%s", "[ERROR] call print_elems directly on Void Storage."); @@ -203,10 +204,10 @@ namespace cytnx { const std::vector> &locators, const cytnx_uint64 &Nunit) { if (User_debug) - cytnx_error_msg(out->dtype != this->dtype, "%s", "[ERROR][DEBUG] %s", + cytnx_error_msg(out->dtype() != this->dtype(), "%s", "[ERROR][DEBUG] %s", "internal, the output dtype does not match current storage dtype.\n"); - cytnx_error_msg(this->device != out->device, + cytnx_error_msg(this->device() != out->device(), "[ERROR] cannot GetElem_byShape_v2 between different device.%s", "\n"); cytnx_uint64 TotalElem = 1; for (cytnx_uint32 i = 0; i < locators.size(); i++) { @@ -235,18 +236,18 @@ namespace cytnx { // std::cout << c_offj << std::endl; // std::cout << new_offj << std::endl; // std::cout << TotalElem << std::endl; - if (this->device == Device.cpu) { - utils_internal::uii.GetElems_conti_ii[this->dtype](out->Mem, this->Mem, c_offj, new_offj, - locators, TotalElem, Nunit); + if (this->device() == Device.cpu) { + utils_internal::uii.GetElems_conti_ii[this->dtype()](out->data(), this->data(), c_offj, + new_offj, locators, TotalElem, Nunit); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); + checkCudaErrors(cudaSetDevice(this->device())); // cytnx_error_msg(true, // "[Developing][GPU Getelem v2][Note, currently slice on GPU is disabled for // " "further inspection]%s", // "\n"); - utils_internal::uii.cuGetElems_conti_ii[this->dtype](out->Mem, this->Mem, c_offj, new_offj, - locators, TotalElem, Nunit); + utils_internal::uii.cuGetElems_conti_ii[this->dtype()](out->data(), this->data(), c_offj, + new_offj, locators, TotalElem, Nunit); #else cytnx_error_msg(true, "[ERROR][GetElem_byShape] fatal internal%s", "the Storage is set on gpu without CUDA support\n"); @@ -262,10 +263,10 @@ namespace cytnx { if (User_debug) { cytnx_error_msg(shape.size() != len.size(), "%s", "[ERROR][DEBUG] internal Storage, shape.size() != len.size()"); - cytnx_error_msg(out->dtype != this->dtype, "%s", "[ERROR][DEBUG] %s", + cytnx_error_msg(out->dtype() != this->dtype(), "%s", "[ERROR][DEBUG] %s", "internal, the output dtype does not match current storage dtype.\n"); } - cytnx_error_msg(this->device != out->device, + cytnx_error_msg(this->device() != out->device(), "[ERROR] cannot GetElem_byShape between different device.%s", "\n"); // std::cout <<"=====" << len.size() << " " << locators.size() << std::endl; @@ -296,14 +297,14 @@ namespace cytnx { offj[i] = c_offj[mapper[i]]; } - if (this->device == Device.cpu) { - utils_internal::uii.GetElems_ii[this->dtype](out->Mem, this->Mem, offj, new_offj, locators, - TotalElem); + if (this->device() == Device.cpu) { + utils_internal::uii.GetElems_ii[this->dtype()](out->data(), this->data(), offj, new_offj, + locators, TotalElem); } else { #ifdef UNI_GPU - checkCudaErrors(cudaSetDevice(this->device)); - 
utils_internal::uii.cuGetElems_ii[this->dtype](out->Mem, this->Mem, offj, new_offj, locators, - TotalElem); + checkCudaErrors(cudaSetDevice(this->device())); + utils_internal::uii.cuGetElems_ii[this->dtype()](out->data(), this->data(), offj, new_offj, + locators, TotalElem); #else cytnx_error_msg(true, "[ERROR][GetElem_byShape] fatal internal%s", "the Storage is set on gpu without CUDA support\n"); @@ -322,7 +323,7 @@ namespace cytnx { cytnx_error_msg(shape.size() != len.size(), "%s", "[ERROR][DEBUG] internal Storage, shape.size() != len.size()"); - cytnx_error_msg(this->device != in->device, + cytnx_error_msg(this->device() != in->device(), "[ERROR] cannot SetElem_byShape between different device.%s", "\n"); // std::cout <<"=====" << len.size() << " " << locators.size() << std::endl; // create new instance: @@ -355,21 +356,21 @@ namespace cytnx { offj[i] = c_offj[mapper[i]]; } - if (this->device == Device.cpu) { - if (utils_internal::uii.SetElems_ii[in->dtype][this->dtype] == NULL) { + if (this->device() == Device.cpu) { + if (utils_internal::uii.SetElems_ii[in->dtype()][this->dtype()] == NULL) { cytnx_error_msg(true, "[ERROR] %s", "cannot assign complex element to real container.\n"); } - utils_internal::uii.SetElems_ii[in->dtype][this->dtype](in->Mem, this->Mem, c_offj, new_offj, - locators, TotalElem, is_scalar); + utils_internal::uii.SetElems_ii[in->dtype()][this->dtype()]( + in->data(), this->data(), c_offj, new_offj, locators, TotalElem, is_scalar); } else { #ifdef UNI_GPU - if (utils_internal::uii.cuSetElems_ii[in->dtype][this->dtype] == NULL) { + if (utils_internal::uii.cuSetElems_ii[in->dtype()][this->dtype()] == NULL) { cytnx_error_msg(true, "%s", "[ERROR] %s", "cannot assign complex element to real container.\n"); } - checkCudaErrors(cudaSetDevice(this->device)); - utils_internal::uii.cuSetElems_ii[in->dtype][this->dtype](in->Mem, this->Mem, offj, new_offj, - locators, TotalElem, is_scalar); + checkCudaErrors(cudaSetDevice(this->device())); + utils_internal::uii.cuSetElems_ii[in->dtype()][this->dtype()]( + in->data(), this->data(), offj, new_offj, locators, TotalElem, is_scalar); #else cytnx_error_msg(true, "[ERROR][SetElem_byShape] fatal internal%s", "the Storage is set on gpu without CUDA support\n"); @@ -384,7 +385,7 @@ namespace cytnx { // plan: we assume in is contiguous for now! 
// - cytnx_error_msg(this->device != in->device, + cytnx_error_msg(this->device() != in->device(), "[ERROR] cannot SetElem_byShape_v2 between different device.%s", "\n"); // std::cout <<"=====" << len.size() << " " << locators.size() << std::endl; @@ -418,20 +419,20 @@ namespace cytnx { new_accu *= shape[i]; } - if (this->device == Device.cpu) { - if (utils_internal::uii.SetElems_conti_ii[in->dtype][this->dtype] == NULL) { + if (this->device() == Device.cpu) { + if (utils_internal::uii.SetElems_conti_ii[in->dtype()][this->dtype()] == NULL) { cytnx_error_msg(true, "[ERROR] %s", "cannot assign complex element to real container.\n"); } - utils_internal::uii.SetElems_conti_ii[in->dtype][this->dtype]( - in->Mem, this->Mem, c_offj, new_offj, locators, TotalElem, Nunit, is_scalar); + utils_internal::uii.SetElems_conti_ii[in->dtype()][this->dtype()]( + in->data(), this->data(), c_offj, new_offj, locators, TotalElem, Nunit, is_scalar); } else { #ifdef UNI_GPU - if (utils_internal::uii.cuSetElems_conti_ii[in->dtype][this->dtype] == NULL) { + if (utils_internal::uii.cuSetElems_conti_ii[in->dtype()][this->dtype()] == NULL) { cytnx_error_msg(true, "[ERROR] %s", "cannot assign complex element to real container.\n"); } - checkCudaErrors(cudaSetDevice(this->device)); - utils_internal::uii.cuSetElems_conti_ii[in->dtype][this->dtype]( - in->Mem, this->Mem, c_offj, new_offj, locators, TotalElem, Nunit, is_scalar); + checkCudaErrors(cudaSetDevice(this->device())); + utils_internal::uii.cuSetElems_conti_ii[in->dtype()][this->dtype()]( + in->data(), this->data(), c_offj, new_offj, locators, TotalElem, Nunit, is_scalar); // cytnx_error_msg(true, "[Developing][SetElem on gpu is now down for further inspection]%s", // "\n"); #else @@ -521,130 +522,130 @@ namespace cytnx { template <> float *Storage_base::data() const { // check type - cytnx_error_msg(dtype != Type.Float, + cytnx_error_msg(this->dtype() != Type.Float, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> double *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Double, + cytnx_error_msg(this->dtype() != Type.Double, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> std::complex *Storage_base::data>() const { cytnx_error_msg( - dtype != Type.ComplexDouble, + this->dtype() != Type.ComplexDouble, "[ERROR] type mismatch. try to get < complex > type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU - cytnx_error_msg(this->device != Device.cpu, "%s", + cytnx_error_msg(this->device() != Device.cpu, "%s", "[ERROR] the Storage is on GPU but try to get with CUDA complex type " "complex. use type instead."); cudaDeviceSynchronize(); #endif - return static_cast *>(this->Mem); + return static_cast *>(this->data()); } template <> std::complex *Storage_base::data>() const { cytnx_error_msg( - dtype != Type.ComplexFloat, + this->dtype() != Type.ComplexFloat, "[ERROR] type mismatch. 
try to get < complex > type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU - cytnx_error_msg(this->device != Device.cpu, "%s", + cytnx_error_msg(this->device() != Device.cpu, "%s", "[ERROR] the Storage is on GPU but try to get with CUDA complex type " "complex. use type instead."); cudaDeviceSynchronize(); #endif - return static_cast *>(this->Mem); + return static_cast *>(this->data()); } template <> uint32_t *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Uint32, + cytnx_error_msg(this->dtype() != Type.Uint32, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> int32_t *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Int32, + cytnx_error_msg(this->dtype() != Type.Int32, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> uint64_t *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Uint64, + cytnx_error_msg(this->dtype() != Type.Uint64, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> int64_t *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Int64, + cytnx_error_msg(this->dtype() != Type.Int64, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> int16_t *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Int16, + cytnx_error_msg(this->dtype() != Type.Int16, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> uint16_t *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Uint16, + cytnx_error_msg(this->dtype() != Type.Uint16, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } template <> bool *Storage_base::data() const { - cytnx_error_msg(dtype != Type.Bool, + cytnx_error_msg(this->dtype() != Type.Bool, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem); + return static_cast(this->data()); } // get complex raw pointer using CUDA complex type @@ -652,26 +653,26 @@ namespace cytnx { template <> cuDoubleComplex *Storage_base::data() const { cytnx_error_msg( - dtype != Type.ComplexDouble, + this->dtype() != Type.ComplexDouble, "[ERROR] type mismatch. 
try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->device == Device.cpu, "%s", + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->device() == Device.cpu, "%s", "[ERROR] the Storage is on CPU(Host) but try to get with CUDA complex type " "cuDoubleComplex. use type or < complex > instead."); cudaDeviceSynchronize(); - return static_cast(this->Mem); + return static_cast(this->data()); } template <> cuFloatComplex *Storage_base::data() const { cytnx_error_msg( - dtype != Type.ComplexFloat, + this->dtype() != Type.ComplexFloat, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->device == Device.cpu, "%s", + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->device() == Device.cpu, "%s", "[ERROR] the Storage is on CPU(Host) but try to get with CUDA complex type " "cuFloatComplex. use type or < complex > instead."); cudaDeviceSynchronize(); - return static_cast(this->Mem); + return static_cast(this->data()); } #endif @@ -680,310 +681,310 @@ namespace cytnx { template <> float &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) { - cytnx_error_msg(dtype != Type.Float, + cytnx_error_msg(this->dtype() != Type.Float, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); } - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> double &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) { - cytnx_error_msg(dtype != Type.Double, + cytnx_error_msg(this->dtype() != Type.Double, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); } - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> std::complex &Storage_base::at>(const cytnx_uint64 &idx) const { if (cytnx::User_debug) cytnx_error_msg( - dtype != Type.ComplexFloat, + this->dtype() != Type.ComplexFloat, "[ERROR] type mismatch. try to get < complex > type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast *>(this->Mem)[idx]; + return static_cast *>(this->data())[idx]; } template <> std::complex &Storage_base::at>(const cytnx_uint64 &idx) const { if (cytnx::User_debug) cytnx_error_msg( - dtype != Type.ComplexDouble, + this->dtype() != Type.ComplexDouble, "[ERROR] type mismatch. 
try to get < complex > type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast *>(this->Mem)[idx]; + return static_cast *>(this->data())[idx]; } template <> uint32_t &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Uint32, + cytnx_error_msg(this->dtype() != Type.Uint32, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> int32_t &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Int32, + cytnx_error_msg(this->dtype() != Type.Int32, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> uint64_t &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Uint64, + cytnx_error_msg(this->dtype() != Type.Uint64, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> int64_t &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Int64, + cytnx_error_msg(this->dtype() != Type.Int64, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> uint16_t &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Uint16, + cytnx_error_msg(this->dtype() != Type.Uint16, "[ERROR] type mismatch. 
try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> int16_t &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Int16, + cytnx_error_msg(this->dtype() != Type.Int16, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + Type.getname(this->dtype()).c_str()); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } template <> bool &Storage_base::at(const cytnx_uint64 &idx) const { if (cytnx::User_debug) - cytnx_error_msg(dtype != Type.Bool, + cytnx_error_msg(this->dtype() != Type.Bool, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); + Type.getname(this->dtype()).c_str()); - if (idx >= this->len) - cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->len); + if (idx >= this->size()) + cytnx_error_msg(1, "[ERROR] index [%d] out of bound [%d]\n", idx, this->size()); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[idx]; + return static_cast(this->data())[idx]; } // instantiation: //==================================================== template <> float &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Float, + cytnx_error_msg(this->dtype() != Type.Float, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> double &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Double, + cytnx_error_msg(this->dtype() != Type.Double, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> std::complex &Storage_base::back>() const { cytnx_error_msg( - dtype != Type.ComplexFloat, + this->dtype() != Type.ComplexFloat, "[ERROR] type mismatch. 
try to get < complex > type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast *>(this->Mem)[this->len - 1]; + return static_cast *>(this->data())[this->size() - 1]; } template <> std::complex &Storage_base::back>() const { cytnx_error_msg( - dtype != Type.ComplexDouble, + this->dtype() != Type.ComplexDouble, "[ERROR] type mismatch. try to get < complex > type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast *>(this->Mem)[this->len - 1]; + return static_cast *>(this->data())[this->size() - 1]; } template <> uint32_t &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Uint32, + cytnx_error_msg(this->dtype() != Type.Uint32, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> int32_t &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Int32, + cytnx_error_msg(this->dtype() != Type.Int32, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> uint64_t &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Uint64, + cytnx_error_msg(this->dtype() != Type.Uint64, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> int64_t &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Int64, + cytnx_error_msg(this->dtype() != Type.Int64, "[ERROR] type mismatch. 
try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> uint16_t &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Uint16, + cytnx_error_msg(this->dtype() != Type.Uint16, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> int16_t &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Int16, + cytnx_error_msg(this->dtype() != Type.Int16, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } template <> bool &Storage_base::back() const { - cytnx_error_msg(dtype != Type.Bool, + cytnx_error_msg(this->dtype() != Type.Bool, "[ERROR] type mismatch. try to get type from raw data of type %s", - Type.getname(dtype).c_str()); - cytnx_error_msg(this->len == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); + Type.getname(this->dtype()).c_str()); + cytnx_error_msg(this->size() == 0, "[ERROR] cannot call back on empty stoarge.%s", "\n"); #ifdef UNI_GPU cudaDeviceSynchronize(); #endif - return static_cast(this->Mem)[this->len - 1]; + return static_cast(this->data())[this->size() - 1]; } void Storage_base::_cpy_bool(void *ptr, const std::vector &vin) { diff --git a/src/backend/algo_internal_cpu/Concate_internal.cpp b/src/backend/algo_internal_cpu/Concate_internal.cpp index 7e17ebdd..74edfad0 100644 --- a/src/backend/algo_internal_cpu/Concate_internal.cpp +++ b/src/backend/algo_internal_cpu/Concate_internal.cpp @@ -18,7 +18,7 @@ namespace cytnx { // 2. out is properly allocated! // 4. checking bool type!! 
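The Storage.cpp hunks above rewrite the typed accessors (data<T>(), at<T>(), back<T>()) against the new virtual interface instead of the removed public members. As a rough orientation only, the shape of that design is sketched below in plain C++; StorageIface, SimpleStorage, and storage_at are hypothetical names for illustration, not cytnx classes, and only the method names (data(), dtype(), size(), device()) mirror what the header now declares.

#include <cstdlib>
#include <stdexcept>

// Minimal stand-in for the new virtual surface of Storage_base: data()/dtype()/size()/device()
// replace the removed public members Mem/dtype/len/device.
struct StorageIface {
  virtual ~StorageIface() = default;
  virtual void *data() const { return nullptr; }
  virtual int dtype() const { return 0; }
  virtual unsigned long long size() const { return 0; }
  virtual int device() const { return -1; }
};

// A hypothetical typed storage; the concrete subclass cytnx actually uses is not shown in this diff.
template <class T>
struct SimpleStorage : StorageIface {
  T *buf = nullptr;
  unsigned long long n = 0;
  explicit SimpleStorage(unsigned long long len)
      : buf(static_cast<T *>(std::calloc(len, sizeof(T)))), n(len) {}
  ~SimpleStorage() override { std::free(buf); }
  void *data() const override { return buf; }
  unsigned long long size() const override { return n; }
};

// Element access in the style of the at<T>() specializations above: bounds-check against
// size(), then cast the type-erased data() pointer to the requested element type.
template <class T>
T &storage_at(const StorageIface &s, unsigned long long idx) {
  if (idx >= s.size()) throw std::out_of_range("index out of bound");
  return static_cast<T *>(s.data())[idx];
}

The point of the indirection is that the base class no longer owns the buffer bookkeeping itself: each concrete storage reports its own pointer, length, and dtype, and type-erased callers recover a typed view by casting data(), exactly as the specializations above now do.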
- // cytnx_uint64 ElemSize = Type.typeSize(out->dtype); + // cytnx_uint64 ElemSize = Type.typeSize(out->dtype()); cytnx_uint64 offs = 0; // char *out_ptr = (char*)out->Mem;
diff --git a/src/backend/algo_internal_cpu/Sort_internal.cpp b/src/backend/algo_internal_cpu/Sort_internal.cpp index 6f6856a9..f968a18c 100644 --- a/src/backend/algo_internal_cpu/Sort_internal.cpp +++ b/src/backend/algo_internal_cpu/Sort_internal.cpp @@ -17,7 +17,7 @@ namespace cytnx { } void Sort_internal_cd(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_complex128 *p = (cytnx_complex128 *)out->Mem; + cytnx_complex128 *p = (cytnx_complex128 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride, _compare_c128); @@ -28,7 +28,7 @@ namespace cytnx { } void Sort_internal_cf(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_complex64 *p = (cytnx_complex64 *)out->Mem; + cytnx_complex64 *p = (cytnx_complex64 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride, _compare_c64); @@ -36,56 +36,56 @@ namespace cytnx { void Sort_internal_d(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - double *p = (double *)out->Mem; + double *p = (double *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_f(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - float *p = (float *)out->Mem; + float *p = (float *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_u64(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_uint64 *p = (cytnx_uint64 *)out->Mem; + cytnx_uint64 *p = (cytnx_uint64 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_i64(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_int64 *p = (cytnx_int64 *)out->Mem; + cytnx_int64 *p = (cytnx_int64 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_u32(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_uint32 *p = (cytnx_uint32 *)out->Mem; + cytnx_uint32 *p = (cytnx_uint32 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_i32(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_int32 *p = (cytnx_int32 *)out->Mem; + cytnx_int32 *p = (cytnx_int32 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_u16(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_uint16 *p = (cytnx_uint16 *)out->Mem; + cytnx_uint16 *p = (cytnx_uint16 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); } void Sort_internal_i16(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_int16 *p = (cytnx_int16 *)out->Mem; + cytnx_int16 *p = (cytnx_int16 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) std::sort(p + i * stride, p + i * stride + stride); }
diff --git a/src/backend/algo_internal_gpu/cuConcate_internal.cu b/src/backend/algo_internal_gpu/cuConcate_internal.cu index ce2b569e..6b076a1f 100644 --- a/src/backend/algo_internal_gpu/cuConcate_internal.cu +++ b/src/backend/algo_internal_gpu/cuConcate_internal.cu @@ -11,7 +11,7 @@ namespace cytnx { // 2. out is properly allocated! // 4. checking bool type!! - // cytnx_uint64 ElemSize = Type.typeSize(out->dtype); + // cytnx_uint64 ElemSize = Type.typeSize(out->dtype()); cytnx_uint64 offs = 0; // char *out_ptr = (char*)out->Mem;
diff --git a/src/backend/algo_internal_gpu/cuSort_internal.cu b/src/backend/algo_internal_gpu/cuSort_internal.cu index 1be96a5b..658f9806 100644 --- a/src/backend/algo_internal_gpu/cuSort_internal.cu +++ b/src/backend/algo_internal_gpu/cuSort_internal.cu @@ -14,7 +14,7 @@ namespace cytnx { } void cuSort_internal_cd(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_complex128 *p = (cytnx_complex128 *)out->Mem; + cytnx_complex128 *p = (cytnx_complex128 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride, cu_compare_c128); @@ -25,7 +25,7 @@ namespace cytnx { } void cuSort_internal_cf(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_complex64 *p = (cytnx_complex64 *)out->Mem; + cytnx_complex64 *p = (cytnx_complex64 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride, cu_compare_c64); @@ -33,7 +33,7 @@ namespace cytnx { void cuSort_internal_d(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - double *p = (double *)out->Mem; + double *p = (double *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -41,7 +41,7 @@ namespace cytnx { void cuSort_internal_f(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - float *p = (float *)out->Mem; + float *p = (float *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -49,7 +49,7 @@ namespace cytnx { void cuSort_internal_u64(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_uint64 *p = (cytnx_uint64 *)out->Mem; + cytnx_uint64 *p = (cytnx_uint64 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -57,7 +57,7 @@ namespace cytnx { void cuSort_internal_i64(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_int64 *p = (cytnx_int64 *)out->Mem; + cytnx_int64 *p = (cytnx_int64 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -65,7 +65,7 @@ namespace cytnx { void cuSort_internal_u32(boost::intrusive_ptr<Storage_base> &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_uint32 *p = (cytnx_uint32 *)out->Mem; +
cytnx_uint32 *p = (cytnx_uint32 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -73,7 +73,7 @@ namespace cytnx { void cuSort_internal_i32(boost::intrusive_ptr &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_int32 *p = (cytnx_int32 *)out->Mem; + cytnx_int32 *p = (cytnx_int32 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -81,7 +81,7 @@ namespace cytnx { void cuSort_internal_u16(boost::intrusive_ptr &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_uint16 *p = (cytnx_uint16 *)out->Mem; + cytnx_uint16 *p = (cytnx_uint16 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); @@ -89,7 +89,7 @@ namespace cytnx { void cuSort_internal_i16(boost::intrusive_ptr &out, const cytnx_uint64 &stride, const cytnx_uint64 &Nelem) { - cytnx_int16 *p = (cytnx_int16 *)out->Mem; + cytnx_int16 *p = (cytnx_int16 *)out->data(); cytnx_uint64 Niter = Nelem / stride; for (cytnx_uint64 i = 0; i < Niter; i++) thrust::sort(p + i * stride, p + i * stride + stride); diff --git a/src/backend/linalg_internal_cpu/Abs_internal.cpp b/src/backend/linalg_internal_cpu/Abs_internal.cpp index 08e58c7d..dc5bb631 100644 --- a/src/backend/linalg_internal_cpu/Abs_internal.cpp +++ b/src/backend/linalg_internal_cpu/Abs_internal.cpp @@ -11,8 +11,8 @@ namespace cytnx { namespace linalg_internal { void Abs_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -22,8 +22,8 @@ namespace cytnx { void Abs_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -33,8 +33,8 @@ namespace cytnx { void Abs_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_ten = (cytnx_double *)ten->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_ten = (cytnx_double *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -44,8 +44,8 @@ namespace cytnx { void Abs_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_ten = (cytnx_float *)ten->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_ten = (cytnx_float *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -56,8 +56,8 @@ namespace cytnx { void Abs_internal_i64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_ten = (cytnx_int64 
*)ten->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_ten = (cytnx_int64 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -68,8 +68,8 @@ namespace cytnx { void Abs_internal_i32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_ten = (cytnx_int32 *)ten->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_ten = (cytnx_int32 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -80,8 +80,8 @@ namespace cytnx { void Abs_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_ten = (cytnx_int16 *)ten->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_ten = (cytnx_int16 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { diff --git a/src/backend/linalg_internal_cpu/Add_internal.cpp b/src/backend/linalg_internal_cpu/Add_internal.cpp index 0fc4b37e..eabc10be 100644 --- a/src/backend/linalg_internal_cpu/Add_internal.cpp +++ b/src/backend/linalg_internal_cpu/Add_internal.cpp @@ -17,9 +17,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len; i++) { @@ -68,9 +68,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -120,9 +120,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -172,9 +172,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -224,9 +224,9 @@ namespace cytnx { const 
std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -276,9 +276,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -328,9 +328,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -380,9 +380,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -432,9 +432,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -484,9 +484,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -536,9 +536,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + 
cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -599,9 +599,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -651,9 +651,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -703,9 +703,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -755,9 +755,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len; i++) { @@ -806,9 +806,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -858,9 +858,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -910,9 +910,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - 
cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -962,9 +962,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1014,9 +1014,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1066,9 +1066,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1137,9 +1137,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1189,9 +1189,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1241,9 +1241,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1293,9 +1293,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = 
(cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1345,9 +1345,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1397,9 +1397,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1449,9 +1449,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1501,9 +1501,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1553,9 +1553,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1630,9 +1630,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1682,9 +1682,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + 
cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1734,9 +1734,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1786,9 +1786,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1838,9 +1838,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1890,9 +1890,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1942,9 +1942,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1994,9 +1994,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2080,9 +2080,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 
*)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2132,9 +2132,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2184,9 +2184,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2236,9 +2236,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2288,9 +2288,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2340,9 +2340,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2392,9 +2392,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2486,9 +2486,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + 
cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2538,9 +2538,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2590,9 +2590,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2642,9 +2642,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2694,9 +2694,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2746,9 +2746,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2848,9 +2848,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2900,9 +2900,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = 
(cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2952,9 +2952,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3004,9 +3004,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3056,9 +3056,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3166,9 +3166,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3218,9 +3218,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3270,9 +3270,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3322,9 +3322,9 @@ namespace cytnx { const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3440,9 +3440,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3492,9 +3492,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3544,9 +3544,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3670,9 +3670,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3722,9 +3722,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3856,9 +3856,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) diff --git a/src/backend/linalg_internal_cpu/Axpy_internal.cpp 
b/src/backend/linalg_internal_cpu/Axpy_internal.cpp
index f64964d6..9f6d8ad9 100644
--- a/src/backend/linalg_internal_cpu/Axpy_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Axpy_internal.cpp
@@ -7,32 +7,32 @@ namespace cytnx {
     void Axpy_internal_cd(const boost::intrusive_ptr<Storage_base> &x,
                           boost::intrusive_ptr<Storage_base> &y, const Scalar &a) {
-      cytnx_complex128 *_x = (cytnx_complex128 *)x->Mem;
-      cytnx_complex128 *_y = (cytnx_complex128 *)y->Mem;
+      cytnx_complex128 *_x = (cytnx_complex128 *)x->data();
+      cytnx_complex128 *_y = (cytnx_complex128 *)y->data();
       cytnx_complex128 _a = complex128(a);
       cblas_zaxpy(x->size(), (double *)&_a, (double *)_x, 1, (double *)_y, 1);
     }

     void Axpy_internal_cf(const boost::intrusive_ptr<Storage_base> &x,
                           boost::intrusive_ptr<Storage_base> &y, const Scalar &a) {
-      cytnx_complex64 *_x = (cytnx_complex64 *)x->Mem;
-      cytnx_complex64 *_y = (cytnx_complex64 *)y->Mem;
+      cytnx_complex64 *_x = (cytnx_complex64 *)x->data();
+      cytnx_complex64 *_y = (cytnx_complex64 *)y->data();
       cytnx_complex64 _a = complex64(a);
       cblas_caxpy(x->size(), (float *)&_a, (float *)_x, 1, (float *)_y, 1);
     }

     void Axpy_internal_d(const boost::intrusive_ptr<Storage_base> &x,
                          boost::intrusive_ptr<Storage_base> &y, const Scalar &a) {
-      cytnx_double *_x = (cytnx_double *)x->Mem;
-      cytnx_double *_y = (cytnx_double *)y->Mem;
+      cytnx_double *_x = (cytnx_double *)x->data();
+      cytnx_double *_y = (cytnx_double *)y->data();
       cytnx_double _a = double(a);
       cblas_daxpy(x->size(), _a, _x, 1, _y, 1);
     }

     void Axpy_internal_f(const boost::intrusive_ptr<Storage_base> &x,
                          boost::intrusive_ptr<Storage_base> &y, const Scalar &a) {
-      cytnx_float *_x = (cytnx_float *)x->Mem;
-      cytnx_float *_y = (cytnx_float *)y->Mem;
+      cytnx_float *_x = (cytnx_float *)x->data();
+      cytnx_float *_y = (cytnx_float *)y->data();
       cytnx_float _a = float(a);
       cblas_saxpy(x->size(), _a, _x, 1, _y, 1);
     }
diff --git a/src/backend/linalg_internal_cpu/Conj_inplace_internal.cpp b/src/backend/linalg_internal_cpu/Conj_inplace_internal.cpp
index fff8599d..000dee44 100644
--- a/src/backend/linalg_internal_cpu/Conj_inplace_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Conj_inplace_internal.cpp
@@ -11,7 +11,7 @@ namespace cytnx {
     void Conj_inplace_internal_cf(boost::intrusive_ptr<Storage_base> &ten,
                                   const cytnx_uint64 &Nelem) {
-      cytnx_complex64 *tmp = (cytnx_complex64 *)ten->Mem;
+      cytnx_complex64 *tmp = (cytnx_complex64 *)ten->data();

 #pragma omp parallel for schedule(dynamic)
       for (cytnx_uint64 n = 0; n < Nelem; n++) {
@@ -21,7 +21,7 @@ namespace cytnx {
     void Conj_inplace_internal_cd(boost::intrusive_ptr<Storage_base> &ten,
                                   const cytnx_uint64 &Nelem) {
-      cytnx_complex128 *tmp = (cytnx_complex128 *)ten->Mem;
+      cytnx_complex128 *tmp = (cytnx_complex128 *)ten->data();

 #pragma omp parallel for schedule(dynamic)
       for (cytnx_uint64 n = 0; n < Nelem; n++) {
diff --git a/src/backend/linalg_internal_cpu/Cpr_internal.cpp b/src/backend/linalg_internal_cpu/Cpr_internal.cpp
index 748bd655..74d813e2 100644
--- a/src/backend/linalg_internal_cpu/Cpr_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Cpr_internal.cpp
@@ -17,9 +17,9 @@ namespace cytnx {
                              const std::vector<cytnx_uint64> &shape,
                              const std::vector<cytnx_uint64> &invmapper_L,
                              const std::vector<cytnx_uint64> &invmapper_R) {
-      cytnx_bool *_out = (cytnx_bool *)out->Mem;
-      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
-      cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem;
+      cytnx_bool *_out = (cytnx_bool *)out->data();
+      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+      cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data();
       if (Lin->size() == 1) {
 #pragma omp parallel for schedule(dynamic)
         for (unsigned long long i = 0; i < len; i++) {
@@ -68,9 +68,9 @@
namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -121,9 +121,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -174,9 +174,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -227,9 +227,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -280,9 +280,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -333,9 +333,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -386,9 +386,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -439,9 +439,9 
@@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -492,9 +492,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -545,9 +545,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -608,9 +608,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -660,9 +660,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -713,9 +713,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -766,9 +766,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < 
len; i++) { @@ -818,9 +818,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -871,9 +871,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -924,9 +924,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -977,9 +977,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1030,9 +1030,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1083,9 +1083,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1154,9 +1154,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); // std::cout << "good" << std::endl; if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ 
-1206,9 +1206,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1258,9 +1258,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1310,9 +1310,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1362,9 +1362,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1414,9 +1414,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1466,9 +1466,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1518,9 +1518,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1570,9 +1570,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1647,9 +1647,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1699,9 +1699,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1751,9 +1751,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1803,9 +1803,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1855,9 +1855,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1907,9 +1907,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1959,9 +1959,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 
*)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2011,9 +2011,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2097,9 +2097,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2149,9 +2149,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2201,9 +2201,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2253,9 +2253,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2305,9 +2305,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2357,9 +2357,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 
*)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2409,9 +2409,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2503,9 +2503,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2555,9 +2555,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2607,9 +2607,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2659,9 +2659,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2711,9 +2711,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2763,9 +2763,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; 
- cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2866,9 +2866,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2918,9 +2918,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2970,9 +2970,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3022,9 +3022,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3074,9 +3074,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3184,9 +3184,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3236,9 +3236,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { 
- cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3288,9 +3288,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3340,9 +3340,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3459,9 +3459,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3511,9 +3511,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3563,9 +3563,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3689,9 +3689,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3741,9 +3741,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - 
cytnx_bool *_out = (cytnx_bool *)out->Mem;
-      cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem;
-      cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem;
+      cytnx_bool *_out = (cytnx_bool *)out->data();
+      cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data();
+      cytnx_bool *_Rin = (cytnx_bool *)Rin->data();
       if (Lin->size() == 1) {
 #pragma omp parallel for schedule(dynamic)
@@ -3876,9 +3876,9 @@ namespace cytnx {
                             const std::vector<cytnx_uint64> &shape,
                             const std::vector<cytnx_uint64> &invmapper_L,
                             const std::vector<cytnx_uint64> &invmapper_R) {
-      cytnx_bool *_out = (cytnx_bool *)out->Mem;
-      cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem;
-      cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem;
+      cytnx_bool *_out = (cytnx_bool *)out->data();
+      cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+      cytnx_bool *_Rin = (cytnx_bool *)Rin->data();
       if (Lin->size() == 1) {
 #pragma omp parallel for schedule(dynamic)
diff --git a/src/backend/linalg_internal_cpu/Det_internal.cpp b/src/backend/linalg_internal_cpu/Det_internal.cpp
index c507059e..119a1697 100644
--- a/src/backend/linalg_internal_cpu/Det_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Det_internal.cpp
@@ -20,8 +20,8 @@ namespace cytnx {
                         const cytnx_uint64 &L) {
       cytnx_complex128 *od = static_cast<cytnx_complex128 *>(out);
       lapack_complex_double *_Rin =
-        (lapack_complex_double *)malloc(sizeof(cytnx_complex128) * Rin->len);
-      memcpy(_Rin, Rin->Mem, sizeof(cytnx_complex128) * Rin->len);
+        (lapack_complex_double *)malloc(sizeof(cytnx_complex128) * Rin->size());
+      memcpy(_Rin, Rin->data(), sizeof(cytnx_complex128) * Rin->size());
       lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int));
       lapack_int N = L;
@@ -56,8 +56,8 @@ namespace cytnx {
                         const cytnx_uint64 &L) {
       cytnx_complex64 *od = static_cast<cytnx_complex64 *>(out);
       lapack_complex_float *_Rin =
-        (lapack_complex_float *)malloc(sizeof(cytnx_complex64) * Rin->len);
-      memcpy(_Rin, Rin->Mem, sizeof(cytnx_complex64) * Rin->len);
+        (lapack_complex_float *)malloc(sizeof(cytnx_complex64) * Rin->size());
+      memcpy(_Rin, Rin->data(), sizeof(cytnx_complex64) * Rin->size());
       lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int));
       lapack_int N = L;
@@ -91,8 +91,8 @@ namespace cytnx {
     void Det_internal_d(void *out, const boost::intrusive_ptr<Storage_base> &Rin,
                         const cytnx_uint64 &L) {
       cytnx_double *od = static_cast<cytnx_double *>(out);
-      cytnx_double *_Rin = (cytnx_double *)malloc(sizeof(cytnx_double) * Rin->len);
-      memcpy(_Rin, Rin->Mem, sizeof(cytnx_double) * Rin->len);
+      cytnx_double *_Rin = (cytnx_double *)malloc(sizeof(cytnx_double) * Rin->size());
+      memcpy(_Rin, Rin->data(), sizeof(cytnx_double) * Rin->size());
       lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int));
       lapack_int N = L;
@@ -114,8 +114,8 @@ namespace cytnx {
     void Det_internal_f(void *out, const boost::intrusive_ptr<Storage_base> &Rin,
                         const cytnx_uint64 &L) {
       float *od = static_cast<float *>(out);
-      cytnx_float *_Rin = (cytnx_float *)malloc(sizeof(cytnx_float) * Rin->len);
-      memcpy(_Rin, Rin->Mem, sizeof(cytnx_float) * Rin->len);
+      cytnx_float *_Rin = (cytnx_float *)malloc(sizeof(cytnx_float) * Rin->size());
+      memcpy(_Rin, Rin->data(), sizeof(cytnx_float) * Rin->size());
       lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int));
       lapack_int N = L;
diff --git a/src/backend/linalg_internal_cpu/Diag_internal.cpp b/src/backend/linalg_internal_cpu/Diag_internal.cpp
index 0da9faf2..95a8dc4a 100644
--- a/src/backend/linalg_internal_cpu/Diag_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Diag_internal.cpp
@@ -23,8 +23,8 @@ namespace cytnx {
     void Diag_internal_b(boost::intrusive_ptr<Storage_base> &out,
                          const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                         const cytnx_bool &isrank2) {
-      cytnx_bool *_out = (cytnx_bool *)out->Mem;
-      cytnx_bool *_ten = (cytnx_bool *)ten->Mem;
+      cytnx_bool *_out = (cytnx_bool *)out->data();
+      cytnx_bool *_ten = (cytnx_bool *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -32,8 +32,8 @@ namespace cytnx {
     void Diag_internal_i16(boost::intrusive_ptr<Storage_base> &out,
                            const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                            const cytnx_bool &isrank2) {
-      cytnx_int16 *_out = (cytnx_int16 *)out->Mem;
-      cytnx_int16 *_ten = (cytnx_int16 *)ten->Mem;
+      cytnx_int16 *_out = (cytnx_int16 *)out->data();
+      cytnx_int16 *_ten = (cytnx_int16 *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -41,8 +41,8 @@ namespace cytnx {
     void Diag_internal_u16(boost::intrusive_ptr<Storage_base> &out,
                            const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                            const cytnx_bool &isrank2) {
-      cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem;
-      cytnx_uint16 *_ten = (cytnx_uint16 *)ten->Mem;
+      cytnx_uint16 *_out = (cytnx_uint16 *)out->data();
+      cytnx_uint16 *_ten = (cytnx_uint16 *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -50,8 +50,8 @@ namespace cytnx {
     void Diag_internal_i32(boost::intrusive_ptr<Storage_base> &out,
                            const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                            const cytnx_bool &isrank2) {
-      cytnx_int32 *_out = (cytnx_int32 *)out->Mem;
-      cytnx_int32 *_ten = (cytnx_int32 *)ten->Mem;
+      cytnx_int32 *_out = (cytnx_int32 *)out->data();
+      cytnx_int32 *_ten = (cytnx_int32 *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -59,8 +59,8 @@ namespace cytnx {
     void Diag_internal_u32(boost::intrusive_ptr<Storage_base> &out,
                            const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                            const cytnx_bool &isrank2) {
-      cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem;
-      cytnx_uint32 *_ten = (cytnx_uint32 *)ten->Mem;
+      cytnx_uint32 *_out = (cytnx_uint32 *)out->data();
+      cytnx_uint32 *_ten = (cytnx_uint32 *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -68,8 +68,8 @@ namespace cytnx {
     void Diag_internal_i64(boost::intrusive_ptr<Storage_base> &out,
                            const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                            const cytnx_bool &isrank2) {
-      cytnx_int64 *_out = (cytnx_int64 *)out->Mem;
-      cytnx_int64 *_ten = (cytnx_int64 *)ten->Mem;
+      cytnx_int64 *_out = (cytnx_int64 *)out->data();
+      cytnx_int64 *_ten = (cytnx_int64 *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -77,8 +77,8 @@ namespace cytnx {
     void Diag_internal_u64(boost::intrusive_ptr<Storage_base> &out,
                            const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                            const cytnx_bool &isrank2) {
-      cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem;
-      cytnx_uint64 *_ten = (cytnx_uint64 *)ten->Mem;
+      cytnx_uint64 *_out = (cytnx_uint64 *)out->data();
+      cytnx_uint64 *_ten = (cytnx_uint64 *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -86,8 +86,8 @@ namespace cytnx {
     void Diag_internal_d(boost::intrusive_ptr<Storage_base> &out,
                          const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                          const cytnx_bool &isrank2) {
-      cytnx_double *_out = (cytnx_double *)out->Mem;
-      cytnx_double *_ten = (cytnx_double *)ten->Mem;
+      cytnx_double *_out = (cytnx_double *)out->data();
+      cytnx_double *_ten = (cytnx_double *)ten->data();
       Diag_internal_driver(_out, _ten, L, isrank2);
     }
@@ -95,8 +95,8 @@ namespace cytnx {
     void Diag_internal_f(boost::intrusive_ptr<Storage_base> &out,
                          const boost::intrusive_ptr<Storage_base> &ten, const cytnx_uint64 &L,
                          const cytnx_bool &isrank2) {
-      cytnx_float *_out = (cytnx_float *)out->Mem;
-      cytnx_float *_ten = (cytnx_float *)ten->Mem;
+      cytnx_float *_out = (cytnx_float *)out->data();
+      cytnx_float *_ten = (cytnx_float *)ten->data();
Diag_internal_driver(_out, _ten, L, isrank2); } @@ -104,8 +104,8 @@ namespace cytnx { void Diag_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &L, const cytnx_bool &isrank2) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data(); Diag_internal_driver(_out, _ten, L, isrank2); } @@ -113,8 +113,8 @@ namespace cytnx { void Diag_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &L, const cytnx_bool &isrank2) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data(); Diag_internal_driver(_out, _ten, L, isrank2); } diff --git a/src/backend/linalg_internal_cpu/Div_internal.cpp b/src/backend/linalg_internal_cpu/Div_internal.cpp index 8980c72e..6751e32a 100644 --- a/src/backend/linalg_internal_cpu/Div_internal.cpp +++ b/src/backend/linalg_internal_cpu/Div_internal.cpp @@ -17,9 +17,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len; i++) { @@ -78,9 +78,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -138,9 +138,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); // std::cout << "ok" << std::endl; // std::cout << Lin->size() << " " << Rin->size() << " " << len << std::endl; @@ -202,9 +202,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -262,9 +262,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const 
std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -322,9 +322,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -382,9 +382,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -442,9 +442,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -502,9 +502,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -562,9 +562,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -622,9 +622,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 
1) { #pragma omp parallel for schedule(dynamic) @@ -684,9 +684,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -736,9 +736,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -789,9 +789,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -849,9 +849,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -909,9 +909,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -969,9 +969,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1029,9 +1029,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 
*)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1089,9 +1089,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1149,9 +1149,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1209,9 +1209,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1269,9 +1269,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1331,9 +1331,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1383,9 +1383,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1435,9 +1435,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = 
(cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1495,9 +1495,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1555,9 +1555,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1615,9 +1615,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1675,9 +1675,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1735,9 +1735,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1795,9 +1795,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1855,9 +1855,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = 
(cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1915,9 +1915,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1976,9 +1976,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2028,9 +2028,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2080,9 +2080,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2132,9 +2132,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2192,9 +2192,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2252,9 +2252,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + 
cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2312,9 +2312,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2372,9 +2372,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2432,9 +2432,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2492,9 +2492,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2552,9 +2552,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2613,9 +2613,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2665,9 +2665,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + 
cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2717,9 +2717,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2769,9 +2769,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2821,9 +2821,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2873,9 +2873,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2925,9 +2925,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2977,9 +2977,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3029,9 +3029,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 
*)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3081,9 +3081,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3133,9 +3133,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3186,9 +3186,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3238,9 +3238,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3290,9 +3290,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3342,9 +3342,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3394,9 +3394,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; 
- cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3446,9 +3446,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3498,9 +3498,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3550,9 +3550,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3602,9 +3602,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3654,9 +3654,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3706,9 +3706,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3759,9 +3759,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - 
cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3811,9 +3811,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3863,9 +3863,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3915,9 +3915,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3967,9 +3967,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4019,9 +4019,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4071,9 +4071,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4123,9 +4123,9 @@ namespace cytnx { const unsigned long long &len, const 
std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4175,9 +4175,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4227,9 +4227,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4279,9 +4279,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4332,9 +4332,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4384,9 +4384,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4436,9 +4436,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4488,9 
+4488,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4540,9 +4540,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4592,9 +4592,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4644,9 +4644,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4696,9 +4696,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4748,9 +4748,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4800,9 +4800,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + 
cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4852,9 +4852,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4905,9 +4905,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4957,9 +4957,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5009,9 +5009,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5061,9 +5061,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5113,9 +5113,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5165,9 +5165,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int16 *_Lin = 
(cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5217,9 +5217,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5269,9 +5269,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5321,9 +5321,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5373,9 +5373,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5425,9 +5425,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5478,9 +5478,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5530,9 +5530,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; 
+ cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5582,9 +5582,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5634,9 +5634,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5686,9 +5686,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5738,9 +5738,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5790,9 +5790,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5842,9 +5842,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5894,9 +5894,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 
*)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5946,9 +5946,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5998,9 +5998,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6051,9 +6051,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6104,9 +6104,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6157,9 +6157,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6209,9 +6209,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6261,9 +6261,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 
*)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6313,9 +6313,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6365,9 +6365,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6417,9 +6417,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6469,9 +6469,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6521,9 +6521,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6573,9 +6573,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) diff --git a/src/backend/linalg_internal_cpu/Eig_internal.cpp b/src/backend/linalg_internal_cpu/Eig_internal.cpp index eb0f2305..52f17a44 100644 --- a/src/backend/linalg_internal_cpu/Eig_internal.cpp +++ b/src/backend/linalg_internal_cpu/Eig_internal.cpp @@ -15,9 +15,9 @@ namespace cytnx { 
       cytnx_complex128 *tA;
       cytnx_complex128 *buffer_A =
         (cytnx_complex128 *)malloc(cytnx_uint64(L) * L * sizeof(cytnx_complex128));
-      memcpy(buffer_A, in->Mem, sizeof(cytnx_complex128) * cytnx_uint64(L) * L);
-      if (v->dtype != Type.Void) {
-        tA = (cytnx_complex128 *)v->Mem;
+      memcpy(buffer_A, in->data(), sizeof(cytnx_complex128) * cytnx_uint64(L) * L);
+      if (v->dtype() != Type.Void) {
+        tA = (cytnx_complex128 *)v->data();
         jobs = 'V';
       }
@@ -26,8 +26,8 @@ namespace cytnx {
       lapack_int ONE = 1;
       info = LAPACKE_zgeev(LAPACK_COL_MAJOR, jobs, 'N', L, (lapack_complex_double *)buffer_A, ldA,
-                           (lapack_complex_double *)e->Mem, (lapack_complex_double *)tA, L, nullptr,
-                           ONE);
+                           (lapack_complex_double *)e->data(), (lapack_complex_double *)tA, L,
+                           nullptr, ONE);
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zgeev': Lapack INFO = ", info);
@@ -41,9 +41,9 @@ namespace cytnx {
       cytnx_complex64 *tA;
       cytnx_complex64 *buffer_A =
         (cytnx_complex64 *)malloc(cytnx_uint64(L) * L * sizeof(cytnx_complex64));
-      memcpy(buffer_A, in->Mem, sizeof(cytnx_complex64) * cytnx_uint64(L) * L);
-      if (v->dtype != Type.Void) {
-        tA = (cytnx_complex64 *)v->Mem;
+      memcpy(buffer_A, in->data(), sizeof(cytnx_complex64) * cytnx_uint64(L) * L);
+      if (v->dtype() != Type.Void) {
+        tA = (cytnx_complex64 *)v->data();
         jobs = 'V';
       }
@@ -51,9 +51,9 @@ namespace cytnx {
       lapack_int info;
       lapack_int ONE = 1;
-      info =
-        LAPACKE_cgeev(LAPACK_COL_MAJOR, jobs, 'N', L, (lapack_complex_float *)buffer_A, ldA,
-                      (lapack_complex_float *)e->Mem, (lapack_complex_float *)tA, L, nullptr, ONE);
+      info = LAPACKE_cgeev(LAPACK_COL_MAJOR, jobs, 'N', L, (lapack_complex_float *)buffer_A, ldA,
+                           (lapack_complex_float *)e->data(), (lapack_complex_float *)tA, L,
+                           nullptr, ONE);
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'cgeev': Lapack INFO = ", info);
@@ -72,7 +72,7 @@ namespace cytnx {
       cytnx_double *e_real = (cytnx_double*)malloc(cytnx_uint64(L)*sizeof(cytnx_double));
       cytnx_double *e_imag = (cytnx_double*)malloc(cytnx_uint64(L)*sizeof(cytnx_double));
-      if(v->dtype!=Type.Void){
+      if(v->dtype()!=Type.Void){
         tA = (cytnx_double*)v->Mem;
         jobs = 'V';
       }
@@ -120,7 +120,7 @@ namespace cytnx {
       cytnx_float *e_real = (cytnx_float*)malloc(cytnx_uint64(L)*sizeof(cytnx_float));
       cytnx_float *e_imag = (cytnx_float*)malloc(cytnx_uint64(L)*sizeof(cytnx_float));
-      if(v->dtype!=Type.Void){
+      if(v->dtype()!=Type.Void){
         tA = (cytnx_float*)v->Mem;
         jobs = 'V';
       }
diff --git a/src/backend/linalg_internal_cpu/Eigh_internal.cpp b/src/backend/linalg_internal_cpu/Eigh_internal.cpp
index 0501ab2c..523dcd0c 100644
--- a/src/backend/linalg_internal_cpu/Eigh_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Eigh_internal.cpp
@@ -13,23 +13,23 @@ namespace cytnx {
       char jobs = 'N';
       cytnx_complex128 *tA;
-      if (v->dtype != Type.Void) {
-        tA = (cytnx_complex128 *)v->Mem;
-        memcpy(v->Mem, in->Mem, sizeof(cytnx_complex128) * cytnx_uint64(L) * L);
+      if (v->dtype() != Type.Void) {
+        tA = (cytnx_complex128 *)v->data();
+        memcpy(v->data(), in->data(), sizeof(cytnx_complex128) * cytnx_uint64(L) * L);
         jobs = 'V';
       } else {
         tA = (cytnx_complex128 *)malloc(cytnx_uint64(L) * L * sizeof(cytnx_complex128));
-        memcpy(tA, in->Mem, sizeof(cytnx_complex128) * cytnx_uint64(L) * L);
+        memcpy(tA, in->data(), sizeof(cytnx_complex128) * cytnx_uint64(L) * L);
       }
       lapack_int ldA = L;
       lapack_int info;
       info = LAPACKE_zheev(LAPACK_COL_MAJOR, jobs, 'U', L, (lapack_complex_double *)tA, ldA,
-                           (cytnx_double *)e->Mem);
+                           (cytnx_double *)e->data());
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zheev': Lapack INFO = ", info);
-      if (v->dtype == Type.Void) free(tA);
+      if (v->dtype() == Type.Void) free(tA);
     }
     void Eigh_internal_cf(const boost::intrusive_ptr<Storage_base> &in,
                           boost::intrusive_ptr<Storage_base> &e,
@@ -37,22 +37,22 @@ namespace cytnx {
       char jobs = 'N';
       cytnx_complex64 *tA;
-      if (v->dtype != Type.Void) {
-        tA = (cytnx_complex64 *)v->Mem;
-        memcpy(v->Mem, in->Mem, sizeof(cytnx_complex64) * cytnx_uint64(L) * L);
+      if (v->dtype() != Type.Void) {
+        tA = (cytnx_complex64 *)v->data();
+        memcpy(v->data(), in->data(), sizeof(cytnx_complex64) * cytnx_uint64(L) * L);
         jobs = 'V';
       } else {
         tA = (cytnx_complex64 *)malloc(cytnx_uint64(L) * L * sizeof(cytnx_complex64));
-        memcpy(tA, in->Mem, sizeof(cytnx_complex64) * cytnx_uint64(L) * L);
+        memcpy(tA, in->data(), sizeof(cytnx_complex64) * cytnx_uint64(L) * L);
       }
       lapack_int ldA = L;
       lapack_int info;
       info = LAPACKE_cheev(LAPACK_COL_MAJOR, jobs, 'U', L, (lapack_complex_float *)tA, ldA,
-                           (cytnx_float *)e->Mem);
+                           (cytnx_float *)e->data());
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'cheev': Lapack INFO = ", info);
-      if (v->dtype == Type.Void) free(tA);
+      if (v->dtype() == Type.Void) free(tA);
     }
     void Eigh_internal_d(const boost::intrusive_ptr<Storage_base> &in,
                          boost::intrusive_ptr<Storage_base> &e,
@@ -60,22 +60,22 @@ namespace cytnx {
       char jobs = 'N';
       cytnx_double *tA;
-      if (v->dtype != Type.Void) {
-        tA = (cytnx_double *)v->Mem;
-        memcpy(v->Mem, in->Mem, sizeof(cytnx_double) * cytnx_uint64(L) * L);
+      if (v->dtype() != Type.Void) {
+        tA = (cytnx_double *)v->data();
+        memcpy(v->data(), in->data(), sizeof(cytnx_double) * cytnx_uint64(L) * L);
         jobs = 'V';
       } else {
         tA = (cytnx_double *)malloc(cytnx_uint64(L) * L * sizeof(cytnx_double));
-        memcpy(tA, in->Mem, sizeof(cytnx_double) * cytnx_uint64(L) * L);
+        memcpy(tA, in->data(), sizeof(cytnx_double) * cytnx_uint64(L) * L);
       }
       lapack_int ldA = L;
       lapack_int info;
-      info = LAPACKE_dsyev(LAPACK_COL_MAJOR, jobs, 'U', L, tA, ldA, (cytnx_double *)e->Mem);
+      info = LAPACKE_dsyev(LAPACK_COL_MAJOR, jobs, 'U', L, tA, ldA, (cytnx_double *)e->data());
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dsyev': Lapack INFO = ", info);
-      if (v->dtype == Type.Void) free(tA);
+      if (v->dtype() == Type.Void) free(tA);
     }
     void Eigh_internal_f(const boost::intrusive_ptr<Storage_base> &in,
                          boost::intrusive_ptr<Storage_base> &e,
@@ -83,21 +83,21 @@ namespace cytnx {
       char jobs = 'N';
       cytnx_float *tA;
-      if (v->dtype != Type.Void) {
-        tA = (cytnx_float *)v->Mem;
-        memcpy(v->Mem, in->Mem, sizeof(cytnx_float) * cytnx_uint64(L) * L);
+      if (v->dtype() != Type.Void) {
+        tA = (cytnx_float *)v->data();
+        memcpy(v->data(), in->data(), sizeof(cytnx_float) * cytnx_uint64(L) * L);
         jobs = 'V';
       } else {
         tA = (cytnx_float *)malloc(cytnx_uint64(L) * L * sizeof(cytnx_float));
-        memcpy(tA, in->Mem, sizeof(cytnx_float) * cytnx_uint64(L) * L);
+        memcpy(tA, in->data(), sizeof(cytnx_float) * cytnx_uint64(L) * L);
       }
       lapack_int ldA = L;
       lapack_int info;
-      info = LAPACKE_ssyev(LAPACK_COL_MAJOR, jobs, 'U', L, tA, ldA, (cytnx_float *)e->Mem);
+      info = LAPACKE_ssyev(LAPACK_COL_MAJOR, jobs, 'U', L, tA, ldA, (cytnx_float *)e->data());
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'ssyev': Lapack INFO = ", info);
-      if (v->dtype == Type.Void) free(tA);
+      if (v->dtype() == Type.Void) free(tA);
     }
   }  // namespace linalg_internal
diff --git a/src/backend/linalg_internal_cpu/Exp_internal.cpp b/src/backend/linalg_internal_cpu/Exp_internal.cpp
index 8ee6cb16..bed9a5d9 100644
--- a/src/backend/linalg_internal_cpu/Exp_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Exp_internal.cpp
@@ -11,8 +11,9 @@ namespace cytnx { void Exp_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_double *_ten = (cytnx_double *)ten->Mem; - cytnx_double *_out = (cytnx_double *)out->Mem; + cytnx_double *_ten = (cytnx_double *)ten->data(); + cytnx_double *_out = (cytnx_double *)out->data(); + #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { _out[n] = exp(_ten[n]); @@ -21,8 +22,9 @@ namespace cytnx { void Exp_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_float *_ten = (cytnx_float *)ten->Mem; - cytnx_float *_out = (cytnx_float *)out->Mem; + cytnx_float *_ten = (cytnx_float *)ten->data(); + cytnx_float *_out = (cytnx_float *)out->data(); + #pragma omp parallel for for (cytnx_uint64 n = 0; n < Nelem; n++) { _out[n] = expf(_ten[n]); @@ -31,8 +33,8 @@ namespace cytnx { void Exp_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -42,8 +44,8 @@ namespace cytnx { void Exp_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data(); #pragma omp parallel for for (cytnx_uint64 n = 0; n < Nelem; n++) { diff --git a/src/backend/linalg_internal_cpu/Gemm_internal.cpp b/src/backend/linalg_internal_cpu/Gemm_internal.cpp index 4526ff0e..4511767f 100644 --- a/src/backend/linalg_internal_cpu/Gemm_internal.cpp +++ b/src/backend/linalg_internal_cpu/Gemm_internal.cpp @@ -14,9 +14,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const Scalar &a, const Scalar &b) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_inl = (cytnx_complex128 *)inl->Mem; - cytnx_complex128 *_inr = (cytnx_complex128 *)inr->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_inl = (cytnx_complex128 *)inl->data(); + cytnx_complex128 *_inr = (cytnx_complex128 *)inr->data(); cytnx_complex128 alpha = complex128(a), beta = complex128(b); blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -29,9 +29,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const Scalar &a, const Scalar &b) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_inl = (cytnx_complex64 *)inl->Mem; - cytnx_complex64 *_inr = (cytnx_complex64 *)inr->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_inl = (cytnx_complex64 *)inl->data(); + cytnx_complex64 *_inr = (cytnx_complex64 *)inr->data(); cytnx_complex64 alpha = complex64(a), beta = complex64(b); blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -44,9 +44,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const Scalar &a, const Scalar &b) { - cytnx_double *_out = (cytnx_double *)out->Mem; - 
cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); cytnx_double alpha = double(a), beta = double(b); blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -59,9 +59,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const Scalar &a, const Scalar &b) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); cytnx_float alpha = float(a), beta = float(b); blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; diff --git a/src/backend/linalg_internal_cpu/Ger_internal.cpp b/src/backend/linalg_internal_cpu/Ger_internal.cpp index 8b380dc6..9cc2865e 100644 --- a/src/backend/linalg_internal_cpu/Ger_internal.cpp +++ b/src/backend/linalg_internal_cpu/Ger_internal.cpp @@ -8,9 +8,9 @@ namespace cytnx { void Ger_internal_cd(boost::intrusive_ptr &A, const boost::intrusive_ptr &x, const boost::intrusive_ptr &y, const Scalar &a) { - cytnx_complex128 *_A = (cytnx_complex128 *)A->Mem; - cytnx_complex128 *_x = (cytnx_complex128 *)x->Mem; - cytnx_complex128 *_y = (cytnx_complex128 *)y->Mem; + cytnx_complex128 *_A = (cytnx_complex128 *)A->data(); + cytnx_complex128 *_x = (cytnx_complex128 *)x->data(); + cytnx_complex128 *_y = (cytnx_complex128 *)y->data(); cytnx_complex128 _a = complex128(a); cblas_zgeru(CblasRowMajor, x->size(), y->size(), (double *)&_a, (double *)_x, 1, (double *)_y, @@ -20,9 +20,9 @@ namespace cytnx { void Ger_internal_cf(boost::intrusive_ptr &A, const boost::intrusive_ptr &x, const boost::intrusive_ptr &y, const Scalar &a) { - cytnx_complex64 *_A = (cytnx_complex64 *)A->Mem; - cytnx_complex64 *_x = (cytnx_complex64 *)x->Mem; - cytnx_complex64 *_y = (cytnx_complex64 *)y->Mem; + cytnx_complex64 *_A = (cytnx_complex64 *)A->data(); + cytnx_complex64 *_x = (cytnx_complex64 *)x->data(); + cytnx_complex64 *_y = (cytnx_complex64 *)y->data(); cytnx_complex64 _a = complex64(a); cblas_cgeru(CblasRowMajor, x->size(), y->size(), (float *)&_a, (float *)_x, 1, (float *)_y, 1, @@ -32,9 +32,9 @@ namespace cytnx { void Ger_internal_d(boost::intrusive_ptr &A, const boost::intrusive_ptr &x, const boost::intrusive_ptr &y, const Scalar &a) { - cytnx_double *_A = (cytnx_double *)A->Mem; - cytnx_double *_x = (cytnx_double *)x->Mem; - cytnx_double *_y = (cytnx_double *)y->Mem; + cytnx_double *_A = (cytnx_double *)A->data(); + cytnx_double *_x = (cytnx_double *)x->data(); + cytnx_double *_y = (cytnx_double *)y->data(); cytnx_double _a = double(a); cblas_dger(CblasRowMajor, x->size(), y->size(), _a, _x, 1, _y, 1, _A, y->size()); @@ -43,9 +43,9 @@ namespace cytnx { void Ger_internal_f(boost::intrusive_ptr &A, const boost::intrusive_ptr &x, const boost::intrusive_ptr &y, const Scalar &a) { - cytnx_float *_A = (cytnx_float *)A->Mem; - cytnx_float *_x = (cytnx_float *)x->Mem; - cytnx_float *_y = (cytnx_float *)y->Mem; + cytnx_float *_A = (cytnx_float *)A->data(); + cytnx_float *_x = (cytnx_float *)x->data(); + cytnx_float *_y = (cytnx_float *)y->data(); cytnx_float _a = float(a); cblas_sger(CblasRowMajor, x->size(), y->size(), _a, _x, 1, _y, 1, _A, y->size()); diff --git a/src/backend/linalg_internal_cpu/Gesvd_internal.cpp 
b/src/backend/linalg_internal_cpu/Gesvd_internal.cpp index bb54994d..d71a0a84 100644 --- a/src/backend/linalg_internal_cpu/Gesvd_internal.cpp +++ b/src/backend/linalg_internal_cpu/Gesvd_internal.cpp @@ -15,11 +15,11 @@ namespace cytnx { char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. - jobu = (U->dtype == Type.Void) ? 'N' : 'S'; - jobv = (vT->dtype == Type.Void) ? 'N' : 'S'; + jobu = (U->dtype() == Type.Void) ? 'N' : 'S'; + jobv = (vT->dtype() == Type.Void) ? 'N' : 'S'; cytnx_complex128 *Mij = (cytnx_complex128 *)malloc(M * N * sizeof(cytnx_complex128)); - memcpy(Mij, in->Mem, M * N * sizeof(cytnx_complex128)); + memcpy(Mij, in->data(), M * N * sizeof(cytnx_complex128)); lapack_int min = std::min(M, N); lapack_int ldA = N, ldu = N, ldvT = min; @@ -27,8 +27,8 @@ namespace cytnx { double *superb = (double *)malloc(sizeof(double) * (min - 1)); info = LAPACKE_zgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, (lapack_complex_double *)Mij, ldA, - (cytnx_double *)S->Mem, (lapack_complex_double *)vT->Mem, ldu, - (lapack_complex_double *)U->Mem, ldvT, superb); + (cytnx_double *)S->data(), (lapack_complex_double *)vT->data(), ldu, + (lapack_complex_double *)U->data(), ldvT, superb); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zgesvd': Lapack INFO = ", info); @@ -44,11 +44,11 @@ namespace cytnx { char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. - jobu = (U->dtype == Type.Void) ? 'N' : 'S'; - jobv = (vT->dtype == Type.Void) ? 'N' : 'S'; + jobu = (U->dtype() == Type.Void) ? 'N' : 'S'; + jobv = (vT->dtype() == Type.Void) ? 'N' : 'S'; cytnx_complex64 *Mij = (cytnx_complex64 *)malloc(M * N * sizeof(cytnx_complex64)); - memcpy(Mij, in->Mem, M * N * sizeof(cytnx_complex64)); + memcpy(Mij, in->data(), M * N * sizeof(cytnx_complex64)); lapack_int min = std::min(M, N); lapack_int ldA = N, ldu = N, ldvT = min; @@ -56,8 +56,8 @@ namespace cytnx { float *superb = (float *)malloc(sizeof(float) * (min - 1)); info = LAPACKE_cgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, (lapack_complex_float *)Mij, ldA, - (cytnx_float *)S->Mem, (lapack_complex_float *)vT->Mem, ldu, - (lapack_complex_float *)U->Mem, ldvT, superb); + (cytnx_float *)S->data(), (lapack_complex_float *)vT->data(), ldu, + (lapack_complex_float *)U->data(), ldvT, superb); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'cgesvd': Lapack INFO = ", info); @@ -72,19 +72,20 @@ namespace cytnx { const cytnx_int64 &N) { char jobu, jobv; - jobu = (U->dtype == Type.Void) ? 'N' : 'S'; - jobv = (vT->dtype == Type.Void) ? 'N' : 'S'; + jobu = (U->dtype() == Type.Void) ? 'N' : 'S'; + jobv = (vT->dtype() == Type.Void) ? 
'N' : 'S'; cytnx_double *Mij = (cytnx_double *)malloc(M * N * sizeof(cytnx_double)); - memcpy(Mij, in->Mem, M * N * sizeof(cytnx_double)); + memcpy(Mij, in->data(), M * N * sizeof(cytnx_double)); lapack_int min = std::min(M, N); lapack_int ldA = N, ldu = N, ldvT = min; lapack_int info; double *superb = (double *)malloc(sizeof(double) * (min - 1)); - info = LAPACKE_dgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_double *)S->Mem, - (cytnx_double *)vT->Mem, ldu, (cytnx_double *)U->Mem, ldvT, superb); + info = + LAPACKE_dgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_double *)S->data(), + (cytnx_double *)vT->data(), ldu, (cytnx_double *)U->data(), ldvT, superb); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgesvd': Lapack INFO = ", info); @@ -99,19 +100,19 @@ namespace cytnx { const cytnx_int64 &N) { char jobu, jobv; - jobu = (U->dtype == Type.Void) ? 'N' : 'S'; - jobv = (vT->dtype == Type.Void) ? 'N' : 'S'; + jobu = (U->dtype() == Type.Void) ? 'N' : 'S'; + jobv = (vT->dtype() == Type.Void) ? 'N' : 'S'; cytnx_float *Mij = (cytnx_float *)malloc(M * N * sizeof(cytnx_float)); - memcpy(Mij, in->Mem, M * N * sizeof(cytnx_float)); + memcpy(Mij, in->data(), M * N * sizeof(cytnx_float)); lapack_int min = std::min(M, N); lapack_int ldA = N, ldu = N, ldvT = min; lapack_int info; float *superb = (float *)malloc(sizeof(float) * (min - 1)); - info = LAPACKE_sgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_float *)S->Mem, - (cytnx_float *)vT->Mem, ldu, (cytnx_float *)U->Mem, ldvT, superb); + info = LAPACKE_sgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_float *)S->data(), + (cytnx_float *)vT->data(), ldu, (cytnx_float *)U->data(), ldvT, superb); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'sgesvd': Lapack INFO = ", info); diff --git a/src/backend/linalg_internal_cpu/InvM_inplace_internal.cpp b/src/backend/linalg_internal_cpu/InvM_inplace_internal.cpp index 9ce7cef3..5f92f3e1 100644 --- a/src/backend/linalg_internal_cpu/InvM_inplace_internal.cpp +++ b/src/backend/linalg_internal_cpu/InvM_inplace_internal.cpp @@ -9,11 +9,11 @@ namespace cytnx { void InvM_inplace_internal_d(boost::intrusive_ptr &iten, const cytnx_int64 &L) { lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int)); lapack_int info; - info = LAPACKE_dgetrf(LAPACK_COL_MAJOR, L, L, (cytnx_double *)iten->Mem, L, ipiv); + info = LAPACKE_dgetrf(LAPACK_COL_MAJOR, L, L, (cytnx_double *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgetrf': Lapack INFO = ", info); - info = LAPACKE_dgetri(LAPACK_COL_MAJOR, L, (cytnx_double *)iten->Mem, L, ipiv); + info = LAPACKE_dgetri(LAPACK_COL_MAJOR, L, (cytnx_double *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgetri': Lapack INFO = ", info); @@ -24,11 +24,11 @@ namespace cytnx { void InvM_inplace_internal_f(boost::intrusive_ptr &iten, const cytnx_int64 &L) { lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int)); lapack_int info; - info = LAPACKE_sgetrf(LAPACK_COL_MAJOR, L, L, (cytnx_float *)iten->Mem, L, ipiv); + info = LAPACKE_sgetrf(LAPACK_COL_MAJOR, L, L, (cytnx_float *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'sgetrf': Lapack INFO = ", info); - info = LAPACKE_sgetri(LAPACK_COL_MAJOR, L, (cytnx_float *)iten->Mem, L, ipiv); + info = LAPACKE_sgetri(LAPACK_COL_MAJOR, L, (cytnx_float *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 
'sgetri': Lapack INFO = ", info); @@ -38,11 +38,11 @@ namespace cytnx { void InvM_inplace_internal_cd(boost::intrusive_ptr &iten, const cytnx_int64 &L) { lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int)); lapack_int info; - info = LAPACKE_zgetrf(LAPACK_COL_MAJOR, L, L, (lapack_complex_double *)iten->Mem, L, ipiv); + info = LAPACKE_zgetrf(LAPACK_COL_MAJOR, L, L, (lapack_complex_double *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zgetrf': Lapack INFO = ", info); - info = LAPACKE_zgetri(LAPACK_COL_MAJOR, L, (lapack_complex_double *)iten->Mem, L, ipiv); + info = LAPACKE_zgetri(LAPACK_COL_MAJOR, L, (lapack_complex_double *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zgetri': Lapack INFO = ", info); @@ -53,11 +53,11 @@ namespace cytnx { void InvM_inplace_internal_cf(boost::intrusive_ptr &iten, const cytnx_int64 &L) { lapack_int *ipiv = (lapack_int *)malloc((L + 1) * sizeof(lapack_int)); lapack_int info; - info = LAPACKE_cgetrf(LAPACK_COL_MAJOR, L, L, (lapack_complex_float *)iten->Mem, L, ipiv); + info = LAPACKE_cgetrf(LAPACK_COL_MAJOR, L, L, (lapack_complex_float *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'cgetrf': Lapack INFO = ", info); - info = LAPACKE_cgetri(LAPACK_COL_MAJOR, L, (lapack_complex_float *)iten->Mem, L, ipiv); + info = LAPACKE_cgetri(LAPACK_COL_MAJOR, L, (lapack_complex_float *)iten->data(), L, ipiv); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'cgetri': Lapack INFO = ", info); diff --git a/src/backend/linalg_internal_cpu/Inv_inplace_internal.cpp b/src/backend/linalg_internal_cpu/Inv_inplace_internal.cpp index 8e476fe0..af5306f9 100644 --- a/src/backend/linalg_internal_cpu/Inv_inplace_internal.cpp +++ b/src/backend/linalg_internal_cpu/Inv_inplace_internal.cpp @@ -11,7 +11,8 @@ namespace cytnx { void Inv_inplace_internal_d(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { - cytnx_double *_ten = (cytnx_double *)ten->Mem; + cytnx_double *_ten = (cytnx_double *)ten->data(); + #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { _ten[n] = _ten[n] < clip ? 0 : double(1) / _ten[n]; @@ -20,7 +21,8 @@ namespace cytnx { void Inv_inplace_internal_f(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { - cytnx_float *_ten = (cytnx_float *)ten->Mem; + cytnx_float *_ten = (cytnx_float *)ten->data(); + #pragma omp parallel for for (cytnx_uint64 n = 0; n < Nelem; n++) { _ten[n] = _ten[n] < clip ? 
0 : float(1) / _ten[n]; @@ -29,7 +31,7 @@ namespace cytnx { void Inv_inplace_internal_cd(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { - cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem; + cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -40,7 +42,7 @@ namespace cytnx { void Inv_inplace_internal_cf(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { - cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem; + cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data(); #pragma omp parallel for for (cytnx_uint64 n = 0; n < Nelem; n++) { diff --git a/src/backend/linalg_internal_cpu/Lstsq_internal.cpp b/src/backend/linalg_internal_cpu/Lstsq_internal.cpp index 81cd432c..88679edc 100644 --- a/src/backend/linalg_internal_cpu/Lstsq_internal.cpp +++ b/src/backend/linalg_internal_cpu/Lstsq_internal.cpp @@ -13,8 +13,8 @@ namespace cytnx { lda = N; ldb = nrhs; info = LAPACKE_dgelsd(LAPACK_ROW_MAJOR, (lapack_int)M, (lapack_int)N, (lapack_int)nrhs, - (double *)in->Mem, lda, (double *)b->Mem, ldb, (double *)s->Mem, - (double)rcond, (lapack_int *)r->Mem); + (double *)in->data(), lda, (double *)b->data(), ldb, + (double *)s->data(), (double)rcond, (lapack_int *)r->data()); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgelsd': Lapack INFO = ", info); } @@ -28,8 +28,8 @@ namespace cytnx { lda = N; ldb = nrhs; info = LAPACKE_sgelsd(LAPACK_ROW_MAJOR, (lapack_int)M, (lapack_int)N, (lapack_int)nrhs, - (float *)in->Mem, lda, (float *)b->Mem, ldb, (float *)s->Mem, - (float)rcond, (lapack_int *)r->Mem); + (float *)in->data(), lda, (float *)b->data(), ldb, (float *)s->data(), + (float)rcond, (lapack_int *)r->data()); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'sgelsd': Lapack INFO = ", info); } @@ -43,9 +43,10 @@ namespace cytnx { lapack_int info, lda, ldb; lda = N; ldb = nrhs; - info = LAPACKE_cgelsd(LAPACK_ROW_MAJOR, (lapack_int)M, (lapack_int)N, (lapack_int)nrhs, - (lapack_complex_float *)in->Mem, lda, (lapack_complex_float *)b->Mem, - ldb, (float *)s->Mem, (float)rcond, (lapack_int *)r->Mem); + info = + LAPACKE_cgelsd(LAPACK_ROW_MAJOR, (lapack_int)M, (lapack_int)N, (lapack_int)nrhs, + (lapack_complex_float *)in->data(), lda, (lapack_complex_float *)b->data(), + ldb, (float *)s->data(), (float)rcond, (lapack_int *)r->data()); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'cgelsd': Lapack INFO = ", info); } @@ -59,9 +60,10 @@ namespace cytnx { lapack_int info, lda, ldb; lda = N; ldb = nrhs; - info = LAPACKE_zgelsd(LAPACK_ROW_MAJOR, (lapack_int)M, (lapack_int)N, (lapack_int)nrhs, - (lapack_complex_double *)in->Mem, lda, (lapack_complex_double *)b->Mem, - ldb, (double *)s->Mem, (double)rcond, (lapack_int *)r->Mem); + info = + LAPACKE_zgelsd(LAPACK_ROW_MAJOR, (lapack_int)M, (lapack_int)N, (lapack_int)nrhs, + (lapack_complex_double *)in->data(), lda, (lapack_complex_double *)b->data(), + ldb, (double *)s->data(), (double)rcond, (lapack_int *)r->data()); cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zgelsd': Lapack INFO = ", info); } diff --git a/src/backend/linalg_internal_cpu/Matmul_dg_internal.cpp b/src/backend/linalg_internal_cpu/Matmul_dg_internal.cpp index 3a6927b3..926416f8 100644 --- a/src/backend/linalg_internal_cpu/Matmul_dg_internal.cpp +++ b/src/backend/linalg_internal_cpu/Matmul_dg_internal.cpp @@ -31,9 +31,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr 
&inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_inl = (cytnx_complex128 *)inl->Mem; - cytnx_complex128 *_inr = (cytnx_complex128 *)inr->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_inl = (cytnx_complex128 *)inl->data(); + cytnx_complex128 *_inr = (cytnx_complex128 *)inr->data(); blas_int blsMl = Ml, blsNr = Nr; blas_int blsONE = 1; @@ -49,9 +49,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_inl = (cytnx_complex64 *)inl->Mem; - cytnx_complex64 *_inr = (cytnx_complex64 *)inr->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_inl = (cytnx_complex64 *)inl->data(); + cytnx_complex64 *_inr = (cytnx_complex64 *)inr->data(); blas_int blsMl = Ml, blsNr = Nr; blas_int blsONE = 1; @@ -67,9 +67,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); blas_int blsMl = Ml, blsNr = Nr; blas_int blsONE = 1; @@ -85,9 +85,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); blas_int blsMl = Ml, blsNr = Nr; blas_int blsONE = 1; @@ -104,9 +104,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { @@ -119,9 +119,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { @@ -134,9 +134,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 
*)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { @@ -149,9 +149,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { @@ -163,9 +163,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_inl = (cytnx_int16 *)inl->Mem; - cytnx_int16 *_inr = (cytnx_int16 *)inr->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_inl = (cytnx_int16 *)inl->data(); + cytnx_int16 *_inr = (cytnx_int16 *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { @@ -178,9 +178,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_inl = (cytnx_uint16 *)inl->Mem; - cytnx_uint16 *_inr = (cytnx_uint16 *)inr->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_inl = (cytnx_uint16 *)inl->data(); + cytnx_uint16 *_inr = (cytnx_uint16 *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { @@ -191,9 +191,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_inl = (cytnx_bool *)inl->Mem; - cytnx_bool *_inr = (cytnx_bool *)inr->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_inl = (cytnx_bool *)inl->data(); + cytnx_bool *_inr = (cytnx_bool *)inr->data(); if (diag_L) { Matmul_dg_diagL_driver(_out, _inl, _inr, Ml, Comm, Nr); } else { diff --git a/src/backend/linalg_internal_cpu/Matmul_internal.cpp b/src/backend/linalg_internal_cpu/Matmul_internal.cpp index 84dbbbee..70f82d20 100644 --- a/src/backend/linalg_internal_cpu/Matmul_internal.cpp +++ b/src/backend/linalg_internal_cpu/Matmul_internal.cpp @@ -28,9 +28,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_inl = (cytnx_complex128 *)inl->Mem; - cytnx_complex128 *_inr = (cytnx_complex128 *)inr->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_inl = (cytnx_complex128 *)inl->data(); + cytnx_complex128 *_inr = (cytnx_complex128 *)inr->data(); cytnx_complex128 alpha = cytnx_complex128(1, 0), beta = cytnx_complex128(0, 0); blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -42,9 +42,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_complex64 *_out = 
(cytnx_complex64 *)out->Mem; - cytnx_complex64 *_inl = (cytnx_complex64 *)inl->Mem; - cytnx_complex64 *_inr = (cytnx_complex64 *)inr->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_inl = (cytnx_complex64 *)inl->data(); + cytnx_complex64 *_inr = (cytnx_complex64 *)inr->data(); cytnx_complex64 alpha = cytnx_complex64(1, 0), beta = cytnx_complex64(0, 0); blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -56,9 +56,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); cytnx_double alpha = 1, beta = 0; blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -70,9 +70,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); cytnx_float alpha = 1, beta = 0; blas_int blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -84,9 +84,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } @@ -94,9 +94,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } @@ -104,9 +104,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } @@ -114,18 +114,18 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 
*_inr = (cytnx_uint64 *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } void Matmul_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_inl = (cytnx_int16 *)inl->Mem; - cytnx_int16 *_inr = (cytnx_int16 *)inr->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_inl = (cytnx_int16 *)inl->data(); + cytnx_int16 *_inr = (cytnx_int16 *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } @@ -133,18 +133,18 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_inl = (cytnx_uint16 *)inl->Mem; - cytnx_uint16 *_inr = (cytnx_uint16 *)inr->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_inl = (cytnx_uint16 *)inl->data(); + cytnx_uint16 *_inr = (cytnx_uint16 *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } void Matmul_internal_b(boost::intrusive_ptr &out, const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_inl = (cytnx_bool *)inl->Mem; - cytnx_bool *_inr = (cytnx_bool *)inr->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_inl = (cytnx_bool *)inl->data(); + cytnx_bool *_inr = (cytnx_bool *)inr->data(); Matmul_driver(_out, _inl, _inr, Ml, Comm, Nr); } diff --git a/src/backend/linalg_internal_cpu/Matvec_internal.cpp b/src/backend/linalg_internal_cpu/Matvec_internal.cpp index 9ba2071d..11d6aac6 100644 --- a/src/backend/linalg_internal_cpu/Matvec_internal.cpp +++ b/src/backend/linalg_internal_cpu/Matvec_internal.cpp @@ -26,9 +26,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_inl = (cytnx_complex128 *)inl->Mem; - cytnx_complex128 *_inr = (cytnx_complex128 *)inr->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_inl = (cytnx_complex128 *)inl->data(); + cytnx_complex128 *_inr = (cytnx_complex128 *)inr->data(); cytnx_complex128 alpha = cytnx_complex128(1, 0), beta = cytnx_complex128(0, 0); blas_int ONE = 1; @@ -41,9 +41,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_inl = (cytnx_complex64 *)inl->Mem; - cytnx_complex64 *_inr = (cytnx_complex64 *)inr->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_inl = (cytnx_complex64 *)inl->data(); + cytnx_complex64 *_inr = (cytnx_complex64 *)inr->data(); cytnx_complex64 alpha = cytnx_complex64(1, 0), beta = cytnx_complex64(0, 0); blas_int ONE = 1; @@ -56,9 +56,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double 
*)inr->data(); cytnx_double alpha = 1, beta = 0; blas_int ONE = 1; @@ -71,9 +71,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); cytnx_float alpha = 1, beta = 0; blas_int ONE = 1; @@ -86,9 +86,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } @@ -96,9 +96,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } @@ -106,9 +106,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } @@ -116,18 +116,18 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } void Matvec_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_inl = (cytnx_int16 *)inl->Mem; - cytnx_int16 *_inr = (cytnx_int16 *)inr->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_inl = (cytnx_int16 *)inl->data(); + cytnx_int16 *_inr = (cytnx_int16 *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } @@ -135,18 +135,18 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_inl = (cytnx_uint16 *)inl->Mem; - cytnx_uint16 *_inr = (cytnx_uint16 *)inr->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_inl = (cytnx_uint16 *)inl->data(); + cytnx_uint16 *_inr = (cytnx_uint16 *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } void 
Matvec_internal_b(boost::intrusive_ptr &out, const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_inl = (cytnx_bool *)inl->Mem; - cytnx_bool *_inr = (cytnx_bool *)inr->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_inl = (cytnx_bool *)inl->data(); + cytnx_bool *_inr = (cytnx_bool *)inr->data(); Matvec_driver(_out, _inl, _inr, Ml, Nr); } diff --git a/src/backend/linalg_internal_cpu/MaxMin_internal.cpp b/src/backend/linalg_internal_cpu/MaxMin_internal.cpp index a9f3089f..fbcc6876 100644 --- a/src/backend/linalg_internal_cpu/MaxMin_internal.cpp +++ b/src/backend/linalg_internal_cpu/MaxMin_internal.cpp @@ -15,8 +15,8 @@ namespace cytnx { void MaxMin_internal_u64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_uint64 *_ten = (cytnx_uint64 *)ten->Mem; - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; + cytnx_uint64 *_ten = (cytnx_uint64 *)ten->data(); + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -75,8 +75,8 @@ namespace cytnx { void MaxMin_internal_i64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_int64 *_ten = (cytnx_int64 *)ten->Mem; - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; + cytnx_int64 *_ten = (cytnx_int64 *)ten->data(); + cytnx_int64 *_out = (cytnx_int64 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -135,8 +135,8 @@ namespace cytnx { void MaxMin_internal_u32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_uint32 *_ten = (cytnx_uint32 *)ten->Mem; - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; + cytnx_uint32 *_ten = (cytnx_uint32 *)ten->data(); + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -195,8 +195,8 @@ namespace cytnx { void MaxMin_internal_i32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_int32 *_ten = (cytnx_int32 *)ten->Mem; - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; + cytnx_int32 *_ten = (cytnx_int32 *)ten->data(); + cytnx_int32 *_out = (cytnx_int32 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -255,8 +255,8 @@ namespace cytnx { void MaxMin_internal_u16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_uint16 *_ten = (cytnx_uint16 *)ten->Mem; - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; + cytnx_uint16 *_ten = (cytnx_uint16 *)ten->data(); + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -315,8 +315,8 @@ namespace cytnx { void MaxMin_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_int16 *_ten = (cytnx_int16 *)ten->Mem; - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; + cytnx_int16 *_ten = (cytnx_int16 *)ten->data(); + cytnx_int16 *_out = (cytnx_int16 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -375,8 +375,8 @@ namespace cytnx { void MaxMin_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_double *_ten = (cytnx_double *)ten->Mem; - cytnx_double *_out = (cytnx_double *)out->Mem; + cytnx_double *_ten = (cytnx_double *)ten->data(); + cytnx_double *_out = (cytnx_double *)out->data(); if (type == 'x') { #ifdef 
UNI_OMP @@ -435,8 +435,8 @@ namespace cytnx { void MaxMin_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_float *_ten = (cytnx_float *)ten->Mem; - cytnx_float *_out = (cytnx_float *)out->Mem; + cytnx_float *_ten = (cytnx_float *)ten->data(); + cytnx_float *_out = (cytnx_float *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -494,8 +494,8 @@ namespace cytnx { void MaxMin_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem; - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; + cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data(); + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); if (type == 'x') { #ifdef UNI_OMP @@ -556,8 +556,8 @@ namespace cytnx { void MaxMin_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem; - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; + cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data(); + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); if (type == 'x') { #ifdef UNI_OMP diff --git a/src/backend/linalg_internal_cpu/Mod_internal.cpp b/src/backend/linalg_internal_cpu/Mod_internal.cpp index 93925561..e2712c71 100644 --- a/src/backend/linalg_internal_cpu/Mod_internal.cpp +++ b/src/backend/linalg_internal_cpu/Mod_internal.cpp @@ -210,9 +210,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -262,9 +262,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -314,9 +314,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -366,9 +366,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -418,9 +418,9 @@ namespace 
cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -470,9 +470,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -522,9 +522,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -574,9 +574,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -626,9 +626,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -695,9 +695,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -747,9 +747,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -799,9 +799,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -851,9 +851,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -903,9 +903,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -955,9 +955,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1007,9 +1007,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1059,9 +1059,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1111,9 +1111,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1181,9 +1181,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - 
cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1233,9 +1233,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1285,9 +1285,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1337,9 +1337,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1389,9 +1389,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1441,9 +1441,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1493,9 +1493,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1545,9 +1545,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out 
= (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1597,9 +1597,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1667,9 +1667,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1719,9 +1719,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1771,9 +1771,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1823,9 +1823,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1875,9 +1875,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1927,9 +1927,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const 
std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1979,9 +1979,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2031,9 +2031,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2083,9 +2083,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2153,9 +2153,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2205,9 +2205,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2257,9 +2257,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2309,9 +2309,9 @@ namespace cytnx { const unsigned long long &len, const std::vector 
&shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2361,9 +2361,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2413,9 +2413,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2465,9 +2465,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2517,9 +2517,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2569,9 +2569,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2639,9 +2639,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2691,9 +2691,9 @@ namespace 
cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2743,9 +2743,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2795,9 +2795,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2847,9 +2847,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2899,9 +2899,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2951,9 +2951,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3003,9 +3003,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = 
(cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3055,9 +3055,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3125,9 +3125,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3177,9 +3177,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3229,9 +3229,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3281,9 +3281,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3333,9 +3333,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3385,9 +3385,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 
*)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3437,9 +3437,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3489,9 +3489,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3541,9 +3541,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3610,9 +3610,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3662,9 +3662,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3714,9 +3714,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3766,9 +3766,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + 
cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3818,9 +3818,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3870,9 +3870,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3922,9 +3922,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3974,9 +3974,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4026,9 +4026,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4095,9 +4095,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4147,9 +4147,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + 
cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4199,9 +4199,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4251,9 +4251,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4303,9 +4303,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4355,9 +4355,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4407,9 +4407,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4459,9 +4459,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4511,9 +4511,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool 
*)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) diff --git a/src/backend/linalg_internal_cpu/Mul_internal.cpp b/src/backend/linalg_internal_cpu/Mul_internal.cpp index d195ad22..16a492e8 100644 --- a/src/backend/linalg_internal_cpu/Mul_internal.cpp +++ b/src/backend/linalg_internal_cpu/Mul_internal.cpp @@ -17,9 +17,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -82,9 +82,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -141,9 +141,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -201,9 +201,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -261,9 +261,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -321,9 +321,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -380,9 +380,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = 
(cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -440,9 +440,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -500,9 +500,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -561,9 +561,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -623,9 +623,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset( @@ -699,9 +699,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -763,9 +763,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -823,9 +823,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 
*)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -883,9 +883,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -943,9 +943,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1003,9 +1003,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1062,9 +1062,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1121,9 +1121,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1180,9 +1180,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1239,9 +1239,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = 
(cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -1321,9 +1321,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -1386,9 +1386,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -1446,9 +1446,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -1506,9 +1506,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1565,9 +1565,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -1625,9 +1625,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1684,9 +1684,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = 
(cytnx_int16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -1744,9 +1744,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -1803,9 +1803,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -1893,9 +1893,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -1957,9 +1957,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -2017,9 +2017,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -2076,9 +2076,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -2135,9 +2135,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -2195,9 +2195,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float 
*)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; @@ -2255,9 +2255,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); blas_int N = len; blas_int ONE = 1; if (Lin->size() == 1) { @@ -2314,9 +2314,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -2413,9 +2413,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2465,9 +2465,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2517,9 +2517,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2569,9 +2569,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2621,9 +2621,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 
*_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2673,9 +2673,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2725,9 +2725,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -2831,9 +2831,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2883,9 +2883,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2935,9 +2935,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2987,9 +2987,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3039,9 +3039,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3091,9 +3091,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -3205,9 +3205,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3257,9 +3257,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3309,9 +3309,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3361,9 +3361,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3413,9 +3413,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -3536,9 +3536,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3588,9 +3588,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3640,9 +3640,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3692,9 +3692,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -3822,9 +3822,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3874,9 +3874,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3926,9 +3926,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { memset(_out, 0, @@ -4064,9 +4064,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const 
std::vector &invmapper_R) {
-      cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem;
-      cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem;
-      cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem;
+      cytnx_uint16 *_out = (cytnx_uint16 *)out->data();
+      cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data();
+      cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data();
       if (Lin->size() == 1) {
 #pragma omp parallel for schedule(dynamic)
@@ -4116,9 +4116,9 @@ namespace cytnx {
                            const std::vector &shape, const std::vector &invmapper_L,
                            const std::vector &invmapper_R) {
-      cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem;
-      cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem;
-      cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem;
+      cytnx_uint16 *_out = (cytnx_uint16 *)out->data();
+      cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data();
+      cytnx_bool *_Rin = (cytnx_bool *)Rin->data();
       if (Lin->size() == 1) {
         memset(_out, 0,
@@ -4262,9 +4262,9 @@ namespace cytnx {
                            const std::vector &shape, const std::vector &invmapper_L,
                            const std::vector &invmapper_R) {
-      cytnx_bool *_out = (cytnx_bool *)out->Mem;
-      cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem;
-      cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem;
+      cytnx_bool *_out = (cytnx_bool *)out->data();
+      cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+      cytnx_bool *_Rin = (cytnx_bool *)Rin->data();
       if (Lin->size() == 1) {
         if (_Lin[0])
diff --git a/src/backend/linalg_internal_cpu/Norm_internal.cpp b/src/backend/linalg_internal_cpu/Norm_internal.cpp
index 4f316b24..f6e6295d 100644
--- a/src/backend/linalg_internal_cpu/Norm_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Norm_internal.cpp
@@ -13,34 +13,34 @@ namespace cytnx {
     /// Norm
     void Norm_internal_cd(void *out, const boost::intrusive_ptr &Rin) {
       double *od = static_cast(out);
-      cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem;
+      cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data();
       blas_int incx = 1;
-      blas_int Len = Rin->len;
+      blas_int Len = Rin->size();
       *od = dznrm2(&Len, _Rin, &incx);
     }
     void Norm_internal_cf(void *out, const boost::intrusive_ptr &Rin) {
       float *od = static_cast(out);
-      cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem;
+      cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data();
       blas_int incx = 1;
-      blas_int Len = Rin->len;
+      blas_int Len = Rin->size();
       *od = scnrm2(&Len, _Rin, &incx);
     }
     void Norm_internal_d(void *out, const boost::intrusive_ptr &Rin) {
       double *od = static_cast(out);
-      cytnx_double *_Rin = (cytnx_double *)Rin->Mem;
+      cytnx_double *_Rin = (cytnx_double *)Rin->data();
       blas_int incx = 1;
-      blas_int Len = Rin->len;
+      blas_int Len = Rin->size();
       *od = dnrm2(&Len, _Rin, &incx);
     }
     void Norm_internal_f(void *out, const boost::intrusive_ptr &Rin) {
       float *od = static_cast(out);
-      cytnx_float *_Rin = (cytnx_float *)Rin->Mem;
+      cytnx_float *_Rin = (cytnx_float *)Rin->data();
       blas_int incx = 1;
-      blas_int Len = Rin->len;
+      blas_int Len = Rin->size();
       *od = snrm2(&Len, _Rin, &incx);
     }
diff --git a/src/backend/linalg_internal_cpu/Outer_internal.cpp b/src/backend/linalg_internal_cpu/Outer_internal.cpp
index 8e7e55df..4d8c670b 100644
--- a/src/backend/linalg_internal_cpu/Outer_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Outer_internal.cpp
@@ -19,9 +19,9 @@ namespace cytnx {
                         const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin,
                         const cytnx_uint64 &j1, const cytnx_uint64 &j2) {
-      TO *_out = (TO *)out->Mem;
-      T1 *_Lin = (T1 *)Lin->Mem;
-      T2 *_Rin = (T2 *)Rin->Mem;
+      TO *_out = (TO *)out->data();
+      T1 *_Lin = (T1 *)Lin->data();
+      T2 *_Rin = (T2 *)Rin->data();
 #pragma omp parallel for schedule(dynamic)
       for (unsigned long long r = 0; r < j1 * j2; r++) {
diff --git a/src/backend/linalg_internal_cpu/Pow_internal.cpp b/src/backend/linalg_internal_cpu/Pow_internal.cpp
index c304d6c1..8658edcd 100644
--- a/src/backend/linalg_internal_cpu/Pow_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Pow_internal.cpp
@@ -11,8 +11,8 @@ namespace cytnx {
     void Pow_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten,
                         const cytnx_uint64 &Nelem, const cytnx_double &p) {
-      cytnx_double *_out = (cytnx_double *)out->Mem;
-      cytnx_double *_ten = (cytnx_double *)ten->Mem;
+      cytnx_double *_out = (cytnx_double *)out->data();
+      cytnx_double *_ten = (cytnx_double *)ten->data();
 #pragma omp parallel for schedule(dynamic)
       for (cytnx_uint64 n = 0; n < Nelem; n++) {
@@ -23,8 +23,8 @@ namespace cytnx {
     void Pow_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten,
                         const cytnx_uint64 &Nelem, const cytnx_double &p) {
-      cytnx_float *_out = (cytnx_float *)out->Mem;
-      cytnx_float *_ten = (cytnx_float *)ten->Mem;
+      cytnx_float *_out = (cytnx_float *)out->data();
+      cytnx_float *_ten = (cytnx_float *)ten->data();
 #pragma omp parallel for
       for (cytnx_uint64 n = 0; n < Nelem; n++) {
@@ -35,8 +35,8 @@ namespace cytnx {
     void Pow_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten,
                          const cytnx_uint64 &Nelem, const cytnx_double &p) {
-      cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem;
-      cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem;
+      cytnx_complex128 *_out = (cytnx_complex128 *)out->data();
+      cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data();
 #pragma omp parallel for schedule(dynamic)
       for (cytnx_uint64 n = 0; n < Nelem; n++) {
@@ -47,8 +47,8 @@ namespace cytnx {
     void Pow_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten,
                          const cytnx_uint64 &Nelem, const cytnx_double &p) {
-      cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem;
-      cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem;
+      cytnx_complex64 *_out = (cytnx_complex64 *)out->data();
+      cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data();
 #pragma omp parallel for
       for (cytnx_uint64 n = 0; n < Nelem; n++) {
diff --git a/src/backend/linalg_internal_cpu/QR_internal.cpp b/src/backend/linalg_internal_cpu/QR_internal.cpp
index 0057e24c..a9b807d7 100644
--- a/src/backend/linalg_internal_cpu/QR_internal.cpp
+++ b/src/backend/linalg_internal_cpu/QR_internal.cpp
@@ -34,12 +34,12 @@ namespace cytnx {
       // Q should be the same shape as in
       // tau should be the min(M,N)
-      cytnx_complex128 *pQ = (cytnx_complex128 *)Q->Mem;
-      cytnx_complex128 *pR = (cytnx_complex128 *)R->Mem;
-      cytnx_complex128 *ptau = (cytnx_complex128 *)tau->Mem;
+      cytnx_complex128 *pQ = (cytnx_complex128 *)Q->data();
+      cytnx_complex128 *pR = (cytnx_complex128 *)R->data();
+      cytnx_complex128 *ptau = (cytnx_complex128 *)tau->data();
       // cytnx_complex128* Mij = (cytnx_complex128*)malloc(M * N * sizeof(cytnx_complex128));
-      memcpy(pQ, in->Mem, M * N * sizeof(cytnx_complex128));
+      memcpy(pQ, in->data(), M * N * sizeof(cytnx_complex128));
       lapack_int ldA = N;
       lapack_int info;
@@ -56,7 +56,7 @@ namespace cytnx {
       // getD:
       if (is_d) {
-        cytnx_complex128 *pD = (cytnx_complex128 *)D->Mem;
+        cytnx_complex128 *pD = (cytnx_complex128 *)D->data();
         GetDiag(pD, pR, M, N, N);
         cytnx_uint64 min = M < N ?
M : N; // normalize: @@ -87,12 +87,12 @@ namespace cytnx { // Q should be the same shape as in // tau should be the min(M,N) - cytnx_complex64 *pQ = (cytnx_complex64 *)Q->Mem; - cytnx_complex64 *pR = (cytnx_complex64 *)R->Mem; - cytnx_complex64 *ptau = (cytnx_complex64 *)tau->Mem; + cytnx_complex64 *pQ = (cytnx_complex64 *)Q->data(); + cytnx_complex64 *pR = (cytnx_complex64 *)R->data(); + cytnx_complex64 *ptau = (cytnx_complex64 *)tau->data(); // cytnx_complex128* Mij = (cytnx_complex128*)malloc(M * N * sizeof(cytnx_complex128)); - memcpy(pQ, in->Mem, M * N * sizeof(cytnx_complex64)); + memcpy(pQ, in->data(), M * N * sizeof(cytnx_complex64)); lapack_int ldA = N; lapack_int info; @@ -109,7 +109,7 @@ namespace cytnx { // getD: if (is_d) { - cytnx_complex64 *pD = (cytnx_complex64 *)D->Mem; + cytnx_complex64 *pD = (cytnx_complex64 *)D->data(); GetDiag(pD, pR, M, N, N); cytnx_uint64 min = M < N ? M : N; // normalize: @@ -139,11 +139,11 @@ namespace cytnx { // Q should be the same shape as in // tau should be the min(M,N) - cytnx_double *pQ = (cytnx_double *)Q->Mem; - cytnx_double *pR = (cytnx_double *)R->Mem; - cytnx_double *ptau = (cytnx_double *)tau->Mem; + cytnx_double *pQ = (cytnx_double *)Q->data(); + cytnx_double *pR = (cytnx_double *)R->data(); + cytnx_double *ptau = (cytnx_double *)tau->data(); - memcpy(pQ, in->Mem, M * N * sizeof(cytnx_double)); + memcpy(pQ, in->data(), M * N * sizeof(cytnx_double)); lapack_int ldA = N; lapack_int info; @@ -159,7 +159,7 @@ namespace cytnx { // getD: if (is_d) { - cytnx_double *pD = (cytnx_double *)D->Mem; + cytnx_double *pD = (cytnx_double *)D->data(); GetDiag(pD, pR, M, N, N); cytnx_uint64 min = M < N ? M : N; // normalize: @@ -187,12 +187,12 @@ namespace cytnx { // Q should be the same shape as in // tau should be the min(M,N) - cytnx_float *pQ = (cytnx_float *)Q->Mem; - cytnx_float *pR = (cytnx_float *)R->Mem; - cytnx_float *ptau = (cytnx_float *)tau->Mem; + cytnx_float *pQ = (cytnx_float *)Q->data(); + cytnx_float *pR = (cytnx_float *)R->data(); + cytnx_float *ptau = (cytnx_float *)tau->data(); // cytnx_complex128* Mij = (cytnx_complex128*)malloc(M * N * sizeof(cytnx_complex128)); - memcpy(pQ, in->Mem, M * N * sizeof(cytnx_float)); + memcpy(pQ, in->data(), M * N * sizeof(cytnx_float)); lapack_int ldA = N; lapack_int info; @@ -208,7 +208,7 @@ namespace cytnx { // getD: if (is_d) { - cytnx_float *pD = (cytnx_float *)D->Mem; + cytnx_float *pD = (cytnx_float *)D->data(); GetDiag(pD, pR, M, N, N); cytnx_uint64 min = M < N ? M : N; // normalize: diff --git a/src/backend/linalg_internal_cpu/Sdd_internal.cpp b/src/backend/linalg_internal_cpu/Sdd_internal.cpp index a7feb8c9..04ccc3c0 100644 --- a/src/backend/linalg_internal_cpu/Sdd_internal.cpp +++ b/src/backend/linalg_internal_cpu/Sdd_internal.cpp @@ -15,8 +15,8 @@ namespace cytnx { // char jobu, jobv; // // if U and vT are NULL ptr, then it will not be computed. - // jobu = (U->dtype == Type.Void) ? 'N' : 'S'; - // jobv = (vT->dtype == Type.Void) ? 'N' : 'S'; + // jobu = (U->dtype() == Type.Void) ? 'N' : 'S'; + // jobv = (vT->dtype() == Type.Void) ? 
 
       lapack_int min = std::min(M, N);
       lapack_int max = std::max(M, N);
@@ -24,16 +24,18 @@
       lapack_int info;
 
       cytnx_complex128 *Mij = (cytnx_complex128 *)malloc(M * N * sizeof(cytnx_complex128));
-      memcpy(Mij, in->Mem, M * N * sizeof(cytnx_complex128));
+      memcpy(Mij, in->data(), M * N * sizeof(cytnx_complex128));
 
       char jobz = 'S';
-      if (U->dtype == Type.Void and vT->dtype == Type.Void) {
+      if (U->dtype() == Type.Void and vT->dtype() == Type.Void) {
         jobz = 'N';
       }
       void *UMem =
-        (U->Mem ? U->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex128)) : NULL));
+        (U->data() ? U->data()
+                   : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex128)) : NULL));
       void *vTMem =
-        (vT->Mem ? vT->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex128)) : NULL));
+        (vT->data() ? vT->data()
+                    : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex128)) : NULL));
 
       // double *superb = (double *)malloc(sizeof(double) * (min - 1));
@@ -42,17 +44,17 @@
       // (cytnx_double *)S->Mem, (lapack_complex_double *)vT->Mem, ldu,
       // (lapack_complex_double *)U->Mem, ldvT, superb);
       info = LAPACKE_zgesdd(LAPACK_COL_MAJOR, jobz, N, M, (lapack_complex_double *)Mij, ldA,
-                            (cytnx_double *)S->Mem, (lapack_complex_double *)vTMem, ldu,
+                            (cytnx_double *)S->data(), (lapack_complex_double *)vTMem, ldu,
                             (lapack_complex_double *)UMem, ldvT);
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'zgesvd': Lapack INFO = ", info);
 
       free(Mij);
       // free(superb);
 
-      if (UMem != nullptr and U->dtype == Type.Void) {
+      if (UMem != nullptr and U->dtype() == Type.Void) {
         free(UMem);
       }
-      if (vTMem != nullptr and vT->dtype == Type.Void) {
+      if (vTMem != nullptr and vT->dtype() == Type.Void) {
         free(vTMem);
       }
     }
@@ -63,8 +65,8 @@ {
                          const cytnx_int64 &N) {
       // char jobu, jobv;
 
-      // jobu = (U->dtype == Type.Void) ? 'N' : 'S';
-      // jobv = (vT->dtype == Type.Void) ? 'N' : 'S';
+      // jobu = (U->dtype() == Type.Void) ? 'N' : 'S';
+      // jobv = (vT->dtype() == Type.Void) ? 'N' : 'S';
 
       lapack_int min = std::min(M, N);
       lapack_int max = std::max(M, N);
@@ -72,31 +74,32 @@
       lapack_int info;
 
       cytnx_complex64 *Mij = (cytnx_complex64 *)malloc(M * N * sizeof(cytnx_complex64));
-      memcpy(Mij, in->Mem, M * N * sizeof(cytnx_complex64));
+      memcpy(Mij, in->data(), M * N * sizeof(cytnx_complex64));
 
       char jobz = 'S';
-      if (U->dtype == Type.Void and vT->dtype == Type.Void) {
+      if (U->dtype() == Type.Void and vT->dtype() == Type.Void) {
         jobz = 'N';
       }
-      void *UMem =
-        (U->Mem ? U->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex64)) : NULL));
+      void *UMem = (U->data() ? U->data()
+                              : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex64)) : NULL));
       void *vTMem =
-        (vT->Mem ? vT->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex64)) : NULL));
+        (vT->data() ? vT->data()
+                    : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_complex64)) : NULL));
 
       // double *superb = (double *)malloc(sizeof(double) * (min - 1));
       // info = LAPACKE_dgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_double *)S->Mem,
       // (cytnx_double *)vT->Mem, ldu, (cytnx_double *)U->Mem, ldvT, superb);
       info = LAPACKE_cgesdd(LAPACK_COL_MAJOR, jobz, N, M, (lapack_complex_float *)Mij, ldA,
-                            (cytnx_float *)S->Mem, (lapack_complex_float *)vTMem, ldu,
+                            (cytnx_float *)S->data(), (lapack_complex_float *)vTMem, ldu,
                             (lapack_complex_float *)UMem, ldvT);
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgesvd': Lapack INFO = ", info);
 
       // free(superb);
-      if (UMem != nullptr and U->dtype == Type.Void) {
+      if (UMem != nullptr and U->dtype() == Type.Void) {
         free(UMem);
       }
-      if (vTMem != nullptr and vT->dtype == Type.Void) {
+      if (vTMem != nullptr and vT->dtype() == Type.Void) {
         free(vTMem);
       }
       free(Mij);
@@ -108,8 +111,8 @@ {
                          const cytnx_int64 &N) {
       // char jobu, jobv;
 
-      // jobu = (U->dtype == Type.Void) ? 'N' : 'S';
-      // jobv = (vT->dtype == Type.Void) ? 'N' : 'S';
+      // jobu = (U->dtype() == Type.Void) ? 'N' : 'S';
+      // jobv = (vT->dtype() == Type.Void) ? 'N' : 'S';
 
       lapack_int min = std::min(M, N);
       lapack_int max = std::max(M, N);
@@ -117,32 +120,32 @@
       lapack_int info;
 
       cytnx_double *Mij = (cytnx_double *)malloc(M * N * sizeof(cytnx_double));
-      memcpy(Mij, in->Mem, M * N * sizeof(cytnx_double));
+      memcpy(Mij, in->data(), M * N * sizeof(cytnx_double));
 
       char jobz = 'S';
-      if (U->dtype == Type.Void and vT->dtype == Type.Void) {
+      if (U->dtype() == Type.Void and vT->dtype() == Type.Void) {
         jobz = 'N';
       }
       void *UMem =
-        (U->Mem ? U->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_double)) : NULL));
+        (U->data() ? U->data() : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_double)) : NULL));
       void *vTMem =
-        (vT->Mem ? vT->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_double)) : NULL));
+        (vT->data() ? vT->data() : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_double)) : NULL));
 
       // double *superb = (double *)malloc(sizeof(double) * (min - 1));
       // info = LAPACKE_dgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_double *)S->Mem,
       // (cytnx_double *)vT->Mem, ldu, (cytnx_double *)U->Mem, ldvT, superb);
       info = LAPACKE_dgesdd(LAPACK_COL_MAJOR, jobz, N, M, (cytnx_double *)Mij, ldA,
-                            (cytnx_double *)S->Mem, (cytnx_double *)vTMem, ldu,
+                            (cytnx_double *)S->data(), (cytnx_double *)vTMem, ldu,
                             (cytnx_double *)UMem, ldvT);
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgesvd': Lapack INFO = ", info);
 
       // free(superb);
-      if (UMem != nullptr and U->dtype == Type.Void) {
+      if (UMem != nullptr and U->dtype() == Type.Void) {
         free(UMem);
       }
-      if (vTMem != nullptr and vT->dtype == Type.Void) {
+      if (vTMem != nullptr and vT->dtype() == Type.Void) {
         free(vTMem);
       }
       free(Mij);
@@ -154,8 +157,8 @@ {
                          const cytnx_int64 &N) {
       // char jobu, jobv;
 
-      // jobu = (U->dtype == Type.Void) ? 'N' : 'S';
-      // jobv = (vT->dtype == Type.Void) ? 'N' : 'S';
+      // jobu = (U->dtype() == Type.Void) ? 'N' : 'S';
+      // jobv = (vT->dtype() == Type.Void) ? 'N' : 'S';
 
       lapack_int min = std::min(M, N);
       lapack_int max = std::max(M, N);
@@ -163,32 +166,32 @@
       lapack_int info;
 
       cytnx_float *Mij = (cytnx_float *)malloc(M * N * sizeof(cytnx_float));
-      memcpy(Mij, in->Mem, M * N * sizeof(cytnx_float));
+      memcpy(Mij, in->data(), M * N * sizeof(cytnx_float));
 
       char jobz = 'S';
-      if (U->dtype == Type.Void and vT->dtype == Type.Void) {
+      if (U->dtype() == Type.Void and vT->dtype() == Type.Void) {
         jobz = 'N';
       }
       void *UMem =
-        (U->Mem ? U->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_float)) : NULL));
+        (U->data() ? U->data() : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_float)) : NULL));
       void *vTMem =
-        (vT->Mem ? vT->Mem : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_float)) : NULL));
+        (vT->data() ? vT->data() : (jobz == 'S' ? malloc(max * max * sizeof(cytnx_float)) : NULL));
 
       // double *superb = (double *)malloc(sizeof(double) * (min - 1));
       // info = LAPACKE_dgesvd(LAPACK_COL_MAJOR, jobv, jobu, N, M, Mij, ldA, (cytnx_double *)S->Mem,
       // (cytnx_double *)vT->Mem, ldu, (cytnx_double *)U->Mem, ldvT, superb);
-      info =
-        LAPACKE_sgesdd(LAPACK_COL_MAJOR, jobz, N, M, (cytnx_float *)Mij, ldA, (cytnx_float *)S->Mem,
-                       (cytnx_float *)vTMem, ldu, (cytnx_float *)UMem, ldvT);
+      info = LAPACKE_sgesdd(LAPACK_COL_MAJOR, jobz, N, M, (cytnx_float *)Mij, ldA,
+                            (cytnx_float *)S->data(), (cytnx_float *)vTMem, ldu,
+                            (cytnx_float *)UMem, ldvT);
       cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'dgesvd': Lapack INFO = ", info);
 
       // free(superb);
-      if (UMem != nullptr and U->dtype == Type.Void) {
+      if (UMem != nullptr and U->dtype() == Type.Void) {
         free(UMem);
       }
-      if (vTMem != nullptr and vT->dtype == Type.Void) {
+      if (vTMem != nullptr and vT->dtype() == Type.Void) {
         free(vTMem);
       }
       free(Mij);
diff --git a/src/backend/linalg_internal_cpu/Sub_internal.cpp b/src/backend/linalg_internal_cpu/Sub_internal.cpp
index 97610659..e618b71a 100644
--- a/src/backend/linalg_internal_cpu/Sub_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Sub_internal.cpp
@@ -16,9 +16,9 @@ namespace cytnx {
                             const std::vector<cytnx_uint64> &shape,
                             const std::vector<cytnx_uint64> &invmapper_L,
                             const std::vector<cytnx_uint64> &invmapper_R) {
-      cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem;
-      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
-      cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem;
+      cytnx_complex128 *_out = (cytnx_complex128 *)out->data();
+      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+      cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data();
       if (Lin->size() == 1) {
 #pragma omp parallel for schedule(dynamic)
         for (unsigned long long i = 0; i < len; i++) {
@@ -67,9 +67,9 @@ namespace cytnx {
                             const std::vector<cytnx_uint64> &shape,
                             const std::vector<cytnx_uint64> &invmapper_L,
                             const std::vector<cytnx_uint64> &invmapper_R) {
-      cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem;
-      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
-      cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem;
+      cytnx_complex128 *_out = (cytnx_complex128 *)out->data();
+      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+      cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data();
 
       if (Lin->size() == 1) {
 #pragma omp parallel for schedule(dynamic)
@@ -119,9 +119,9 @@ namespace cytnx {
                             const std::vector<cytnx_uint64> &shape,
                             const std::vector<cytnx_uint64> &invmapper_L,
                             const std::vector<cytnx_uint64> &invmapper_R) {
-      cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem;
-      cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
-      cytnx_double *_Rin = (cytnx_double *)Rin->Mem;
+      cytnx_complex128 *_out = (cytnx_complex128 *)out->data();
+      cytnx_complex128 *_Lin =
(cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -175,9 +175,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -231,9 +231,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -287,9 +287,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -343,9 +343,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -399,9 +399,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -455,9 +455,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -511,9 +511,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = 
(cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -567,9 +567,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -622,9 +622,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -674,9 +674,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -726,9 +726,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -782,9 +782,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -838,9 +838,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -894,9 +894,9 @@ namespace cytnx { const std::vector &shape, 
const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -950,9 +950,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1006,9 +1006,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1062,9 +1062,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1118,9 +1118,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1174,9 +1174,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1228,9 +1228,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); // std::cout << 
_Rin[0] << std::endl; if (Lin->size() == 1) { @@ -1285,9 +1285,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1341,9 +1341,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1393,9 +1393,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1445,9 +1445,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1497,9 +1497,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1549,9 +1549,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1601,9 +1601,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for 
schedule(dynamic) @@ -1653,9 +1653,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1705,9 +1705,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1757,9 +1757,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1810,9 +1810,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1866,9 +1866,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1922,9 +1922,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -1974,9 +1974,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ 
-2026,9 +2026,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2078,9 +2078,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2130,9 +2130,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2182,9 +2182,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2234,9 +2234,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2286,9 +2286,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2338,9 +2338,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2392,9 +2392,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2448,9 +2448,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2504,9 +2504,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2556,9 +2556,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2608,9 +2608,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2660,9 +2660,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2712,9 +2712,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2764,9 +2764,9 @@ namespace cytnx { const unsigned long 
long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2816,9 +2816,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2868,9 +2868,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2920,9 +2920,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -2974,9 +2974,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3030,9 +3030,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3086,9 +3086,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for 
schedule(dynamic) @@ -3138,9 +3138,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3190,9 +3190,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3242,9 +3242,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3294,9 +3294,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3346,9 +3346,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3398,9 +3398,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3450,9 +3450,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = 
(cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3502,9 +3502,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3555,9 +3555,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3611,9 +3611,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3667,9 +3667,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3719,9 +3719,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3771,9 +3771,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3823,9 +3823,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 
*)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3875,9 +3875,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3927,9 +3927,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -3979,9 +3979,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4031,9 +4031,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4083,9 +4083,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4137,9 +4137,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4193,9 +4193,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_complex64 
*_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4249,9 +4249,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4301,9 +4301,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4353,9 +4353,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4405,9 +4405,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4457,9 +4457,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4509,9 +4509,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4561,9 +4561,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - 
cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4613,9 +4613,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4665,9 +4665,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4718,9 +4718,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4774,9 +4774,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4830,9 +4830,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4882,9 +4882,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4934,9 +4934,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -4986,9 +4986,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5038,9 +5038,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5090,9 +5090,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5142,9 +5142,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5194,9 +5194,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5246,9 +5246,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5299,9 +5299,9 @@ namespace cytnx 
{ const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5355,9 +5355,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5411,9 +5411,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5463,9 +5463,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5515,9 +5515,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5567,9 +5567,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5619,9 +5619,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 
1) { #pragma omp parallel for schedule(dynamic) @@ -5671,9 +5671,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5723,9 +5723,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5775,9 +5775,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5827,9 +5827,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5880,9 +5880,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_out = (cytnx_complex128 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5933,9 +5933,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_out = (cytnx_complex64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -5986,9 +5986,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + 
cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6038,9 +6038,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6090,9 +6090,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6142,9 +6142,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6194,9 +6194,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6246,9 +6246,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6298,9 +6298,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6350,9 +6350,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) @@ -6402,9 
+6402,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Lin->size() == 1) { #pragma omp parallel for schedule(dynamic) diff --git a/src/backend/linalg_internal_cpu/Sum_internal.cpp b/src/backend/linalg_internal_cpu/Sum_internal.cpp index 3ae5c832..d7fc37a3 100644 --- a/src/backend/linalg_internal_cpu/Sum_internal.cpp +++ b/src/backend/linalg_internal_cpu/Sum_internal.cpp @@ -15,8 +15,8 @@ namespace cytnx { void Sum_internal_u64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_uint64 *_ten = (cytnx_uint64 *)ten->Mem; - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; + cytnx_uint64 *_ten = (cytnx_uint64 *)ten->data(); + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); #ifdef UNI_OMP vector buf; @@ -48,8 +48,8 @@ namespace cytnx { void Sum_internal_i64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_int64 *_ten = (cytnx_int64 *)ten->Mem; - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; + cytnx_int64 *_ten = (cytnx_int64 *)ten->data(); + cytnx_int64 *_out = (cytnx_int64 *)out->data(); #ifdef UNI_OMP vector buf; @@ -81,8 +81,8 @@ namespace cytnx { void Sum_internal_u32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_uint32 *_ten = (cytnx_uint32 *)ten->Mem; - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; + cytnx_uint32 *_ten = (cytnx_uint32 *)ten->data(); + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); #ifdef UNI_OMP vector buf; @@ -114,8 +114,8 @@ namespace cytnx { void Sum_internal_i32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_int32 *_ten = (cytnx_int32 *)ten->Mem; - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; + cytnx_int32 *_ten = (cytnx_int32 *)ten->data(); + cytnx_int32 *_out = (cytnx_int32 *)out->data(); #ifdef UNI_OMP vector buf; @@ -147,8 +147,8 @@ namespace cytnx { void Sum_internal_u16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_uint16 *_ten = (cytnx_uint16 *)ten->Mem; - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; + cytnx_uint16 *_ten = (cytnx_uint16 *)ten->data(); + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); #ifdef UNI_OMP vector buf; @@ -180,8 +180,8 @@ namespace cytnx { void Sum_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_int16 *_ten = (cytnx_int16 *)ten->Mem; - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; + cytnx_int16 *_ten = (cytnx_int16 *)ten->data(); + cytnx_int16 *_out = (cytnx_int16 *)out->data(); #ifdef UNI_OMP vector buf; @@ -213,8 +213,8 @@ namespace cytnx { void Sum_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - cytnx_double *_ten = (cytnx_double *)ten->Mem; - cytnx_double *_out = (cytnx_double *)out->Mem; + cytnx_double *_ten = (cytnx_double *)ten->data(); + cytnx_double *_out = (cytnx_double *)out->data(); #ifdef UNI_OMP vector buf; @@ -246,8 +246,8 @@ namespace cytnx { void Sum_internal_f(boost::intrusive_ptr &out, const 
boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) {
- cytnx_float *_ten = (cytnx_float *)ten->Mem;
- cytnx_float *_out = (cytnx_float *)out->Mem;
+ cytnx_float *_ten = (cytnx_float *)ten->data();
+ cytnx_float *_out = (cytnx_float *)out->data();
#ifdef UNI_OMP
vector buf;
@@ -278,8 +278,8 @@ namespace cytnx {
void Sum_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) {
- cytnx_complex128 *_ten = (cytnx_complex128 *)ten->Mem;
- cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem;
+ cytnx_complex128 *_ten = (cytnx_complex128 *)ten->data();
+ cytnx_complex128 *_out = (cytnx_complex128 *)out->data();
#ifdef UNI_OMP
vector buf;
@@ -311,8 +311,8 @@ namespace cytnx {
void Sum_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) {
- cytnx_complex64 *_ten = (cytnx_complex64 *)ten->Mem;
- cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem;
+ cytnx_complex64 *_ten = (cytnx_complex64 *)ten->data();
+ cytnx_complex64 *_out = (cytnx_complex64 *)out->data();
#ifdef UNI_OMP
vector buf;
diff --git a/src/backend/linalg_internal_cpu/Tridiag_internal.cpp b/src/backend/linalg_internal_cpu/Tridiag_internal.cpp
index 6791a1f6..ac7d4bc9 100644
--- a/src/backend/linalg_internal_cpu/Tridiag_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Tridiag_internal.cpp
@@ -12,25 +12,25 @@ namespace cytnx {
boost::intrusive_ptr &U, const cytnx_int64 &L, bool throw_excp /*= false*/) {
char job;
- job = (U->dtype == Type.Void) ? 'N' : 'V';
+ job = (U->dtype() == Type.Void) ? 'N' : 'V';
// std::cout << L << std::endl;
// copy from in to S[out]
- memcpy(S->Mem, diag->Mem, L * sizeof(cytnx_double));
+ memcpy(S->data(), diag->data(), L * sizeof(cytnx_double));
// create tmp for sub-diag and cpy in:
cytnx_double *Dsv = (cytnx_double *)malloc((L - 1) * sizeof(cytnx_double));
- memcpy(Dsv, s_diag->Mem, (L - 1) * sizeof(cytnx_double));
+ memcpy(Dsv, s_diag->data(), (L - 1) * sizeof(cytnx_double));
lapack_int ldz = 1;
lapack_int info;
// check if compute eigV
- if (U->dtype != Type.Void) {
+ if (U->dtype() != Type.Void) {
ldz = L;
}
- info = LAPACKE_dstev(LAPACK_COL_MAJOR, job, L, (cytnx_double *)S->Mem, Dsv,
- (cytnx_double *)U->Mem, ldz);
+ info = LAPACKE_dstev(LAPACK_COL_MAJOR, job, L, (cytnx_double *)S->data(), Dsv,
+ (cytnx_double *)U->data(), ldz);
// std::cout << L << std::endl;
if (!throw_excp and info != 0) {
cytnx_error_msg(info != 0, "%s %d",
@@ -48,25 +48,25 @@ namespace cytnx {
boost::intrusive_ptr &U, const cytnx_int64 &L, bool throw_excp /*= false*/) {
char job;
- job = (U->dtype == Type.Void) ? 'N' : 'V';
+ job = (U->dtype() == Type.Void) ? 'N' : 'V';
// std::cout << L << std::endl;
// copy from in to S[out]
- memcpy(S->Mem, diag->Mem, L * sizeof(cytnx_float));
+ memcpy(S->data(), diag->data(), L * sizeof(cytnx_float));
// create tmp for sub-diag and cpy in:
cytnx_float *Dsv = (cytnx_float *)malloc((L - 1) * sizeof(cytnx_float));
- memcpy(Dsv, s_diag->Mem, (L - 1) * sizeof(cytnx_float));
+ memcpy(Dsv, s_diag->data(), (L - 1) * sizeof(cytnx_float));
lapack_int ldz = 1;
lapack_int info;
// check if compute eigV
- if (U->dtype != Type.Void) {
+ if (U->dtype() != Type.Void) {
ldz = L;
}
- info = LAPACKE_sstev(LAPACK_COL_MAJOR, job, L, (cytnx_float *)S->Mem, Dsv,
- (cytnx_float *)U->Mem, ldz);
+ info = LAPACKE_sstev(LAPACK_COL_MAJOR, job, L, (cytnx_float *)S->data(), Dsv,
+ (cytnx_float *)U->data(), ldz);
cytnx_error_msg(info != 0, "%s %d", "Error in Lapack function 'sstev': Lapack INFO = ", info);
// house keeping
diff --git a/src/backend/linalg_internal_cpu/Vectordot_internal.cpp b/src/backend/linalg_internal_cpu/Vectordot_internal.cpp
index 15c4b739..c7896a6a 100644
--- a/src/backend/linalg_internal_cpu/Vectordot_internal.cpp
+++ b/src/backend/linalg_internal_cpu/Vectordot_internal.cpp
@@ -15,9 +15,9 @@ namespace cytnx {
const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) {
- cytnx_complex128 *_out = (cytnx_complex128 *)out->Mem;
- cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
- cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem;
+ cytnx_complex128 *_out = (cytnx_complex128 *)out->data();
+ cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+ cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data();
_out[0] = 0;
unsigned long long remain = len;
@@ -55,9 +55,9 @@ namespace cytnx {
const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) {
- cytnx_complex64 *_out = (cytnx_complex64 *)out->Mem;
- cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem;
- cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem;
+ cytnx_complex64 *_out = (cytnx_complex64 *)out->data();
+ cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data();
+ cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data();
_out[0] = 0;
unsigned long long remain = len;
unsigned long long bias = 0;
@@ -94,9 +94,9 @@ namespace cytnx {
const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) {
- cytnx_double *_out = (cytnx_double *)out->Mem;
- cytnx_double *_Lin = (cytnx_double *)Lin->Mem;
- cytnx_double *_Rin = (cytnx_double *)Rin->Mem;
+ cytnx_double *_out = (cytnx_double *)out->data();
+ cytnx_double *_Lin = (cytnx_double *)Lin->data();
+ cytnx_double *_Rin = (cytnx_double *)Rin->data();
_out[0] = 0;
unsigned long long remain = len;
@@ -127,9 +127,9 @@ namespace cytnx {
const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) {
- cytnx_float *_out = (cytnx_float *)out->Mem;
- cytnx_float *_Lin = (cytnx_float *)Lin->Mem;
- cytnx_float *_Rin = (cytnx_float *)Rin->Mem;
+ cytnx_float *_out = (cytnx_float *)out->data();
+ cytnx_float *_Lin = (cytnx_float *)Lin->data();
+ cytnx_float *_Rin = (cytnx_float *)Rin->data();
_out[0] = 0;
unsigned long long remain = len;
@@ -160,9 +160,9 @@ namespace cytnx {
const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) {
- cytnx_int64 *_out = (cytnx_int64 *)out->Mem;
- cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem;
- cytnx_int64
*_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); #ifdef UNI_OMP std::vector tmp; @@ -191,9 +191,9 @@ namespace cytnx { const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); #ifdef UNI_OMP std::vector tmp; @@ -222,9 +222,9 @@ namespace cytnx { const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) { - blas_int *_out = (blas_int *)out->Mem; - blas_int *_Lin = (blas_int *)Lin->Mem; - blas_int *_Rin = (blas_int *)Rin->Mem; + blas_int *_out = (blas_int *)out->data(); + blas_int *_Lin = (blas_int *)Lin->data(); + blas_int *_Rin = (blas_int *)Rin->data(); #ifdef UNI_OMP std::vector tmp; @@ -253,9 +253,9 @@ namespace cytnx { const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); #ifdef UNI_OMP std::vector tmp; @@ -284,9 +284,9 @@ namespace cytnx { const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); #ifdef UNI_OMP std::vector tmp; @@ -315,9 +315,9 @@ namespace cytnx { const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const unsigned long long &len, const bool &is_conj) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); #ifdef UNI_OMP std::vector tmp; diff --git a/src/backend/linalg_internal_cpu/iAdd_internal.cpp b/src/backend/linalg_internal_cpu/iAdd_internal.cpp index e1511c8a..1f3afefd 100644 --- a/src/backend/linalg_internal_cpu/iAdd_internal.cpp +++ b/src/backend/linalg_internal_cpu/iAdd_internal.cpp @@ -62,8 +62,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -81,8 +81,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + 
cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -100,8 +100,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -119,8 +119,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -138,8 +138,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -157,8 +157,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -176,8 +176,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -195,8 +195,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -214,8 +214,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -233,8 +233,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 
*)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -252,8 +252,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -273,8 +273,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -292,8 +292,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -311,8 +311,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -330,8 +330,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -349,8 +349,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -368,8 +368,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -387,8 +387,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -406,8 +406,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, 
const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -425,8 +425,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -444,8 +444,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -463,8 +463,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -500,8 +500,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -519,8 +519,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -538,8 +538,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -557,8 +557,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -576,8 +576,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -595,8 +595,8 @@ 
namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -614,8 +614,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -633,8 +633,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -652,8 +652,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -688,8 +688,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -707,8 +707,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -726,8 +726,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -745,8 +745,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -764,8 +764,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -783,8 +783,8 @@ namespace cytnx { const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -802,8 +802,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -821,8 +821,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -840,8 +840,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -877,8 +877,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -896,8 +896,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -916,8 +916,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -936,8 +936,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -956,8 +956,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -976,8 +976,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = 
(cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -996,8 +996,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1016,8 +1016,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1035,8 +1035,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1072,8 +1072,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1091,8 +1091,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1111,8 +1111,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1131,8 +1131,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1151,8 +1151,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1171,8 +1171,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 
*_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1191,8 +1191,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1211,8 +1211,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1230,8 +1230,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1267,8 +1267,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1286,8 +1286,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1306,8 +1306,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1326,8 +1326,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1346,8 +1346,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1366,8 +1366,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + 
cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1386,8 +1386,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1406,8 +1406,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1425,8 +1425,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1462,8 +1462,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1481,8 +1481,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1501,8 +1501,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1521,8 +1521,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1541,8 +1541,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1561,8 +1561,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 
*)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1581,8 +1581,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1601,8 +1601,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1620,8 +1620,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1657,8 +1657,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1676,8 +1676,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1696,8 +1696,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1716,8 +1716,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1736,8 +1736,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1756,8 +1756,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = 
(cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1776,8 +1776,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1796,8 +1796,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1815,8 +1815,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1852,8 +1852,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1871,8 +1871,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1891,8 +1891,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1911,8 +1911,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1931,8 +1931,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1951,8 +1951,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if 
(Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1971,8 +1971,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -1991,8 +1991,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2010,8 +2010,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2047,8 +2047,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2066,8 +2066,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2085,8 +2085,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2104,8 +2104,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2123,8 +2123,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (shape.size() == 0) { // contiguous: _kernel_conti_iadd(_Lin, _Rin, len); } else { @@ -2137,8 +2137,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, 
_Rin, len); @@ -2156,8 +2156,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2175,8 +2175,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); @@ -2194,8 +2194,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_iadd(_Lin, _Rin, len); diff --git a/src/backend/linalg_internal_cpu/iDiv_internal.cpp b/src/backend/linalg_internal_cpu/iDiv_internal.cpp index 8561c243..a3d4edaf 100644 --- a/src/backend/linalg_internal_cpu/iDiv_internal.cpp +++ b/src/backend/linalg_internal_cpu/iDiv_internal.cpp @@ -62,8 +62,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -81,8 +81,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -100,8 +100,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -119,8 +119,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -138,8 +138,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -157,8 +157,8 @@ namespace cytnx { 
const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -176,8 +176,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -195,8 +195,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -214,8 +214,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -233,8 +233,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -252,8 +252,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -273,8 +273,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -292,8 +292,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -311,8 +311,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = 
(cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -330,8 +330,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -349,8 +349,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -368,8 +368,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -387,8 +387,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -406,8 +406,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -425,8 +425,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -444,8 +444,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -463,8 +463,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool 
*_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -500,8 +500,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -519,8 +519,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -538,8 +538,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -557,8 +557,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -576,8 +576,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -595,8 +595,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -614,8 +614,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -633,8 +633,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -652,8 +652,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if 
(Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -688,8 +688,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -707,8 +707,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -726,8 +726,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -745,8 +745,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -764,8 +764,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -783,8 +783,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -802,8 +802,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -821,8 +821,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -840,8 +840,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -877,8 +877,8 @@ 
namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -896,8 +896,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -916,8 +916,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -936,8 +936,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -956,8 +956,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -976,8 +976,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -996,8 +996,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1016,8 +1016,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1035,8 +1035,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1072,8 +1072,8 @@ namespace cytnx { const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1091,8 +1091,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1111,8 +1111,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1131,8 +1131,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1151,8 +1151,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1171,8 +1171,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1191,8 +1191,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1211,8 +1211,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1230,8 +1230,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1267,8 +1267,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const 
std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1286,8 +1286,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1306,8 +1306,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1326,8 +1326,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1346,8 +1346,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1366,8 +1366,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1386,8 +1386,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1406,8 +1406,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1425,8 +1425,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1462,8 +1462,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = 
(cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1481,8 +1481,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1501,8 +1501,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1521,8 +1521,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1541,8 +1541,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1561,8 +1561,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1581,8 +1581,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1601,8 +1601,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1620,8 +1620,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1657,8 +1657,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; 
- cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1676,8 +1676,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1696,8 +1696,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1716,8 +1716,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1736,8 +1736,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1756,8 +1756,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1776,8 +1776,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1796,8 +1796,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1815,8 +1815,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1852,8 +1852,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + 
cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1871,8 +1871,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1891,8 +1891,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1911,8 +1911,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1931,8 +1931,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1951,8 +1951,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1971,8 +1971,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -1991,8 +1991,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2010,8 +2010,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2047,8 +2047,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_Lin = 
(cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2066,8 +2066,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2085,8 +2085,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2104,8 +2104,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2123,8 +2123,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (shape.size() == 0) { // contiguous: _kernel_conti_idiv(_Lin, _Rin, len); } else { @@ -2137,8 +2137,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2156,8 +2156,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2175,8 +2175,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); @@ -2194,8 +2194,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_idiv(_Lin, _Rin, len); diff --git a/src/backend/linalg_internal_cpu/iMul_internal.cpp b/src/backend/linalg_internal_cpu/iMul_internal.cpp index 45b09698..7d87912c 100644 --- a/src/backend/linalg_internal_cpu/iMul_internal.cpp +++ b/src/backend/linalg_internal_cpu/iMul_internal.cpp @@ -62,8 +62,8 @@ namespace cytnx { const std::vector 
&shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -81,8 +81,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -100,8 +100,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -119,8 +119,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -138,8 +138,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -157,8 +157,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -176,8 +176,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -195,8 +195,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -214,8 +214,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = 
(cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -233,8 +233,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -252,8 +252,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -273,8 +273,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -292,8 +292,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -311,8 +311,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -330,8 +330,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -349,8 +349,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -368,8 +368,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if 
(Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -387,8 +387,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -406,8 +406,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -425,8 +425,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -444,8 +444,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -463,8 +463,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -500,8 +500,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -519,8 +519,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -538,8 +538,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -557,8 +557,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - 
cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -576,8 +576,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -595,8 +595,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -614,8 +614,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -633,8 +633,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -652,8 +652,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -688,8 +688,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -707,8 +707,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -726,8 +726,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -745,8 +745,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + 
cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -764,8 +764,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -783,8 +783,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -802,8 +802,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -821,8 +821,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -840,8 +840,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -877,8 +877,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -896,8 +896,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -916,8 +916,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -936,8 +936,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = 
(cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -956,8 +956,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -976,8 +976,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -996,8 +996,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1016,8 +1016,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1035,8 +1035,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1072,8 +1072,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1091,8 +1091,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1111,8 +1111,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1131,8 +1131,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 
1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1151,8 +1151,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1171,8 +1171,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1191,8 +1191,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1211,8 +1211,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1230,8 +1230,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1267,8 +1267,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1286,8 +1286,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1306,8 +1306,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1326,8 +1326,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, 
len); @@ -1346,8 +1346,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1366,8 +1366,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1386,8 +1386,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1406,8 +1406,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1425,8 +1425,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1462,8 +1462,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1481,8 +1481,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1501,8 +1501,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1521,8 +1521,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1541,8 +1541,8 @@ namespace cytnx 
{ const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1561,8 +1561,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1581,8 +1581,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1601,8 +1601,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1620,8 +1620,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1657,8 +1657,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1676,8 +1676,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1696,8 +1696,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1716,8 +1716,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1736,8 +1736,8 @@ namespace cytnx { const std::vector &shape, const 
std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1756,8 +1756,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1776,8 +1776,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1796,8 +1796,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1815,8 +1815,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1852,8 +1852,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1871,8 +1871,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1891,8 +1891,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1911,8 +1911,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1931,8 +1931,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1951,8 +1951,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1971,8 +1971,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -1991,8 +1991,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -2010,8 +2010,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -2047,8 +2047,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -2066,8 +2066,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -2085,8 +2085,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -2104,8 +2104,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len); @@ -2123,8 +2123,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool 
*)Lin->Mem;
- cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem;
+ cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+ cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data();
if (shape.size() == 0) {  // contiguous: _kernel_conti_imul(_Lin, _Rin, len); } else {
@@ -2137,8 +2137,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem;
- cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem;
+ cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+ cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data();
if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len);
@@ -2156,8 +2156,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem;
- cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem;
+ cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+ cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data();
if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len);
@@ -2175,8 +2175,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem;
- cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem;
+ cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+ cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data();
if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len);
@@ -2194,8 +2194,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem;
- cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem;
+ cytnx_bool *_Lin = (cytnx_bool *)Lin->data();
+ cytnx_bool *_Rin = (cytnx_bool *)Rin->data();
if (Rin->size() == 1) { _kernel_const_imul(_Lin, _Rin, len);
diff --git a/src/backend/linalg_internal_cpu/iSub_internal.cpp b/src/backend/linalg_internal_cpu/iSub_internal.cpp
index 36e1e368..2a2e8f33 100644
--- a/src/backend/linalg_internal_cpu/iSub_internal.cpp
+++ b/src/backend/linalg_internal_cpu/iSub_internal.cpp
@@ -62,8 +62,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
- cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem;
+ cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+ cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data();
if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len);
@@ -81,8 +81,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
- cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem;
+ cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+ cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data();
if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len);
@@ -100,8 +100,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
- cytnx_double *_Rin = (cytnx_double *)Rin->Mem;
+ cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data();
+ cytnx_double *_Rin = (cytnx_double *)Rin->data();
if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len);
@@ -119,8 +119,8 @@ namespace cytnx {
const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) {
- cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem;
- cytnx_float *_Rin = (cytnx_float *)Rin->Mem;
+ cytnx_complex128 *_Lin =
(cytnx_complex128 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -138,8 +138,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -157,8 +157,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -176,8 +176,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -195,8 +195,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -214,8 +214,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -233,8 +233,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -252,8 +252,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex128 *_Lin = (cytnx_complex128 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -273,8 +273,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex128 *_Rin = (cytnx_complex128 *)Rin->data(); if (Rin->size() == 1) 
{ _kernel_const_isub(_Lin, _Rin, len); @@ -292,8 +292,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_complex64 *_Rin = (cytnx_complex64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -311,8 +311,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -330,8 +330,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -349,8 +349,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -368,8 +368,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -387,8 +387,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -406,8 +406,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -425,8 +425,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -444,8 +444,8 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -463,8 +463,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_complex64 *_Lin = (cytnx_complex64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -500,8 +500,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -519,8 +519,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -538,8 +538,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -557,8 +557,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -576,8 +576,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -595,8 +595,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -614,8 +614,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -633,8 +633,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, 
const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -652,8 +652,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -688,8 +688,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -707,8 +707,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -726,8 +726,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -745,8 +745,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -764,8 +764,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -783,8 +783,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -802,8 +802,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -821,8 +821,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float 
*)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -840,8 +840,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -877,8 +877,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -896,8 +896,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -916,8 +916,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -936,8 +936,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -956,8 +956,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -976,8 +976,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -996,8 +996,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1016,8 +1016,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 
*_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1035,8 +1035,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1072,8 +1072,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1091,8 +1091,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1111,8 +1111,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1131,8 +1131,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1151,8 +1151,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1171,8 +1171,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1191,8 +1191,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1211,8 +1211,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 
*)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1230,8 +1230,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1267,8 +1267,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1286,8 +1286,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1306,8 +1306,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1326,8 +1326,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1346,8 +1346,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1366,8 +1366,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1386,8 +1386,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1406,8 +1406,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 
*)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1425,8 +1425,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1462,8 +1462,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1481,8 +1481,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1501,8 +1501,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1521,8 +1521,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1541,8 +1541,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1561,8 +1561,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1581,8 +1581,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1601,8 +1601,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if 
(Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1620,8 +1620,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1657,8 +1657,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1676,8 +1676,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1696,8 +1696,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1716,8 +1716,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1736,8 +1736,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1756,8 +1756,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1776,8 +1776,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1796,8 +1796,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, 
len); @@ -1815,8 +1815,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1852,8 +1852,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1871,8 +1871,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1891,8 +1891,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1911,8 +1911,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1931,8 +1931,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1951,8 +1951,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1971,8 +1971,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -1991,8 +1991,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2010,8 +2010,8 @@ 
namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2047,8 +2047,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2066,8 +2066,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2085,8 +2085,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2104,8 +2104,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2123,8 +2123,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); if (shape.size() == 0) { // contiguous: _kernel_conti_isub(_Lin, _Rin, len); } else { @@ -2137,8 +2137,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2156,8 +2156,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2175,8 +2175,8 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); @@ -2194,8 +2194,8 @@ namespace cytnx { const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); if (Rin->size() == 1) { _kernel_const_isub(_Lin, _Rin, len); diff --git a/src/backend/linalg_internal_cpu/memcpyTruncation.cpp b/src/backend/linalg_internal_cpu/memcpyTruncation.cpp index 9f1114a2..248d5f80 100644 --- a/src/backend/linalg_internal_cpu/memcpyTruncation.cpp +++ b/src/backend/linalg_internal_cpu/memcpyTruncation.cpp @@ -22,7 +22,8 @@ namespace cytnx { } cytnx_uint64 trunc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and trunc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + trunc_dim - 1 >= mindim) { trunc_dim--; } else { break; @@ -35,8 +36,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({trunc_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, trunc_dim * sizeof(cytnx_double)); + memcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), trunc_dim * sizeof(cytnx_double)); if (is_U) { Tensor newU = Tensor({U.shape()[0], trunc_dim}, U.dtype(), U.device()); @@ -44,8 +45,8 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - memcpy((cytnx_complex128 *)newU._impl->storage()._impl->Mem + src, - (cytnx_complex128 *)U._impl->storage()._impl->Mem + dest, + memcpy((cytnx_complex128 *)newU._impl->storage()._impl->data() + src, + (cytnx_complex128 *)U._impl->storage()._impl->data() + dest, trunc_dim * sizeof(cytnx_complex128)); src += trunc_dim; dest += U.shape()[1]; @@ -55,21 +56,21 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({trunc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- memcpy((cytnx_complex128 *)newvT._impl->storage()._impl->Mem, - (cytnx_complex128 *)vT._impl->storage()._impl->Mem, + memcpy((cytnx_complex128 *)newvT._impl->storage()._impl->data(), + (cytnx_complex128 *)vT._impl->storage()._impl->data(), vT.shape()[1] * trunc_dim * sizeof(cytnx_complex128)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[trunc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[trunc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - trunc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + trunc_dim, + memcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + trunc_dim, discared_dim * sizeof(cytnx_double)); terr = newterr; } @@ -89,7 +90,8 @@ namespace cytnx { } cytnx_uint64 trunc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and trunc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + trunc_dim - 1 >= mindim) { trunc_dim--; } else { break; @@ -102,8 +104,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({trunc_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, trunc_dim * sizeof(cytnx_double)); + memcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), trunc_dim * sizeof(cytnx_double)); if (is_U) { Tensor newU = Tensor({U.shape()[0], trunc_dim}, U.dtype(), U.device()); @@ -111,8 +113,8 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - memcpy((cytnx_complex64 *)newU._impl->storage()._impl->Mem + src, - (cytnx_complex64 *)U._impl->storage()._impl->Mem + dest, + memcpy((cytnx_complex64 *)newU._impl->storage()._impl->data() + src, + (cytnx_complex64 *)U._impl->storage()._impl->data() + dest, trunc_dim * sizeof(cytnx_complex64)); src += trunc_dim; dest += U.shape()[1]; @@ -122,21 +124,21 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({trunc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- memcpy((cytnx_complex64 *)newvT._impl->storage()._impl->Mem, - (cytnx_complex64 *)vT._impl->storage()._impl->Mem, + memcpy((cytnx_complex64 *)newvT._impl->storage()._impl->data(), + (cytnx_complex64 *)vT._impl->storage()._impl->data(), vT.shape()[1] * trunc_dim * sizeof(cytnx_complex64)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[trunc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[trunc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - trunc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + trunc_dim, + memcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + trunc_dim, discared_dim * sizeof(cytnx_double)); terr = newterr; } @@ -156,7 +158,8 @@ namespace cytnx { } cytnx_uint64 trunc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and trunc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + trunc_dim - 1 >= mindim) { trunc_dim--; } else { break; @@ -169,8 +172,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({trunc_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, trunc_dim * sizeof(cytnx_double)); + memcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), trunc_dim * sizeof(cytnx_double)); if (is_U) { Tensor newU = Tensor({U.shape()[0], trunc_dim}, U.dtype(), U.device()); @@ -178,8 +181,8 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - memcpy((cytnx_double *)newU._impl->storage()._impl->Mem + src, - (cytnx_double *)U._impl->storage()._impl->Mem + dest, + memcpy((cytnx_double *)newU._impl->storage()._impl->data() + src, + (cytnx_double *)U._impl->storage()._impl->data() + dest, trunc_dim * sizeof(cytnx_double)); src += trunc_dim; dest += U.shape()[1]; @@ -189,21 +192,21 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({trunc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- memcpy((cytnx_double *)newvT._impl->storage()._impl->Mem, - (cytnx_double *)vT._impl->storage()._impl->Mem, + memcpy((cytnx_double *)newvT._impl->storage()._impl->data(), + (cytnx_double *)vT._impl->storage()._impl->data(), vT.shape()[1] * trunc_dim * sizeof(cytnx_double)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[trunc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[trunc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - trunc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + trunc_dim, + memcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + trunc_dim, discared_dim * sizeof(cytnx_double)); terr = newterr; } @@ -223,7 +226,8 @@ namespace cytnx { } cytnx_uint64 trunc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and trunc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + trunc_dim - 1 >= mindim) { trunc_dim--; } else { break; @@ -236,8 +240,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({trunc_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, trunc_dim * sizeof(cytnx_double)); + memcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), trunc_dim * sizeof(cytnx_double)); if (is_U) { Tensor newU = Tensor({U.shape()[0], trunc_dim}, U.dtype(), U.device()); @@ -245,8 +249,8 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - memcpy((cytnx_float *)newU._impl->storage()._impl->Mem + src, - (cytnx_float *)U._impl->storage()._impl->Mem + dest, + memcpy((cytnx_float *)newU._impl->storage()._impl->data() + src, + (cytnx_float *)U._impl->storage()._impl->data() + dest, trunc_dim * sizeof(cytnx_float)); src += trunc_dim; dest += U.shape()[1]; @@ -256,21 +260,21 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({trunc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
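The memcpyTruncation hunks above repeat one pattern per dtype: the first trunc_dim singular values of S are copied, each row of U keeps only its first trunc_dim entries via a strided memcpy (note the local counters are named backwards, `src` walking the new tensor and `dest` the original, although the copy itself is correct), and vT needs only a single contiguous memcpy because dropping trailing rows of a row-major matrix leaves a contiguous prefix. A minimal standalone sketch of the row-wise part, using hypothetical names and plain std::vector instead of the cytnx storage types:

```cpp
#include <cstring>  // std::memcpy
#include <vector>

// Keep only the first `keep` columns of every row of a row-major (rows x cols)
// matrix -- the same copy-with-strides loop the hunks above perform on U.
std::vector<double> truncate_columns(const std::vector<double>& U,
                                     std::size_t rows, std::size_t cols,
                                     std::size_t keep) {
  std::vector<double> out(rows * keep);
  for (std::size_t r = 0; r < rows; ++r) {
    // Source row r starts at r * cols; destination row r starts at r * keep.
    std::memcpy(out.data() + r * keep, U.data() + r * cols,
                keep * sizeof(double));
  }
  return out;
}
```

The error reporting in the same hunks follows suit: `return_err == 1` packages the single value `S[trunc_dim]`, while any other truthy `return_err` copies the whole discarded tail of S.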
- memcpy((cytnx_float *)newvT._impl->storage()._impl->Mem, - (cytnx_float *)vT._impl->storage()._impl->Mem, + memcpy((cytnx_float *)newvT._impl->storage()._impl->data(), + (cytnx_float *)vT._impl->storage()._impl->data(), vT.shape()[1] * trunc_dim * sizeof(cytnx_float)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[trunc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[trunc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - trunc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - memcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + trunc_dim, + memcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + trunc_dim, discared_dim * sizeof(cytnx_double)); terr = newterr; } diff --git a/src/backend/linalg_internal_gpu/cuAbs_internal.cu b/src/backend/linalg_internal_gpu/cuAbs_internal.cu index 5cf92815..3c978233 100644 --- a/src/backend/linalg_internal_gpu/cuAbs_internal.cu +++ b/src/backend/linalg_internal_gpu/cuAbs_internal.cu @@ -70,8 +70,8 @@ namespace cytnx { void cuAbs_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cuDoubleComplex *_ten = (cuDoubleComplex *)ten->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cuDoubleComplex *_ten = (cuDoubleComplex *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -82,8 +82,8 @@ namespace cytnx { void cuAbs_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cuFloatComplex *_ten = (cuFloatComplex *)ten->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cuFloatComplex *_ten = (cuFloatComplex *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -94,8 +94,8 @@ namespace cytnx { void cuAbs_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_ten = (cytnx_double *)ten->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_ten = (cytnx_double *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -106,8 +106,8 @@ namespace cytnx { void cuAbs_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_ten = (cytnx_float *)ten->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_ten = (cytnx_float *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -118,8 +118,8 @@ namespace cytnx { void cuAbs_internal_i64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_ten = (cytnx_int64 *)ten->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_ten = (cytnx_int64 *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -130,8 +130,8 @@ namespace cytnx { void cuAbs_internal_i32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, 
const cytnx_uint64 &Nelem) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_ten = (cytnx_int32 *)ten->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_ten = (cytnx_int32 *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -142,8 +142,8 @@ namespace cytnx { void cuAbs_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_ten = (cytnx_int16 *)ten->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_ten = (cytnx_int16 *)ten->data(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuAdd_internal.cu b/src/backend/linalg_internal_gpu/cuAdd_internal.cu index a48352cd..60a8e061 100644 --- a/src/backend/linalg_internal_gpu/cuAdd_internal.cu +++ b/src/backend/linalg_internal_gpu/cuAdd_internal.cu @@ -141,9 +141,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -263,9 +263,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -382,9 +382,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -501,9 +501,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -622,9 +622,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint64 *_Rin = 
(cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -742,9 +742,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -862,9 +862,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -982,9 +982,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1102,9 +1102,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1222,9 +1222,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1341,9 +1341,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1472,9 +1472,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = 
(cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1591,9 +1591,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1710,9 +1710,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1830,9 +1830,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1950,9 +1950,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2070,9 +2070,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2190,9 +2190,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2310,9 +2310,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 
*)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2430,9 +2430,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2541,9 +2541,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2624,9 +2624,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2688,9 +2688,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2752,9 +2752,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2816,9 +2816,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2880,9 +2880,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = 
(cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2944,9 +2944,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3008,9 +3008,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3072,9 +3072,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3162,9 +3162,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3251,9 +3251,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3315,9 +3315,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3379,9 +3379,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = 
(cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3443,9 +3443,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3507,9 +3507,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3571,9 +3571,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3635,9 +3635,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3725,9 +3725,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3826,9 +3826,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3891,9 +3891,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; 
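Every wrapper in these cuAdd hunks (and the cuAbs and cuCpr ones around them) recomputes the same launch geometry: one 512-thread block per 512 elements, rounded up. A hedged, standalone equivalent of that two-line idiom (the helper name is hypothetical, not part of the cytnx API):

```cpp
#include <cstdint>

// Ceiling division for the launch grid: n_elem elements, `threads` threads per block.
// Mirrors `NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1;` from the hunks.
inline std::uint32_t n_blocks(std::uint64_t n_elem, std::uint32_t threads = 512) {
  return static_cast<std::uint32_t>(n_elem / threads + (n_elem % threads != 0));
}
```

The same result can be written as `(Nelem + 511) / 512`, provided `Nelem + 511` cannot overflow the index type.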
- cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3956,9 +3956,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4021,9 +4021,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4086,9 +4086,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4151,9 +4151,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4241,9 +4241,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4351,9 +4351,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4416,9 +4416,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + 
cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4481,9 +4481,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4547,9 +4547,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4612,9 +4612,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4702,9 +4702,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4821,9 +4821,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4886,9 +4886,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4951,9 +4951,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = 
(cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5016,9 +5016,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5106,9 +5106,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5234,9 +5234,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5299,9 +5299,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5364,9 +5364,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5454,9 +5454,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5591,9 +5591,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = 
(cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5656,9 +5656,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5746,9 +5746,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5892,9 +5892,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5982,9 +5982,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6129,9 +6129,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuConj_inplace_internal.cu b/src/backend/linalg_internal_gpu/cuConj_inplace_internal.cu index 80842f28..f143f7c5 100644 --- a/src/backend/linalg_internal_gpu/cuConj_inplace_internal.cu +++ b/src/backend/linalg_internal_gpu/cuConj_inplace_internal.cu @@ -35,14 +35,14 @@ namespace cytnx { const cytnx_uint64 &Nelem) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuConj_inplace_kernel<<>>((cuDoubleComplex *)ten->Mem, Nelem); + cuConj_inplace_kernel<<>>((cuDoubleComplex *)ten->data(), Nelem); } void cuConj_inplace_internal_cf(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuConj_inplace_kernel<<>>((cuFloatComplex *)ten->Mem, Nelem); + cuConj_inplace_kernel<<>>((cuFloatComplex *)ten->data(), Nelem); } } // namespace linalg_internal diff --git a/src/backend/linalg_internal_gpu/cuCpr_internal.cu 
b/src/backend/linalg_internal_gpu/cuCpr_internal.cu index 77c8f09f..1b227230 100644 --- a/src/backend/linalg_internal_gpu/cuCpr_internal.cu +++ b/src/backend/linalg_internal_gpu/cuCpr_internal.cu @@ -74,9 +74,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -121,9 +121,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -168,9 +168,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -215,9 +215,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -264,9 +264,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -312,9 +312,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -360,9 +360,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + 
cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -408,9 +408,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -456,9 +456,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -504,9 +504,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -551,9 +551,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -607,9 +607,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -654,9 +654,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -701,9 +701,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); 
+ cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -749,9 +749,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -797,9 +797,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -845,9 +845,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -893,9 +893,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -941,9 +941,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -989,9 +989,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1028,9 +1028,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool 
*)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1068,9 +1068,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1089,9 +1089,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1110,9 +1110,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1131,9 +1131,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1152,9 +1152,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1173,9 +1173,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1194,9 +1194,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin 
= (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1215,9 +1215,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1255,9 +1255,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1301,9 +1301,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1322,9 +1322,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1343,9 +1343,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1364,9 +1364,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1385,9 +1385,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = 
(cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1406,9 +1406,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1427,9 +1427,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1467,9 +1467,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1525,9 +1525,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1547,9 +1547,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1569,9 +1569,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1591,9 +1591,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1613,9 +1613,9 
@@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1635,9 +1635,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1675,9 +1675,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1742,9 +1742,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1764,9 +1764,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1786,9 +1786,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1809,9 +1809,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1831,9 +1831,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool 
*_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1871,9 +1871,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1947,9 +1947,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1969,9 +1969,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1991,9 +1991,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2013,9 +2013,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2053,9 +2053,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2138,9 +2138,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = 
(cytnx_uint32 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2160,9 +2160,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2182,9 +2182,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2222,9 +2222,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2316,9 +2316,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2338,9 +2338,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2378,9 +2378,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2481,9 +2481,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint16 *_Lin = 
(cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2521,9 +2521,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2625,9 +2625,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuDet_internal.cu b/src/backend/linalg_internal_gpu/cuDet_internal.cu index 7cf02265..685df050 100644 --- a/src/backend/linalg_internal_gpu/cuDet_internal.cu +++ b/src/backend/linalg_internal_gpu/cuDet_internal.cu @@ -13,9 +13,9 @@ namespace cytnx { const cytnx_uint64& L) { cytnx_complex128* od = (cytnx_complex128*)out; // result on cpu! cuDoubleComplex* _in = (cuDoubleComplex*)utils_internal::cuMalloc_gpu( - in->len * sizeof(cuDoubleComplex)); // unify mem. - checkCudaErrors( - cudaMemcpy(_in, in->Mem, sizeof(cytnx_complex128) * in->len, cudaMemcpyDeviceToDevice)); + in->size() * sizeof(cuDoubleComplex)); // unify mem. + checkCudaErrors(cudaMemcpy(_in, in->data(), sizeof(cytnx_complex128) * in->size(), + cudaMemcpyDeviceToDevice)); cusolverDnHandle_t cusolverH; cusolverDnCreate(&cusolverH); @@ -58,9 +58,9 @@ namespace cytnx { const cytnx_uint64& L) { cytnx_complex64* od = (cytnx_complex64*)out; // result on cpu! cuFloatComplex* _in = (cuFloatComplex*)utils_internal::cuMalloc_gpu( - in->len * sizeof(cuFloatComplex)); // unify mem. - checkCudaErrors( - cudaMemcpy(_in, in->Mem, sizeof(cytnx_complex64) * in->len, cudaMemcpyDeviceToDevice)); + in->size() * sizeof(cuFloatComplex)); // unify mem. + checkCudaErrors(cudaMemcpy(_in, in->data(), sizeof(cytnx_complex64) * in->size(), + cudaMemcpyDeviceToDevice)); cusolverDnHandle_t cusolverH; cusolverDnCreate(&cusolverH); @@ -102,10 +102,10 @@ namespace cytnx { void cuDet_internal_d(void* out, const boost::intrusive_ptr& in, const cytnx_uint64& L) { cytnx_double* od = (cytnx_double*)out; // result on cpu! - cytnx_double* _in = - (cytnx_double*)utils_internal::cuMalloc_gpu(in->len * sizeof(cytnx_double)); // unify mem. + cytnx_double* _in = (cytnx_double*)utils_internal::cuMalloc_gpu( + in->size() * sizeof(cytnx_double)); // unify mem. checkCudaErrors( - cudaMemcpy(_in, in->Mem, sizeof(cytnx_double) * in->len, cudaMemcpyDeviceToDevice)); + cudaMemcpy(_in, in->data(), sizeof(cytnx_double) * in->size(), cudaMemcpyDeviceToDevice)); cusolverDnHandle_t cusolverH; cusolverDnCreate(&cusolverH); @@ -148,9 +148,9 @@ namespace cytnx { const cytnx_uint64& L) { cytnx_float* od = (cytnx_float*)out; // result on cpu! cytnx_float* _in = - (cytnx_float*)utils_internal::cuMalloc_gpu(in->len * sizeof(cytnx_float)); // unify mem. 
+        (cytnx_float*)utils_internal::cuMalloc_gpu(in->size() * sizeof(cytnx_float));  // unify mem.
      checkCudaErrors(
-        cudaMemcpy(_in, in->Mem, sizeof(cytnx_float) * in->len, cudaMemcpyDeviceToDevice));
+        cudaMemcpy(_in, in->data(), sizeof(cytnx_float) * in->size(), cudaMemcpyDeviceToDevice));

      cusolverDnHandle_t cusolverH;
      cusolverDnCreate(&cusolverH);
diff --git a/src/backend/linalg_internal_gpu/cuDiag_internal.cu b/src/backend/linalg_internal_gpu/cuDiag_internal.cu
index 54fb69a6..1becaf43 100644
--- a/src/backend/linalg_internal_gpu/cuDiag_internal.cu
+++ b/src/backend/linalg_internal_gpu/cuDiag_internal.cu
@@ -40,10 +40,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_bool *)out->Mem, (cytnx_bool *)ten->Mem, L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_bool *)out->data(),
+                                                 (cytnx_bool *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_bool *)out->Mem,
-                                                         (cytnx_bool *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_bool *)out->data(),
+                                                         (cytnx_bool *)ten->data(), L);
    }

    void cuDiag_internal_i16(boost::intrusive_ptr<Storage_base> &out,
@@ -52,11 +53,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_int16 *)out->Mem, (cytnx_int16 *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_int16 *)out->data(),
+                                                 (cytnx_int16 *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_int16 *)out->Mem,
-                                                         (cytnx_int16 *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_int16 *)out->data(),
+                                                         (cytnx_int16 *)ten->data(), L);
    }

    void cuDiag_internal_u16(boost::intrusive_ptr<Storage_base> &out,
@@ -65,11 +66,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_uint16 *)out->Mem, (cytnx_uint16 *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_uint16 *)out->data(),
+                                                 (cytnx_uint16 *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_uint16 *)out->Mem,
-                                                         (cytnx_uint16 *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_uint16 *)out->data(),
+                                                         (cytnx_uint16 *)ten->data(), L);
    }

    void cuDiag_internal_i32(boost::intrusive_ptr<Storage_base> &out,
@@ -78,11 +79,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_int32 *)out->Mem, (cytnx_int32 *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_int32 *)out->data(),
+                                                 (cytnx_int32 *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_int32 *)out->Mem,
-                                                         (cytnx_int32 *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_int32 *)out->data(),
+                                                         (cytnx_int32 *)ten->data(), L);
    }

    void cuDiag_internal_u32(boost::intrusive_ptr<Storage_base> &out,
@@ -91,11 +92,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_uint32 *)out->Mem, (cytnx_uint32 *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_uint32 *)out->data(),
+                                                 (cytnx_uint32 *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_uint32 *)out->Mem,
-                                                         (cytnx_uint32 *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_uint32 *)out->data(),
+                                                         (cytnx_uint32 *)ten->data(), L);
    }

    void cuDiag_internal_i64(boost::intrusive_ptr<Storage_base> &out,
@@ -104,11 +105,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_int64 *)out->Mem, (cytnx_int64 *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_int64 *)out->data(),
+                                                 (cytnx_int64 *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_int64 *)out->Mem,
-                                                         (cytnx_int64 *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_int64 *)out->data(),
+                                                         (cytnx_int64 *)ten->data(), L);
    }

    void cuDiag_internal_u64(boost::intrusive_ptr<Storage_base> &out,
@@ -117,11 +118,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_uint64 *)out->Mem, (cytnx_uint64 *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_uint64 *)out->data(),
+                                                 (cytnx_uint64 *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_uint64 *)out->Mem,
-                                                         (cytnx_uint64 *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_uint64 *)out->data(),
+                                                         (cytnx_uint64 *)ten->data(), L);
    }

    void cuDiag_internal_d(boost::intrusive_ptr<Storage_base> &out,
@@ -130,11 +131,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_double *)out->Mem, (cytnx_double *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_double *)out->data(),
+                                                 (cytnx_double *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_double *)out->Mem,
-                                                         (cytnx_double *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_double *)out->data(),
+                                                         (cytnx_double *)ten->data(), L);
    }

    void cuDiag_internal_f(boost::intrusive_ptr<Storage_base> &out,
@@ -143,11 +144,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 512;
      if (L % 512) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_float *)out->Mem, (cytnx_float *)ten->Mem,
-                                                 L);
+        cuDiag_internal_kernel<<<NBlocks, 512>>>((cytnx_float *)out->data(),
+                                                 (cytnx_float *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_float *)out->Mem,
-                                                         (cytnx_float *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 512>>>((cytnx_float *)out->data(),
+                                                         (cytnx_float *)ten->data(), L);
    }

    void cuDiag_internal_cd(boost::intrusive_ptr<Storage_base> &out,
@@ -156,11 +157,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 256;
      if (L % 256) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 256>>>((cuDoubleComplex *)out->Mem,
-                                                 (cuDoubleComplex *)ten->Mem, L);
+        cuDiag_internal_kernel<<<NBlocks, 256>>>((cuDoubleComplex *)out->data(),
+                                                 (cuDoubleComplex *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 256>>>((cuDoubleComplex *)out->Mem,
-                                                         (cuDoubleComplex *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 256>>>((cuDoubleComplex *)out->data(),
+                                                         (cuDoubleComplex *)ten->data(), L);
    }

    void cuDiag_internal_cf(boost::intrusive_ptr<Storage_base> &out,
@@ -169,11 +170,11 @@ namespace cytnx {
      cytnx_uint32 NBlocks = L / 256;
      if (L % 256) NBlocks += 1;
      if (isrank2)
-        cuDiag_internal_kernel<<<NBlocks, 256>>>((cuFloatComplex *)out->Mem,
-                                                 (cuFloatComplex *)ten->Mem, L);
+        cuDiag_internal_kernel<<<NBlocks, 256>>>((cuFloatComplex *)out->data(),
+                                                 (cuFloatComplex *)ten->data(), L);
      else
-        cuDiag_internal_getdiag_kernel<<<NBlocks, 256>>>((cuFloatComplex *)out->Mem,
-                                                         (cuFloatComplex *)ten->Mem, L);
+        cuDiag_internal_getdiag_kernel<<<NBlocks, 256>>>((cuFloatComplex *)out->data(),
+                                                         (cuFloatComplex *)ten->data(), L);
    }

  }  // namespace linalg_internal
diff --git a/src/backend/linalg_internal_gpu/cuDiv_internal.cu b/src/backend/linalg_internal_gpu/cuDiv_internal.cu
index de5b1343..0e4e1137 100644
--- a/src/backend/linalg_internal_gpu/cuDiv_internal.cu
+++ b/src/backend/linalg_internal_gpu/cuDiv_internal.cu
@@ -89,9 +89,9 @@ namespace cytnx {
                              const unsigned long long &len, const std::vector<cytnx_uint64> &shape,
                              const std::vector<cytnx_uint64> &invmapper_L,
                              const std::vector<cytnx_uint64> &invmapper_R) {
-      cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem;
-      cuDoubleComplex *_Lin =
(cuDoubleComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -145,9 +145,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -201,9 +201,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -257,9 +257,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -315,9 +315,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -372,9 +372,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -429,9 +429,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -486,9 +486,9 @@ namespace cytnx { const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -543,9 +543,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -600,9 +600,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -648,9 +648,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -705,9 +705,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -760,9 +760,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -816,9 +816,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 
NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -872,9 +872,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -929,9 +929,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -986,9 +986,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1043,9 +1043,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1100,9 +1100,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1157,9 +1157,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1214,9 +1214,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 
*)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1270,9 +1270,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1328,9 +1328,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1384,9 +1384,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1408,9 +1408,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1431,9 +1431,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1454,9 +1454,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1477,9 +1477,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); 
+ cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1500,9 +1500,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1523,9 +1523,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1546,9 +1546,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1569,9 +1569,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1618,9 +1618,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1675,9 +1675,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1731,9 +1731,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex 
*)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1755,9 +1755,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1778,9 +1778,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1801,9 +1801,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1824,9 +1824,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1847,9 +1847,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1870,9 +1870,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1893,9 +1893,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + 
cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1916,9 +1916,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1964,9 +1964,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2022,9 +2022,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2079,9 +2079,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2103,9 +2103,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2126,9 +2126,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2150,9 +2150,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 
*)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2174,9 +2174,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2198,9 +2198,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2222,9 +2222,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2246,9 +2246,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2270,9 +2270,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2319,9 +2319,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2378,9 +2378,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_uint64 *_Lin = 
(cytnx_uint64 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2435,9 +2435,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2458,9 +2458,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2481,9 +2481,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2505,9 +2505,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2529,9 +2529,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2553,9 +2553,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2577,9 +2577,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = 
(cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2601,9 +2601,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2625,9 +2625,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2674,9 +2674,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2733,9 +2733,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2790,9 +2790,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2813,9 +2813,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2836,9 +2836,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float 
*)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2860,9 +2860,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2884,9 +2884,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2908,9 +2908,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2932,9 +2932,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2956,9 +2956,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2980,9 +2980,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3029,9 +3029,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool 
*)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3088,9 +3088,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3145,9 +3145,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3168,9 +3168,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3191,9 +3191,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3215,9 +3215,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3239,9 +3239,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3263,9 +3263,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = 
(cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3287,9 +3287,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3311,9 +3311,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3335,9 +3335,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3384,9 +3384,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3443,9 +3443,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3500,9 +3500,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3523,9 +3523,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double 
*_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3546,9 +3546,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3570,9 +3570,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3594,9 +3594,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3618,9 +3618,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3642,9 +3642,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3666,9 +3666,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3690,9 +3690,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; 
@@ -3739,9 +3739,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3798,9 +3798,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3855,9 +3855,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3878,9 +3878,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3901,9 +3901,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3925,9 +3925,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3949,9 +3949,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 
512; if (len % 512) NBlocks += 1; @@ -3973,9 +3973,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3997,9 +3997,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4021,9 +4021,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4045,9 +4045,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4094,9 +4094,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4145,9 +4145,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4192,9 +4192,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ 
-4239,9 +4239,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4286,9 +4286,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4332,9 +4332,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4378,9 +4378,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4425,9 +4425,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4472,9 +4472,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4519,9 +4519,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ 
-4566,9 +4566,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4588,9 +4588,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuEig_internal.cu b/src/backend/linalg_internal_gpu/cuEig_internal.cu index 35442cf9..1dca9423 100644 --- a/src/backend/linalg_internal_gpu/cuEig_internal.cu +++ b/src/backend/linalg_internal_gpu/cuEig_internal.cu @@ -37,7 +37,7 @@ namespace cytnx { NULL, 1, d_work, lwork, devInfo); cudaMemcpy(e->Mem, d_W, sizeof(cuDoubleComplex) * L, cudaMemcpyDeviceToHost); - if (v->dtype != Type.Void) { + if (v->dtype() != Type.Void) { cudaMemcpy(v->Mem, d_V, sizeof(cuDoubleComplex) * L * L, cudaMemcpyDeviceToHost); } @@ -77,7 +77,7 @@ namespace cytnx { NULL, 1, d_work, lwork, devInfo); cudaMemcpy(e->Mem, d_W, sizeof(cuFloatComplex) * L, cudaMemcpyDeviceToHost); - if (v->dtype != Type.Void) { + if (v->dtype() != Type.Void) { cudaMemcpy(v->Mem, d_V, sizeof(cuFloatComplex) * L * L, cudaMemcpyDeviceToHost); } @@ -117,7 +117,7 @@ namespace cytnx { NULL, 1, d_work, lwork, devInfo); cudaMemcpy(e->Mem, d_W, sizeof(double) * L, cudaMemcpyDeviceToHost); - if (v->dtype != Type.Void) { + if (v->dtype() != Type.Void) { cudaMemcpy(v->Mem, d_V, sizeof(double) * L * L, cudaMemcpyDeviceToHost); } @@ -157,7 +157,7 @@ namespace cytnx { NULL, 1, d_work, lwork, devInfo); cudaMemcpy(e->Mem, d_W, sizeof(float) * L, cudaMemcpyDeviceToHost); - if (v->dtype != Type.Void) { + if (v->dtype() != Type.Void) { cudaMemcpy(v->Mem, d_V, sizeof(float) * L * L, cudaMemcpyDeviceToHost); } diff --git a/src/backend/linalg_internal_gpu/cuEigh_internal.cu b/src/backend/linalg_internal_gpu/cuEigh_internal.cu index 5f344105..93626be1 100644 --- a/src/backend/linalg_internal_gpu/cuEigh_internal.cu +++ b/src/backend/linalg_internal_gpu/cuEigh_internal.cu @@ -12,7 +12,7 @@ namespace cytnx { boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int64 &L) { cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; - if (v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + if (v->dtype() == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; // create handles: cusolverDnHandle_t cusolverH = NULL; @@ -20,12 +20,13 @@ namespace cytnx { cytnx_complex128 *tA; if (v != NULL) { - tA = (cytnx_complex128 *)v->Mem; - checkCudaErrors(cudaMemcpy(v->Mem, in->Mem, sizeof(cytnx_complex128) * cytnx_uint64(L) * L, + tA = (cytnx_complex128 *)v->data(); + checkCudaErrors(cudaMemcpy(v->data(), in->data(), + sizeof(cytnx_complex128) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } else { checkCudaErrors(cudaMalloc((void **)&tA, cytnx_uint64(L) * L * sizeof(cytnx_complex128))); - checkCudaErrors(cudaMemcpy(tA, in->Mem, sizeof(cytnx_complex128) * 
cytnx_uint64(L) * L, + checkCudaErrors(cudaMemcpy(tA, in->data(), sizeof(cytnx_complex128) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } @@ -34,7 +35,7 @@ namespace cytnx { cytnx_int32 b32L = L; checkCudaErrors(cusolverDnZheevd_bufferSize(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, (cuDoubleComplex *)tA, b32L, - (cytnx_double *)e->Mem, &lwork)); + (cytnx_double *)e->data(), &lwork)); // allocate working space: cytnx_complex128 *work; @@ -45,7 +46,7 @@ namespace cytnx { cytnx_int32 *devinfo; checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); checkCudaErrors(cusolverDnZheevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, - (cuDoubleComplex *)tA, b32L, (cytnx_double *)e->Mem, + (cuDoubleComplex *)tA, b32L, (cytnx_double *)e->data(), (cuDoubleComplex *)work, lwork, devinfo)); // get info @@ -55,7 +56,7 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnZheevd': cuBlas INFO = ", info); cudaFree(work); - if (v->dtype == Type.Void) cudaFree(tA); + if (v->dtype() == Type.Void) cudaFree(tA); cudaFree(devinfo); cusolverDnDestroy(cusolverH); @@ -64,7 +65,7 @@ namespace cytnx { boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int64 &L) { cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; - if (v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + if (v->dtype() == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; // create handles: cusolverDnHandle_t cusolverH = NULL; @@ -72,12 +73,13 @@ namespace cytnx { cytnx_complex64 *tA; if (v != NULL) { - tA = (cytnx_complex64 *)v->Mem; - checkCudaErrors(cudaMemcpy(v->Mem, in->Mem, sizeof(cytnx_complex64) * cytnx_uint64(L) * L, + tA = (cytnx_complex64 *)v->data(); + checkCudaErrors(cudaMemcpy(v->data(), in->data(), + sizeof(cytnx_complex64) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } else { checkCudaErrors(cudaMalloc((void **)&tA, cytnx_uint64(L) * L * sizeof(cytnx_complex64))); - checkCudaErrors(cudaMemcpy(tA, in->Mem, sizeof(cytnx_complex64) * cytnx_uint64(L) * L, + checkCudaErrors(cudaMemcpy(tA, in->data(), sizeof(cytnx_complex64) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } @@ -85,8 +87,8 @@ namespace cytnx { cytnx_int32 lwork = 0; cytnx_int32 b32L = L; checkCudaErrors(cusolverDnCheevd_bufferSize(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, - (cuFloatComplex *)tA, b32L, (cytnx_float *)e->Mem, - &lwork)); + (cuFloatComplex *)tA, b32L, + (cytnx_float *)e->data(), &lwork)); // allocate working space: cytnx_complex64 *work; @@ -97,7 +99,7 @@ namespace cytnx { cytnx_int32 *devinfo; checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); checkCudaErrors(cusolverDnCheevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, - (cuFloatComplex *)tA, b32L, (cytnx_float *)e->Mem, + (cuFloatComplex *)tA, b32L, (cytnx_float *)e->data(), (cuFloatComplex *)work, lwork, devinfo)); // get info @@ -107,7 +109,7 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnZheevd': cuBlas INFO = ", info); cudaFree(work); - if (v->dtype == Type.Void) cudaFree(tA); + if (v->dtype() == Type.Void) cudaFree(tA); cudaFree(devinfo); cusolverDnDestroy(cusolverH); @@ -116,20 +118,21 @@ namespace cytnx { boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int64 &L) { cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; - if (v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + if (v->dtype() == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; // create handles: cusolverDnHandle_t cusolverH = NULL; checkCudaErrors(cusolverDnCreate(&cusolverH)); cytnx_double *tA; - if (v->dtype != Type.Void) { - tA = 
(cytnx_double *)v->Mem; - checkCudaErrors(cudaMemcpy(v->Mem, in->Mem, sizeof(cytnx_double) * cytnx_uint64(L) * L, + if (v->dtype() != Type.Void) { + tA = (cytnx_double *)v->data(); + checkCudaErrors(cudaMemcpy(v->data(), in->data(), + sizeof(cytnx_double) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } else { checkCudaErrors(cudaMalloc((void **)&tA, cytnx_uint64(L) * L * sizeof(cytnx_double))); - checkCudaErrors(cudaMemcpy(tA, in->Mem, sizeof(cytnx_double) * cytnx_uint64(L) * L, + checkCudaErrors(cudaMemcpy(tA, in->data(), sizeof(cytnx_double) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } @@ -137,7 +140,7 @@ namespace cytnx { cytnx_int32 lwork = 0; cytnx_int32 b32L = L; checkCudaErrors(cusolverDnDsyevd_bufferSize(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, tA, - b32L, (cytnx_double *)e->Mem, &lwork)); + b32L, (cytnx_double *)e->data(), &lwork)); // allocate working space: cytnx_double *work; @@ -148,7 +151,7 @@ namespace cytnx { cytnx_int32 *devinfo; checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); checkCudaErrors(cusolverDnDsyevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, tA, b32L, - (cytnx_double *)e->Mem, work, lwork, devinfo)); + (cytnx_double *)e->data(), work, lwork, devinfo)); // get info checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); @@ -157,7 +160,7 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnDsysevd': cuBlas INFO = ", info); cudaFree(work); - if (v->dtype == Type.Void) cudaFree(tA); + if (v->dtype() == Type.Void) cudaFree(tA); cudaFree(devinfo); cusolverDnDestroy(cusolverH); @@ -166,20 +169,20 @@ namespace cytnx { boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int64 &L) { cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; - if (v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + if (v->dtype() == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; // create handles: cusolverDnHandle_t cusolverH = NULL; checkCudaErrors(cusolverDnCreate(&cusolverH)); cytnx_float *tA; - if (v->dtype != Type.Void) { - tA = (cytnx_float *)v->Mem; - checkCudaErrors(cudaMemcpy(v->Mem, in->Mem, sizeof(cytnx_float) * cytnx_uint64(L) * L, + if (v->dtype() != Type.Void) { + tA = (cytnx_float *)v->data(); + checkCudaErrors(cudaMemcpy(v->data(), in->data(), sizeof(cytnx_float) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } else { checkCudaErrors(cudaMalloc((void **)&tA, cytnx_uint64(L) * L * sizeof(cytnx_float))); - checkCudaErrors(cudaMemcpy(tA, in->Mem, sizeof(cytnx_float) * cytnx_uint64(L) * L, + checkCudaErrors(cudaMemcpy(tA, in->data(), sizeof(cytnx_float) * cytnx_uint64(L) * L, cudaMemcpyDeviceToDevice)); } @@ -187,7 +190,7 @@ namespace cytnx { cytnx_int32 lwork = 0; cytnx_int32 b32L = L; checkCudaErrors(cusolverDnSsyevd_bufferSize(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, tA, - b32L, (cytnx_float *)e->Mem, &lwork)); + b32L, (cytnx_float *)e->data(), &lwork)); // allocate working space: cytnx_float *work; @@ -198,7 +201,7 @@ namespace cytnx { cytnx_int32 *devinfo; checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); checkCudaErrors(cusolverDnSsyevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, b32L, tA, b32L, - (cytnx_float *)e->Mem, work, lwork, devinfo)); + (cytnx_float *)e->data(), work, lwork, devinfo)); // get info checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); @@ -206,7 +209,7 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnDsysevd': cuBlas INFO = ", info); cudaFree(work); - if (v->dtype == Type.Void) 
cudaFree(tA); + if (v->dtype() == Type.Void) cudaFree(tA); cudaFree(devinfo); cusolverDnDestroy(cusolverH); diff --git a/src/backend/linalg_internal_gpu/cuExp_internal.cu b/src/backend/linalg_internal_gpu/cuExp_internal.cu index d1d1aee3..4c6eeab6 100644 --- a/src/backend/linalg_internal_gpu/cuExp_internal.cu +++ b/src/backend/linalg_internal_gpu/cuExp_internal.cu @@ -69,8 +69,8 @@ namespace cytnx { const cytnx_uint64 &Nelem) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuExp_internal_kernel_d<<>>((cytnx_double *)out->Mem, (cytnx_double *)ten->Mem, - Nelem); + cuExp_internal_kernel_d<<>>((cytnx_double *)out->data(), + (cytnx_double *)ten->data(), Nelem); } void cuExp_internal_f(boost::intrusive_ptr &out, @@ -78,8 +78,8 @@ namespace cytnx { const cytnx_uint64 &Nelem) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuExp_internal_kernel_f<<>>((cytnx_float *)out->Mem, (cytnx_float *)ten->Mem, - Nelem); + cuExp_internal_kernel_f<<>>((cytnx_float *)out->data(), + (cytnx_float *)ten->data(), Nelem); } void cuExp_internal_cd(boost::intrusive_ptr &out, @@ -87,8 +87,8 @@ namespace cytnx { const cytnx_uint64 &Nelem) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuExp_internal_kernel_cd<<>>((cuDoubleComplex *)out->Mem, - (cuDoubleComplex *)ten->Mem, Nelem); + cuExp_internal_kernel_cd<<>>((cuDoubleComplex *)out->data(), + (cuDoubleComplex *)ten->data(), Nelem); } void cuExp_internal_cf(boost::intrusive_ptr &out, @@ -96,8 +96,8 @@ namespace cytnx { const cytnx_uint64 &Nelem) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuExp_internal_kernel_cf<<>>((cuFloatComplex *)out->Mem, - (cuFloatComplex *)ten->Mem, Nelem); + cuExp_internal_kernel_cf<<>>((cuFloatComplex *)out->data(), + (cuFloatComplex *)ten->data(), Nelem); } } // namespace linalg_internal diff --git a/src/backend/linalg_internal_gpu/cuGeSvd_internal.cu b/src/backend/linalg_internal_gpu/cuGeSvd_internal.cu index 09814b4c..965f2ab7 100644 --- a/src/backend/linalg_internal_gpu/cuGeSvd_internal.cu +++ b/src/backend/linalg_internal_gpu/cuGeSvd_internal.cu @@ -15,8 +15,8 @@ namespace cytnx { using d_data_type = cuDoubleComplex; cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 
'S' in gesvd */ @@ -28,21 +28,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -57,7 +57,7 @@ namespace cytnx { cytnx_int32 lwork = 0; void *d_work = nullptr; checkCudaErrors(cusolverDnZgesvdj_bufferSize( - cusolverH, jobz, econ, N, M, (d_data_type *)Mij, ldA, (cytnx_double *)S->Mem, + cusolverH, jobz, econ, N, M, (d_data_type *)Mij, ldA, (cytnx_double *)S->data(), (d_data_type *)vTMem, ldu, (d_data_type *)UMem, ldvT, &lwork, gesvdj_params)); checkCudaErrors(cudaMalloc(reinterpret_cast(&d_work), sizeof(data_type) * lwork)); @@ -67,10 +67,10 @@ namespace cytnx { checkCudaErrors(cudaMemset(devinfo, 0, sizeof(cytnx_int32))); checkCudaErrors(cusolverDnZgesvdj(cusolverH, jobz, econ, N, M, (d_data_type *)Mij, ldA, - (cytnx_double *)S->Mem, (d_data_type *)vTMem, ldu, + (cytnx_double *)S->data(), (d_data_type *)vTMem, ldu, (d_data_type *)UMem, ldvT, (d_data_type *)d_work, lwork, devinfo, gesvdj_params)); - if (U->Mem and jobz == CUSOLVER_EIG_MODE_VECTOR) { + if (U->data() and jobz == CUSOLVER_EIG_MODE_VECTOR) { U->Move_memory_({(cytnx_uint64)min, (cytnx_uint64)M}, {1, 0}, {1, 0}); linalg_internal::cuConj_inplace_internal_cd(U, M * min); } @@ -85,10 +85,10 @@ namespace cytnx { checkCudaErrors(cudaFree(d_work)); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); @@ -103,8 +103,8 @@ namespace cytnx { using d_data_type = cuFloatComplex; cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 
'S' in gesvd */ @@ -116,21 +116,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -145,7 +145,7 @@ namespace cytnx { cytnx_int32 lwork = 0; void *d_work = nullptr; checkCudaErrors(cusolverDnCgesvdj_bufferSize( - cusolverH, jobz, econ, N, M, (d_data_type *)Mij, ldA, (cytnx_float *)S->Mem, + cusolverH, jobz, econ, N, M, (d_data_type *)Mij, ldA, (cytnx_float *)S->data(), (d_data_type *)vTMem, ldu, (d_data_type *)UMem, ldvT, &lwork, gesvdj_params)); checkCudaErrors(cudaMalloc(reinterpret_cast(&d_work), sizeof(data_type) * lwork)); @@ -155,10 +155,10 @@ namespace cytnx { checkCudaErrors(cudaMemset(devinfo, 0, sizeof(cytnx_int32))); checkCudaErrors(cusolverDnCgesvdj(cusolverH, jobz, econ, N, M, (d_data_type *)Mij, ldA, - (cytnx_float *)S->Mem, (d_data_type *)vTMem, ldu, + (cytnx_float *)S->data(), (d_data_type *)vTMem, ldu, (d_data_type *)UMem, ldvT, (d_data_type *)d_work, lwork, devinfo, gesvdj_params)); - if (U->Mem and jobz == CUSOLVER_EIG_MODE_VECTOR) { + if (U->data() and jobz == CUSOLVER_EIG_MODE_VECTOR) { U->Move_memory_({(cytnx_uint64)min, (cytnx_uint64)M}, {1, 0}, {1, 0}); linalg_internal::cuConj_inplace_internal_cf(U, M * min); } @@ -173,10 +173,10 @@ namespace cytnx { checkCudaErrors(cudaFree(d_work)); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); @@ -190,8 +190,8 @@ namespace cytnx { using data_type = cytnx_double; cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 
'S' in gesvd */ @@ -203,21 +203,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -232,8 +232,8 @@ namespace cytnx { cytnx_int32 lwork = 0; void *d_work = nullptr; checkCudaErrors(cusolverDnDgesvdj_bufferSize( - cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, (data_type *)S->Mem, (data_type *)vTMem, - ldu, (data_type *)UMem, ldvT, &lwork, gesvdj_params)); + cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, (data_type *)S->data(), + (data_type *)vTMem, ldu, (data_type *)UMem, ldvT, &lwork, gesvdj_params)); checkCudaErrors(cudaMalloc(reinterpret_cast(&d_work), sizeof(data_type) * lwork)); @@ -241,10 +241,11 @@ namespace cytnx { checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); checkCudaErrors(cudaMemset(devinfo, 0, sizeof(cytnx_int32))); - checkCudaErrors(cusolverDnDgesvdj( - cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, (data_type *)S->Mem, (data_type *)vTMem, - ldu, (data_type *)UMem, ldvT, (data_type *)d_work, lwork, devinfo, gesvdj_params)); - if (U->Mem and jobz == CUSOLVER_EIG_MODE_VECTOR) { + checkCudaErrors(cusolverDnDgesvdj(cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, + (data_type *)S->data(), (data_type *)vTMem, ldu, + (data_type *)UMem, ldvT, (data_type *)d_work, lwork, + devinfo, gesvdj_params)); + if (U->data() and jobz == CUSOLVER_EIG_MODE_VECTOR) { U->Move_memory_({(cytnx_uint64)min, (cytnx_uint64)M}, {1, 0}, {1, 0}); } @@ -258,10 +259,10 @@ namespace cytnx { checkCudaErrors(cudaFree(d_work)); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); @@ -275,8 +276,8 @@ namespace cytnx { using data_type = cytnx_float; cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 
'S' in gesvd */ @@ -288,21 +289,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -317,8 +318,8 @@ namespace cytnx { cytnx_int32 lwork = 0; void *d_work = nullptr; checkCudaErrors(cusolverDnSgesvdj_bufferSize( - cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, (data_type *)S->Mem, (data_type *)vTMem, - ldu, (data_type *)UMem, ldvT, &lwork, gesvdj_params)); + cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, (data_type *)S->data(), + (data_type *)vTMem, ldu, (data_type *)UMem, ldvT, &lwork, gesvdj_params)); checkCudaErrors(cudaMalloc(reinterpret_cast(&d_work), sizeof(data_type) * lwork)); @@ -326,10 +327,11 @@ namespace cytnx { checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); checkCudaErrors(cudaMemset(devinfo, 0, sizeof(cytnx_int32))); - checkCudaErrors(cusolverDnSgesvdj( - cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, (data_type *)S->Mem, (data_type *)vTMem, - ldu, (data_type *)UMem, ldvT, (data_type *)d_work, lwork, devinfo, gesvdj_params)); - if (U->Mem and jobz == CUSOLVER_EIG_MODE_VECTOR) { + checkCudaErrors(cusolverDnSgesvdj(cusolverH, jobz, econ, N, M, (data_type *)Mij, ldA, + (data_type *)S->data(), (data_type *)vTMem, ldu, + (data_type *)UMem, ldvT, (data_type *)d_work, lwork, + devinfo, gesvdj_params)); + if (U->data() and jobz == CUSOLVER_EIG_MODE_VECTOR) { U->Move_memory_({(cytnx_uint64)min, (cytnx_uint64)M}, {1, 0}, {1, 0}); } @@ -343,10 +345,10 @@ namespace cytnx { checkCudaErrors(cudaFree(d_work)); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); diff --git a/src/backend/linalg_internal_gpu/cuGemm_internal.cu b/src/backend/linalg_internal_gpu/cuGemm_internal.cu index 6b1cc84b..f309d186 100644 --- a/src/backend/linalg_internal_gpu/cuGemm_internal.cu +++ b/src/backend/linalg_internal_gpu/cuGemm_internal.cu @@ -16,9 +16,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex128 alpha = complex128(a), beta = complex128(b); - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_inl = (cuDoubleComplex *)inl->Mem; - cuDoubleComplex *_inr = (cuDoubleComplex *)inr->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_inl = (cuDoubleComplex *)inl->data(); + cuDoubleComplex *_inr = (cuDoubleComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -38,9 +38,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex64 alpha = complex64(a), beta = 
complex64(b); - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_inl = (cuFloatComplex *)inl->Mem; - cuFloatComplex *_inr = (cuFloatComplex *)inr->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_inl = (cuFloatComplex *)inl->data(); + cuFloatComplex *_inr = (cuFloatComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -61,9 +61,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_double alpha = double(a), beta = double(b); - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -82,9 +82,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_float alpha = float(a), beta = float(b); - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; diff --git a/src/backend/linalg_internal_gpu/cuGer_internal.cu b/src/backend/linalg_internal_gpu/cuGer_internal.cu index 661209a8..509ab292 100644 --- a/src/backend/linalg_internal_gpu/cuGer_internal.cu +++ b/src/backend/linalg_internal_gpu/cuGer_internal.cu @@ -18,9 +18,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex128 alpha = complex128(a); - cuDoubleComplex *_A = (cuDoubleComplex *)A->Mem; - cuDoubleComplex *_x = (cuDoubleComplex *)x->Mem; - cuDoubleComplex *_y = (cuDoubleComplex *)y->Mem; + cuDoubleComplex *_A = (cuDoubleComplex *)A->data(); + cuDoubleComplex *_x = (cuDoubleComplex *)x->data(); + cuDoubleComplex *_y = (cuDoubleComplex *)y->data(); checkCudaErrors(cublasZgeru(cublasH, y->size(), x->size(), (cuDoubleComplex *)&alpha, _y, 1, _x, 1, _A, y->size())); @@ -36,9 +36,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex64 alpha = complex64(a); - cuFloatComplex *_A = (cuFloatComplex *)A->Mem; - cuFloatComplex *_x = (cuFloatComplex *)x->Mem; - cuFloatComplex *_y = (cuFloatComplex *)y->Mem; + cuFloatComplex *_A = (cuFloatComplex *)A->data(); + cuFloatComplex *_x = (cuFloatComplex *)x->data(); + cuFloatComplex *_y = (cuFloatComplex *)y->data(); checkCudaErrors(cublasCgeru(cublasH, y->size(), x->size(), (cuFloatComplex *)&alpha, _y, 1, _x, 1, _A, y->size())); @@ -54,9 +54,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_double alpha = cytnx_double(a); - cytnx_double *_A = (cytnx_double *)A->Mem; - cytnx_double *_x = (cytnx_double *)x->Mem; - cytnx_double *_y = (cytnx_double *)y->Mem; + cytnx_double *_A = (cytnx_double *)A->data(); + cytnx_double *_x = (cytnx_double *)x->data(); + cytnx_double *_y = (cytnx_double *)y->data(); checkCudaErrors( cublasDger(cublasH, y->size(), x->size(), &alpha, _y, 1, _x, 1, _A, y->size())); @@ -72,9 +72,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_float alpha = cytnx_float(a); - cytnx_float *_A = (cytnx_float *)A->Mem; - cytnx_float *_x = (cytnx_float *)x->Mem; - cytnx_float *_y = (cytnx_float *)y->Mem; + cytnx_float *_A = (cytnx_float *)A->data(); + cytnx_float 
*_x = (cytnx_float *)x->data(); + cytnx_float *_y = (cytnx_float *)y->data(); checkCudaErrors( cublasSger(cublasH, y->size(), x->size(), &alpha, _y, 1, _x, 1, _A, y->size())); diff --git a/src/backend/linalg_internal_gpu/cuInvM_inplace_internal.cu b/src/backend/linalg_internal_gpu/cuInvM_inplace_internal.cu index ccaec68f..8c836d67 100644 --- a/src/backend/linalg_internal_gpu/cuInvM_inplace_internal.cu +++ b/src/backend/linalg_internal_gpu/cuInvM_inplace_internal.cu @@ -20,11 +20,11 @@ namespace cytnx { checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); // trf: checkCudaErrors( - cusolverDnDgetrf_bufferSize(cusolverH, L, L, (cytnx_double *)ten->Mem, L, &lwork)); + cusolverDnDgetrf_bufferSize(cusolverH, L, L, (cytnx_double *)ten->data(), L, &lwork)); checkCudaErrors(cudaMalloc((void **)&d_work, sizeof(cytnx_double) * lwork)); checkCudaErrors( - cusolverDnDgetrf(cusolverH, L, L, (cytnx_double *)ten->Mem, L, d_work, ipiv, devinfo)); + cusolverDnDgetrf(cusolverH, L, L, (cytnx_double *)ten->data(), L, d_work, ipiv, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", @@ -39,7 +39,7 @@ namespace cytnx { checkCudaErrors(cudaMemcpy(d_I, h_I, sizeof(cytnx_double) * L * L, cudaMemcpyHostToDevice)); - checkCudaErrors(cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, L, L, (cytnx_double *)ten->Mem, L, + checkCudaErrors(cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, L, L, (cytnx_double *)ten->data(), L, ipiv, d_I, L, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); @@ -47,7 +47,7 @@ namespace cytnx { "ERROR in cuSolver function 'cusolverDnDgetrs': cuBlas INFO = ", info); checkCudaErrors( - cudaMemcpy(ten->Mem, d_I, sizeof(cytnx_double) * L * L, cudaMemcpyDeviceToDevice)); + cudaMemcpy(ten->data(), d_I, sizeof(cytnx_double) * L * L, cudaMemcpyDeviceToDevice)); cudaFree(d_I); cudaFree(d_work); @@ -70,11 +70,11 @@ namespace cytnx { checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); // trf: checkCudaErrors( - cusolverDnSgetrf_bufferSize(cusolverH, L, L, (cytnx_float *)ten->Mem, L, &lwork)); + cusolverDnSgetrf_bufferSize(cusolverH, L, L, (cytnx_float *)ten->data(), L, &lwork)); checkCudaErrors(cudaMalloc((void **)&d_work, sizeof(cytnx_float) * lwork)); checkCudaErrors( - cusolverDnSgetrf(cusolverH, L, L, (cytnx_float *)ten->Mem, L, d_work, ipiv, devinfo)); + cusolverDnSgetrf(cusolverH, L, L, (cytnx_float *)ten->data(), L, d_work, ipiv, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", @@ -89,7 +89,7 @@ namespace cytnx { checkCudaErrors(cudaMemcpy(d_I, h_I, sizeof(cytnx_float) * L * L, cudaMemcpyHostToDevice)); - checkCudaErrors(cusolverDnSgetrs(cusolverH, CUBLAS_OP_N, L, L, (cytnx_float *)ten->Mem, L, + checkCudaErrors(cusolverDnSgetrs(cusolverH, CUBLAS_OP_N, L, L, (cytnx_float *)ten->data(), L, ipiv, d_I, L, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); @@ -97,7 +97,7 @@ namespace cytnx { "ERROR in cuSolver function 'cusolverDnSgetrs': cuBlas INFO = ", info); checkCudaErrors( - cudaMemcpy(ten->Mem, d_I, sizeof(cytnx_float) * L * L, cudaMemcpyDeviceToDevice)); + cudaMemcpy(ten->data(), d_I, sizeof(cytnx_float) * L * L, cudaMemcpyDeviceToDevice)); cudaFree(d_I); cudaFree(d_work); @@ -120,10 +120,10 @@ namespace cytnx { checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); // trf: checkCudaErrors( - 
cusolverDnZgetrf_bufferSize(cusolverH, L, L, (cuDoubleComplex *)ten->Mem, L, &lwork)); + cusolverDnZgetrf_bufferSize(cusolverH, L, L, (cuDoubleComplex *)ten->data(), L, &lwork)); checkCudaErrors(cudaMalloc((void **)&d_work, sizeof(cytnx_complex128) * lwork)); - checkCudaErrors(cusolverDnZgetrf(cusolverH, L, L, (cuDoubleComplex *)ten->Mem, L, + checkCudaErrors(cusolverDnZgetrf(cusolverH, L, L, (cuDoubleComplex *)ten->data(), L, (cuDoubleComplex *)d_work, ipiv, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); @@ -140,15 +140,15 @@ namespace cytnx { checkCudaErrors( cudaMemcpy(d_I, h_I, sizeof(cytnx_complex128) * L * L, cudaMemcpyHostToDevice)); - checkCudaErrors(cusolverDnZgetrs(cusolverH, CUBLAS_OP_N, L, L, (cuDoubleComplex *)ten->Mem, L, - ipiv, (cuDoubleComplex *)d_I, L, devinfo)); + checkCudaErrors(cusolverDnZgetrs(cusolverH, CUBLAS_OP_N, L, L, (cuDoubleComplex *)ten->data(), + L, ipiv, (cuDoubleComplex *)d_I, L, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "ERROR in cuSolver function 'cusolverDnZgetrs': cuBlas INFO = ", info); checkCudaErrors( - cudaMemcpy(ten->Mem, d_I, sizeof(cytnx_complex128) * L * L, cudaMemcpyDeviceToDevice)); + cudaMemcpy(ten->data(), d_I, sizeof(cytnx_complex128) * L * L, cudaMemcpyDeviceToDevice)); cudaFree(d_I); cudaFree(d_work); @@ -172,10 +172,10 @@ namespace cytnx { checkCudaErrors(cudaMalloc((void **)&devinfo, sizeof(cytnx_int32))); // trf: checkCudaErrors( - cusolverDnCgetrf_bufferSize(cusolverH, L, L, (cuFloatComplex *)ten->Mem, L, &lwork)); + cusolverDnCgetrf_bufferSize(cusolverH, L, L, (cuFloatComplex *)ten->data(), L, &lwork)); checkCudaErrors(cudaMalloc((void **)&d_work, sizeof(cytnx_complex64) * lwork)); - checkCudaErrors(cusolverDnCgetrf(cusolverH, L, L, (cuFloatComplex *)ten->Mem, L, + checkCudaErrors(cusolverDnCgetrf(cusolverH, L, L, (cuFloatComplex *)ten->data(), L, (cuFloatComplex *)d_work, ipiv, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); @@ -192,15 +192,15 @@ namespace cytnx { checkCudaErrors( cudaMemcpy(d_I, h_I, sizeof(cytnx_complex64) * L * L, cudaMemcpyHostToDevice)); - checkCudaErrors(cusolverDnCgetrs(cusolverH, CUBLAS_OP_N, L, L, (cuFloatComplex *)ten->Mem, L, - ipiv, (cuFloatComplex *)d_I, L, devinfo)); + checkCudaErrors(cusolverDnCgetrs(cusolverH, CUBLAS_OP_N, L, L, (cuFloatComplex *)ten->data(), + L, ipiv, (cuFloatComplex *)d_I, L, devinfo)); checkCudaErrors(cudaMemcpy(&info, devinfo, sizeof(cytnx_int32), cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "ERROR in cuSolver function 'cusolverDnCgetrs': cuBlas INFO = ", info); checkCudaErrors( - cudaMemcpy(ten->Mem, d_I, sizeof(cytnx_complex64) * L * L, cudaMemcpyDeviceToDevice)); + cudaMemcpy(ten->data(), d_I, sizeof(cytnx_complex64) * L * L, cudaMemcpyDeviceToDevice)); cudaFree(d_I); cudaFree(d_work); diff --git a/src/backend/linalg_internal_gpu/cuInv_inplace_internal.cu b/src/backend/linalg_internal_gpu/cuInv_inplace_internal.cu index d7f0c5d5..95adc9e7 100644 --- a/src/backend/linalg_internal_gpu/cuInv_inplace_internal.cu +++ b/src/backend/linalg_internal_gpu/cuInv_inplace_internal.cu @@ -62,28 +62,28 @@ namespace cytnx { const cytnx_uint64 &Nelem, const double &clip) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuInv_internal_kernel_d<<<NBlocks, 512>>>((cytnx_double *)ten->Mem, Nelem, clip); + cuInv_internal_kernel_d<<<NBlocks, 512>>>((cytnx_double *)ten->data(), Nelem, 
clip); } void cuInv_inplace_internal_f(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuInv_internal_kernel_f<<<NBlocks, 512>>>((cytnx_float *)ten->Mem, Nelem, clip); + cuInv_internal_kernel_f<<<NBlocks, 512>>>((cytnx_float *)ten->data(), Nelem, clip); } void cuInv_inplace_internal_cd(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuInv_internal_kernel_cd<<<NBlocks, 256>>>((cuDoubleComplex *)ten->Mem, Nelem, clip); + cuInv_internal_kernel_cd<<<NBlocks, 256>>>((cuDoubleComplex *)ten->data(), Nelem, clip); } void cuInv_inplace_internal_cf(boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const double &clip) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuInv_internal_kernel_cf<<<NBlocks, 256>>>((cuFloatComplex *)ten->Mem, Nelem, clip); + cuInv_internal_kernel_cf<<<NBlocks, 256>>>((cuFloatComplex *)ten->data(), Nelem, clip); } } // namespace linalg_internal diff --git a/src/backend/linalg_internal_gpu/cuMatmul_dg_internal.cu b/src/backend/linalg_internal_gpu/cuMatmul_dg_internal.cu index 4378e8c7..85c682dc 100644 --- a/src/backend/linalg_internal_gpu/cuMatmul_dg_internal.cu +++ b/src/backend/linalg_internal_gpu/cuMatmul_dg_internal.cu @@ -40,9 +40,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); // cytnx_complex128 alpha = cytnx_complex128(1,0), beta=cytnx_complex128(0,0); - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_inl = (cuDoubleComplex *)inl->Mem; - cuDoubleComplex *_inr = (cuDoubleComplex *)inr->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_inl = (cuDoubleComplex *)inl->data(); + cuDoubleComplex *_inr = (cuDoubleComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; //, blsComm=Comm; @@ -65,9 +65,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); // cytnx_complex64 alpha = cytnx_complex64(1,0), beta=cytnx_complex64(0,0); - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_inl = (cuFloatComplex *)inl->Mem; - cuFloatComplex *_inr = (cuFloatComplex *)inr->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_inl = (cuFloatComplex *)inl->data(); + cuFloatComplex *_inr = (cuFloatComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; //, blsComm=Comm; @@ -91,9 +91,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); // cytnx_double alpha = 1, beta=0; - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; //, blsComm=Comm; @@ -119,9 +119,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); // cytnx_float alpha = 1, beta=0; - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; //, blsComm=Comm; @@ -139,9 +139,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int 
&diag_L) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -155,9 +155,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -172,9 +172,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -189,9 +189,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -206,9 +206,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_inl = (cytnx_int16 *)inl->Mem; - cytnx_int16 *_inr = (cytnx_int16 *)inr->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_inl = (cytnx_int16 *)inl->data(); + cytnx_int16 *_inr = (cytnx_int16 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -223,9 +223,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_inl = (cytnx_uint16 *)inl->Mem; - cytnx_uint16 *_inr = (cytnx_uint16 *)inr->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_inl = (cytnx_uint16 *)inl->data(); + cytnx_uint16 *_inr = (cytnx_uint16 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -240,9 +240,9 @@ namespace cytnx { const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr, const int &diag_L) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_inl = (cytnx_bool *)inl->Mem; - cytnx_bool *_inr = (cytnx_bool *)inr->Mem; + cytnx_bool *_out = 
(cytnx_bool *)out->data(); + cytnx_bool *_inl = (cytnx_bool *)inl->data(); + cytnx_bool *_inr = (cytnx_bool *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuMatmul_internal.cu b/src/backend/linalg_internal_gpu/cuMatmul_internal.cu index 1908cf9a..a60b2561 100644 --- a/src/backend/linalg_internal_gpu/cuMatmul_internal.cu +++ b/src/backend/linalg_internal_gpu/cuMatmul_internal.cu @@ -30,9 +30,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex128 alpha = cytnx_complex128(1, 0), beta = cytnx_complex128(0, 0); - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_inl = (cuDoubleComplex *)inl->Mem; - cuDoubleComplex *_inr = (cuDoubleComplex *)inr->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_inl = (cuDoubleComplex *)inl->data(); + cuDoubleComplex *_inr = (cuDoubleComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -51,9 +51,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex64 alpha = cytnx_complex64(1, 0), beta = cytnx_complex64(0, 0); - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_inl = (cuFloatComplex *)inl->Mem; - cuFloatComplex *_inr = (cuFloatComplex *)inr->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_inl = (cuFloatComplex *)inl->data(); + cuFloatComplex *_inr = (cuFloatComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -73,9 +73,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_double alpha = 1, beta = 0; - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -93,9 +93,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_float alpha = 1, beta = 0; - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr, blsComm = Comm; @@ -108,9 +108,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -121,9 +121,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + 
cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -134,9 +134,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -147,9 +147,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -160,9 +160,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_inl = (cytnx_int16 *)inl->Mem; - cytnx_int16 *_inr = (cytnx_int16 *)inr->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_inl = (cytnx_int16 *)inl->data(); + cytnx_int16 *_inr = (cytnx_int16 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -173,9 +173,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_inl = (cytnx_uint16 *)inl->Mem; - cytnx_uint16 *_inr = (cytnx_uint16 *)inr->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_inl = (cytnx_uint16 *)inl->data(); + cytnx_uint16 *_inr = (cytnx_uint16 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; @@ -186,9 +186,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Comm, const cytnx_int64 &Nr) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_inl = (cytnx_bool *)inl->Mem; - cytnx_bool *_inr = (cytnx_bool *)inr->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_inl = (cytnx_bool *)inl->data(); + cytnx_bool *_inr = (cytnx_bool *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml) * Nr) / 512; if ((cytnx_uint64(Ml) * Nr) % 512) Nblocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuMatvec_internal.cu b/src/backend/linalg_internal_gpu/cuMatvec_internal.cu index 7f62e45e..3fd55411 100644 --- a/src/backend/linalg_internal_gpu/cuMatvec_internal.cu +++ b/src/backend/linalg_internal_gpu/cuMatvec_internal.cu @@ -30,9 +30,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex128 alpha = cytnx_complex128(1, 0), beta = cytnx_complex128(0, 0); - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; 
- cuDoubleComplex *_inl = (cuDoubleComplex *)inl->Mem; - cuDoubleComplex *_inr = (cuDoubleComplex *)inr->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_inl = (cuDoubleComplex *)inl->data(); + cuDoubleComplex *_inr = (cuDoubleComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; @@ -52,9 +52,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_complex64 alpha = cytnx_complex64(1, 0), beta = cytnx_complex64(0, 0); - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_inl = (cuFloatComplex *)inl->Mem; - cuFloatComplex *_inr = (cuFloatComplex *)inr->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_inl = (cuFloatComplex *)inl->data(); + cuFloatComplex *_inr = (cuFloatComplex *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; @@ -75,9 +75,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_double alpha = 1, beta = 0; - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_inl = (cytnx_double *)inl->Mem; - cytnx_double *_inr = (cytnx_double *)inr->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_inl = (cytnx_double *)inl->data(); + cytnx_double *_inr = (cytnx_double *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; @@ -97,9 +97,9 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); cytnx_float alpha = 1, beta = 0; - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_inl = (cytnx_float *)inl->Mem; - cytnx_float *_inr = (cytnx_float *)inr->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_inl = (cytnx_float *)inl->data(); + cytnx_float *_inr = (cytnx_float *)inr->data(); // query working space : cytnx_int32 blsMl = Ml, blsNr = Nr; @@ -115,9 +115,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; @@ -128,9 +128,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; @@ -141,9 +141,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_inl = (cytnx_int64 *)inl->Mem; - cytnx_int64 *_inr = (cytnx_int64 *)inr->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_inl = (cytnx_int64 *)inl->data(); + cytnx_int64 *_inr = (cytnx_int64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; @@ -154,9 +154,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const 
cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_inl = (cytnx_uint64 *)inl->Mem; - cytnx_uint64 *_inr = (cytnx_uint64 *)inr->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_inl = (cytnx_uint64 *)inl->data(); + cytnx_uint64 *_inr = (cytnx_uint64 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; @@ -167,9 +167,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_inl = (cytnx_int16 *)inl->Mem; - cytnx_int16 *_inr = (cytnx_int16 *)inr->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_inl = (cytnx_int16 *)inl->data(); + cytnx_int16 *_inr = (cytnx_int16 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; @@ -180,9 +180,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_inl = (cytnx_uint16 *)inl->Mem; - cytnx_uint16 *_inr = (cytnx_uint16 *)inr->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_inl = (cytnx_uint16 *)inl->data(); + cytnx_uint16 *_inr = (cytnx_uint16 *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; @@ -193,9 +193,9 @@ namespace cytnx { const boost::intrusive_ptr &inl, const boost::intrusive_ptr &inr, const cytnx_int64 &Ml, const cytnx_int64 &Nr) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_inl = (cytnx_bool *)inl->Mem; - cytnx_bool *_inr = (cytnx_bool *)inr->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_inl = (cytnx_bool *)inl->data(); + cytnx_bool *_inr = (cytnx_bool *)inr->data(); cytnx_uint64 Nblocks = (cytnx_uint64(Ml)) / 512; if ((cytnx_uint64(Ml)) % 512) Nblocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuMaxMin_internal.cu b/src/backend/linalg_internal_gpu/cuMaxMin_internal.cu index cd276462..dab5d014 100644 --- a/src/backend/linalg_internal_gpu/cuMaxMin_internal.cu +++ b/src/backend/linalg_internal_gpu/cuMaxMin_internal.cu @@ -500,89 +500,93 @@ namespace cytnx { const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cuDoubleComplex *)out->Mem, (cuDoubleComplex *)ten->Mem, ten->len); + cuMax_gpu_generic((cuDoubleComplex *)out->data(), (cuDoubleComplex *)ten->data(), + ten->size()); else - cuMin_gpu_generic((cuDoubleComplex *)out->Mem, (cuDoubleComplex *)ten->Mem, ten->len); + cuMin_gpu_generic((cuDoubleComplex *)out->data(), (cuDoubleComplex *)ten->data(), + ten->size()); } void cuMaxMin_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cuFloatComplex *)out->Mem, (cuFloatComplex *)ten->Mem, ten->len); + cuMax_gpu_generic((cuFloatComplex *)out->data(), (cuFloatComplex *)ten->data(), + ten->size()); else - cuMin_gpu_generic((cuFloatComplex *)out->Mem, (cuFloatComplex *)ten->Mem, ten->len); + cuMin_gpu_generic((cuFloatComplex *)out->data(), (cuFloatComplex *)ten->data(), + ten->size()); } void cuMaxMin_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_double 
*)out->Mem, (cytnx_double *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_double *)out->data(), (cytnx_double *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_double *)out->Mem, (cytnx_double *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_double *)out->data(), (cytnx_double *)ten->data(), ten->size()); } void cuMaxMin_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_float *)out->Mem, (cytnx_float *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_float *)out->data(), (cytnx_float *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_float *)out->Mem, (cytnx_float *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_float *)out->data(), (cytnx_float *)ten->data(), ten->size()); } void cuMaxMin_internal_u64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_uint64 *)out->Mem, (cytnx_uint64 *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_uint64 *)out->data(), (cytnx_uint64 *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_uint64 *)out->Mem, (cytnx_uint64 *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_uint64 *)out->data(), (cytnx_uint64 *)ten->data(), ten->size()); } void cuMaxMin_internal_i64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_int64 *)out->Mem, (cytnx_int64 *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_int64 *)out->data(), (cytnx_int64 *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_int64 *)out->Mem, (cytnx_int64 *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_int64 *)out->data(), (cytnx_int64 *)ten->data(), ten->size()); } void cuMaxMin_internal_u32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_uint32 *)out->Mem, (cytnx_uint32 *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_uint32 *)out->data(), (cytnx_uint32 *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_uint32 *)out->Mem, (cytnx_uint32 *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_uint32 *)out->data(), (cytnx_uint32 *)ten->data(), ten->size()); } void cuMaxMin_internal_i32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_int32 *)out->Mem, (cytnx_int32 *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_int32 *)out->data(), (cytnx_int32 *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_int32 *)out->Mem, (cytnx_int32 *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_int32 *)out->data(), (cytnx_int32 *)ten->data(), ten->size()); } void cuMaxMin_internal_u16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_uint16 *)out->Mem, (cytnx_uint16 *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_uint16 *)out->data(), (cytnx_uint16 *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_uint16 *)out->Mem, (cytnx_uint16 *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_uint16 *)out->data(), (cytnx_uint16 *)ten->data(), ten->size()); } void cuMaxMin_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_int16 *)out->Mem, (cytnx_int16 
*)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_int16 *)out->data(), (cytnx_int16 *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_int16 *)out->Mem, (cytnx_int16 *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_int16 *)out->data(), (cytnx_int16 *)ten->data(), ten->size()); } void cuMaxMin_internal_b(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { if (type == 'x') - cuMax_gpu_generic((cytnx_bool *)out->Mem, (cytnx_bool *)ten->Mem, ten->len); + cuMax_gpu_generic((cytnx_bool *)out->data(), (cytnx_bool *)ten->data(), ten->size()); else - cuMin_gpu_generic((cytnx_bool *)out->Mem, (cytnx_bool *)ten->Mem, ten->len); + cuMin_gpu_generic((cytnx_bool *)out->data(), (cytnx_bool *)ten->data(), ten->size()); } } // namespace linalg_internal diff --git a/src/backend/linalg_internal_gpu/cuMod_internal.cu b/src/backend/linalg_internal_gpu/cuMod_internal.cu index 8b914604..22edd867 100644 --- a/src/backend/linalg_internal_gpu/cuMod_internal.cu +++ b/src/backend/linalg_internal_gpu/cuMod_internal.cu @@ -342,9 +342,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -363,9 +363,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -384,9 +384,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -405,9 +405,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -426,9 +426,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = 
(cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -447,9 +447,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -468,9 +468,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -489,9 +489,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -511,9 +511,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -553,9 +553,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -574,9 +574,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -595,9 +595,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = 
(cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -616,9 +616,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -637,9 +637,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -658,9 +658,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -679,9 +679,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -700,9 +700,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -722,9 +722,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -766,9 +766,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 
*)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -787,9 +787,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -809,9 +809,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -831,9 +831,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -853,9 +853,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -875,9 +875,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -897,9 +897,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -919,9 +919,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks 
+= 1; @@ -959,9 +959,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1002,9 +1002,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1023,9 +1023,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1045,9 +1045,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1067,9 +1067,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1089,9 +1089,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1111,9 +1111,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ 
-1133,9 +1133,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1155,9 +1155,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1195,9 +1195,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1238,9 +1238,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1259,9 +1259,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1281,9 +1281,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1303,9 +1303,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1325,9 +1325,9 @@ 
namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1347,9 +1347,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1369,9 +1369,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1391,9 +1391,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1431,9 +1431,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1474,9 +1474,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1495,9 +1495,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1517,9 +1517,9 @@ namespace cytnx { const std::vector 
&shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1539,9 +1539,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1561,9 +1561,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1583,9 +1583,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1605,9 +1605,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1627,9 +1627,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1667,9 +1667,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1710,9 +1710,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector 
&invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1731,9 +1731,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1753,9 +1753,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1775,9 +1775,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1797,9 +1797,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1819,9 +1819,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1841,9 +1841,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1863,9 +1863,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - 
cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1903,9 +1903,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1946,9 +1946,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1967,9 +1967,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1989,9 +1989,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2011,9 +2011,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2033,9 +2033,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2055,9 +2055,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint16 *_Lin = 
(cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2077,9 +2077,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2099,9 +2099,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2139,9 +2139,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2181,9 +2181,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2203,9 +2203,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2242,9 +2242,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2281,9 +2281,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - 
cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2320,9 +2320,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2359,9 +2359,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2398,9 +2398,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2437,9 +2437,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2459,9 +2459,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuMul_internal.cu b/src/backend/linalg_internal_gpu/cuMul_internal.cu index 3f70fc15..c9b7aa57 100644 --- a/src/backend/linalg_internal_gpu/cuMul_internal.cu +++ b/src/backend/linalg_internal_gpu/cuMul_internal.cu @@ -89,9 +89,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex 
*)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -145,9 +145,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -201,9 +201,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -257,9 +257,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -315,9 +315,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -372,9 +372,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -429,9 +429,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -486,9 +486,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuDoubleComplex *_out = 
(cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -543,9 +543,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -600,9 +600,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -656,9 +656,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -721,9 +721,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -777,9 +777,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -833,9 +833,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -890,9 +890,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex 
*)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -947,9 +947,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1004,9 +1004,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1061,9 +1061,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1118,9 +1118,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1175,9 +1175,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1231,9 +1231,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1273,9 +1273,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = 
(cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1296,9 +1296,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1319,9 +1319,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1342,9 +1342,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1365,9 +1365,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1388,9 +1388,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1411,9 +1411,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1434,9 +1434,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, 
const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1483,9 +1483,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1532,9 +1532,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1555,9 +1555,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1578,9 +1578,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1601,9 +1601,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1624,9 +1624,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1647,9 +1647,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const 
std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1670,9 +1670,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1719,9 +1719,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1778,9 +1778,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1802,9 +1802,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1826,9 +1826,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1850,9 +1850,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1874,9 +1874,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - 
cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1898,9 +1898,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1947,9 +1947,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2016,9 +2016,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2040,9 +2040,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2064,9 +2064,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2088,9 +2088,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2112,9 +2112,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = 
(cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2161,9 +2161,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2239,9 +2239,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2263,9 +2263,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2287,9 +2287,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2311,9 +2311,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2360,9 +2360,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2448,9 +2448,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 
*)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2472,9 +2472,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2496,9 +2496,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2545,9 +2545,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2641,9 +2641,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2665,9 +2665,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2714,9 +2714,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2819,9 +2819,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 
*)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2868,9 +2868,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2974,9 +2974,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuNorm_internal.cu b/src/backend/linalg_internal_gpu/cuNorm_internal.cu index a9927aed..3e6aee7e 100644 --- a/src/backend/linalg_internal_gpu/cuNorm_internal.cu +++ b/src/backend/linalg_internal_gpu/cuNorm_internal.cu @@ -18,7 +18,7 @@ namespace cytnx { checkCudaErrors(cublasCreate(&cublasH)); checkCudaErrors( - cublasDznrm2(cublasH, Rin->len, (cuDoubleComplex *)Rin->Mem, 1, (double *)out)); + cublasDznrm2(cublasH, Rin->size(), (cuDoubleComplex *)Rin->data(), 1, (double *)out)); cublasDestroy(cublasH); } @@ -26,7 +26,8 @@ namespace cytnx { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - checkCudaErrors(cublasScnrm2(cublasH, Rin->len, (cuComplex *)Rin->Mem, 1, (float *)out)); + checkCudaErrors( + cublasScnrm2(cublasH, Rin->size(), (cuComplex *)Rin->data(), 1, (float *)out)); cublasDestroy(cublasH); } @@ -34,14 +35,14 @@ namespace cytnx { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - checkCudaErrors(cublasDnrm2(cublasH, Rin->len, (double *)Rin->Mem, 1, (double *)out)); + checkCudaErrors(cublasDnrm2(cublasH, Rin->size(), (double *)Rin->data(), 1, (double *)out)); cublasDestroy(cublasH); } void cuNorm_internal_f(void *out, const boost::intrusive_ptr &Rin) { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - checkCudaErrors(cublasSnrm2(cublasH, Rin->len, (float *)Rin->Mem, 1, (float *)out)); + checkCudaErrors(cublasSnrm2(cublasH, Rin->size(), (float *)Rin->data(), 1, (float *)out)); cublasDestroy(cublasH); } diff --git a/src/backend/linalg_internal_gpu/cuOuter_internal.cu b/src/backend/linalg_internal_gpu/cuOuter_internal.cu index 59ec9607..d887904b 100644 --- a/src/backend/linalg_internal_gpu/cuOuter_internal.cu +++ b/src/backend/linalg_internal_gpu/cuOuter_internal.cu @@ -31,11 +31,11 @@ namespace cytnx { const boost::intrusive_ptr &Lin, const boost::intrusive_ptr &Rin, const cytnx_uint64 &j1, const cytnx_uint64 &j2) { - T1 *_out = (T1 *)out->Mem; - T2 *_Lin = (T2 *)Lin->Mem; - T3 *_Rin = (T3 *)Rin->Mem; + T1 *_out = (T1 *)out->data(); + T2 *_Lin = (T2 *)Lin->data(); + T3 *_Rin = (T3 *)Rin->data(); - cytnx_uint64 Nelem = Lin->len * Rin->len; + cytnx_uint64 Nelem = Lin->size() * Rin->size(); cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuPow_internal.cu b/src/backend/linalg_internal_gpu/cuPow_internal.cu 
index 71771d15..4d178958 100644 --- a/src/backend/linalg_internal_gpu/cuPow_internal.cu +++ b/src/backend/linalg_internal_gpu/cuPow_internal.cu @@ -73,8 +73,8 @@ namespace cytnx { const cytnx_double &p) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuPow_internal_kernel_d<<>>((cytnx_double *)out->Mem, (cytnx_double *)ten->Mem, - Nelem, p); + cuPow_internal_kernel_d<<>>((cytnx_double *)out->data(), + (cytnx_double *)ten->data(), Nelem, p); } void cuPow_internal_f(boost::intrusive_ptr &out, @@ -82,8 +82,8 @@ namespace cytnx { const cytnx_double &p) { cytnx_uint32 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; - cuPow_internal_kernel_f<<>>((cytnx_float *)out->Mem, (cytnx_float *)ten->Mem, - Nelem, p); + cuPow_internal_kernel_f<<>>((cytnx_float *)out->data(), + (cytnx_float *)ten->data(), Nelem, p); } void cuPow_internal_cd(boost::intrusive_ptr &out, @@ -91,8 +91,8 @@ namespace cytnx { const cytnx_double &p) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuPow_internal_kernel_cd<<>>((cuDoubleComplex *)out->Mem, - (cuDoubleComplex *)ten->Mem, Nelem, p); + cuPow_internal_kernel_cd<<>>((cuDoubleComplex *)out->data(), + (cuDoubleComplex *)ten->data(), Nelem, p); } void cuPow_internal_cf(boost::intrusive_ptr &out, @@ -100,8 +100,8 @@ namespace cytnx { const cytnx_double &p) { cytnx_uint32 NBlocks = Nelem / 256; if (Nelem % 256) NBlocks += 1; - cuPow_internal_kernel_cf<<>>((cuFloatComplex *)out->Mem, - (cuFloatComplex *)ten->Mem, Nelem, p); + cuPow_internal_kernel_cf<<>>((cuFloatComplex *)out->data(), + (cuFloatComplex *)ten->data(), Nelem, p); } } // namespace linalg_internal diff --git a/src/backend/linalg_internal_gpu/cuQuantumGeSvd_internal.cu b/src/backend/linalg_internal_gpu/cuQuantumGeSvd_internal.cu index 8c50d10d..79452109 100644 --- a/src/backend/linalg_internal_gpu/cuQuantumGeSvd_internal.cu +++ b/src/backend/linalg_internal_gpu/cuQuantumGeSvd_internal.cu @@ -92,18 +92,18 @@ namespace cytnx { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { int src = 0; int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex128 *)newU._impl->storage()._impl->Mem + src, - (cytnx_complex128 *)U._impl->storage()._impl->Mem + dest, - truc_dim * sizeof(cytnx_complex128), - cudaMemcpyDeviceToDevice)); + HANDLE_CUDA_ERROR( + cudaMemcpy((cytnx_complex128 *)newU._impl->storage()._impl->data() + src, + (cytnx_complex128 *)U._impl->storage()._impl->data() + dest, + truc_dim * sizeof(cytnx_complex128), cudaMemcpyDeviceToDevice)); src += truc_dim; dest += U.shape()[1]; } @@ -111,22 +111,22 @@ namespace cytnx { } if (is_vT) { // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex128 *)newvT._impl->storage()._impl->Mem, - (cytnx_complex128 *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex128 *)newvT._impl->storage()._impl->data(), + (cytnx_complex128 *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_complex128), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -140,16 +140,16 @@ namespace cytnx { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { int src = 0; int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newU._impl->storage()._impl->Mem + src, - (cytnx_complex64 *)U._impl->storage()._impl->Mem + dest, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newU._impl->storage()._impl->data() + src, + (cytnx_complex64 *)U._impl->storage()._impl->data() + dest, truc_dim * sizeof(cytnx_complex64), cudaMemcpyDeviceToDevice)); src += truc_dim; @@ -159,22 +159,22 @@ namespace cytnx { } if (is_vT) { // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newvT._impl->storage()._impl->Mem, - (cytnx_complex64 *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newvT._impl->storage()._impl->data(), + (cytnx_complex64 *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_complex64), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -188,16 +188,16 @@ namespace cytnx { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { int src = 0; int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newU._impl->storage()._impl->Mem + src, - (cytnx_double *)U._impl->storage()._impl->Mem + dest, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newU._impl->storage()._impl->data() + src, + (cytnx_double *)U._impl->storage()._impl->data() + dest, truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); src += truc_dim; dest += U.shape()[1]; @@ -206,22 +206,22 @@ namespace cytnx { } if (is_vT) { // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newvT._impl->storage()._impl->Mem, - (cytnx_double *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newvT._impl->storage()._impl->data(), + (cytnx_double *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -235,16 +235,16 @@ namespace cytnx { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { int src = 0; int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newU._impl->storage()._impl->Mem + src, - (cytnx_float *)U._impl->storage()._impl->Mem + dest, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newU._impl->storage()._impl->data() + src, + (cytnx_float *)U._impl->storage()._impl->data() + dest, truc_dim * sizeof(cytnx_float), cudaMemcpyDeviceToDevice)); src += truc_dim; dest += U.shape()[1]; @@ -253,22 +253,22 @@ namespace cytnx { } if (is_vT) { // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newvT._impl->storage()._impl->Mem, - (cytnx_float *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newvT._impl->storage()._impl->data(), + (cytnx_float *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_float), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -324,10 +324,10 @@ namespace cytnx { const int32_t numModesU = modesU.size(); const int32_t numModesV = modesV.size(); - void *D_T = Tin._impl->storage()._impl->Mem; - void *D_U = U._impl->storage()._impl->Mem; - void *D_S = S._impl->storage()._impl->Mem; - void *D_V = vT._impl->storage()._impl->Mem; + void *D_T = Tin._impl->storage()._impl->data(); + void *D_U = U._impl->storage()._impl->data(); + void *D_S = S._impl->storage()._impl->data(); + void *D_V = vT._impl->storage()._impl->data(); cutensornetTensorDescriptor_t descTensorIn; cutensornetTensorDescriptor_t descTensorU; @@ -506,7 +506,7 @@ namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err) { truc_dim--; } else { break; @@ -568,10 +568,10 @@ namespace cytnx { const int32_t numModesU = modesU.size(); const int32_t numModesV = modesV.size(); - void *D_T = Tin._impl->storage()._impl->Mem; - void *D_U = U._impl->storage()._impl->Mem; - void *D_S = S._impl->storage()._impl->Mem; - void *D_V = vT._impl->storage()._impl->Mem; + void *D_T = Tin._impl->storage()._impl->data(); + void *D_U = U._impl->storage()._impl->data(); + void *D_S = S._impl->storage()._impl->data(); + void *D_V = vT._impl->storage()._impl->data(); cutensornetTensorDescriptor_t descTensorIn; cutensornetTensorDescriptor_t descTensorU; @@ -750,7 +750,7 @@ namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err) { truc_dim--; } else { break; @@ -812,10 +812,10 @@ namespace cytnx { const int32_t numModesU = modesU.size(); const int32_t numModesV = modesV.size(); - void *D_T = Tin._impl->storage()._impl->Mem; - void *D_U = U._impl->storage()._impl->Mem; - void *D_S = S._impl->storage()._impl->Mem; - void *D_V = vT._impl->storage()._impl->Mem; + void *D_T = Tin._impl->storage()._impl->data(); + void *D_U = U._impl->storage()._impl->data(); + void *D_S = S._impl->storage()._impl->data(); + void *D_V = vT._impl->storage()._impl->data(); cutensornetTensorDescriptor_t descTensorIn; cutensornetTensorDescriptor_t descTensorU; @@ -994,7 +994,7 @@ 
namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err) { truc_dim--; } else { break; @@ -1056,10 +1056,10 @@ namespace cytnx { const int32_t numModesU = modesU.size(); const int32_t numModesV = modesV.size(); - void *D_T = Tin._impl->storage()._impl->Mem; - void *D_U = U._impl->storage()._impl->Mem; - void *D_S = S._impl->storage()._impl->Mem; - void *D_V = vT._impl->storage()._impl->Mem; + void *D_T = Tin._impl->storage()._impl->data(); + void *D_U = U._impl->storage()._impl->data(); + void *D_S = S._impl->storage()._impl->data(); + void *D_V = vT._impl->storage()._impl->data(); cutensornetTensorDescriptor_t descTensorIn; cutensornetTensorDescriptor_t descTensorU; diff --git a/src/backend/linalg_internal_gpu/cuQuantumQr_internal.cu b/src/backend/linalg_internal_gpu/cuQuantumQr_internal.cu index 00179ebd..979f0a84 100644 --- a/src/backend/linalg_internal_gpu/cuQuantumQr_internal.cu +++ b/src/backend/linalg_internal_gpu/cuQuantumQr_internal.cu @@ -105,9 +105,9 @@ namespace cytnx { const int32_t numModesQ = modesQ.size(); const int32_t numModesR = modesR.size(); - void *D_T = in->Mem; - void *D_Q = Q->Mem; - void *D_R = R->Mem; + void *D_T = in->data(); + void *D_Q = Q->data(); + void *D_R = R->data(); /****************** * cuTensorNet @@ -243,9 +243,9 @@ namespace cytnx { const int32_t numModesQ = modesQ.size(); const int32_t numModesR = modesR.size(); - void *D_T = in->Mem; - void *D_Q = Q->Mem; - void *D_R = R->Mem; + void *D_T = in->data(); + void *D_Q = Q->data(); + void *D_R = R->data(); /****************** * cuTensorNet @@ -380,9 +380,9 @@ namespace cytnx { const int32_t numModesQ = modesQ.size(); const int32_t numModesR = modesR.size(); - void *D_T = in->Mem; - void *D_Q = Q->Mem; - void *D_R = R->Mem; + void *D_T = in->data(); + void *D_Q = Q->data(); + void *D_R = R->data(); /****************** * cuTensorNet @@ -517,9 +517,9 @@ namespace cytnx { const int32_t numModesQ = modesQ.size(); const int32_t numModesR = modesR.size(); - void *D_T = in->Mem; - void *D_Q = Q->Mem; - void *D_R = R->Mem; + void *D_T = in->data(); + void *D_Q = Q->data(); + void *D_R = R->data(); /****************** * cuTensorNet diff --git a/src/backend/linalg_internal_gpu/cuSub_internal.cu b/src/backend/linalg_internal_gpu/cuSub_internal.cu index c5029bf9..e46e4331 100644 --- a/src/backend/linalg_internal_gpu/cuSub_internal.cu +++ b/src/backend/linalg_internal_gpu/cuSub_internal.cu @@ -140,9 +140,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -262,9 +262,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = 
(cuDoubleComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -381,9 +381,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -500,9 +500,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -620,9 +620,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -740,9 +740,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -860,9 +860,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -980,9 +980,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1100,9 +1100,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + 
cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1220,9 +1220,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1339,9 +1339,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1462,9 +1462,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1583,9 +1583,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1702,9 +1702,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1821,9 +1821,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -1941,9 +1941,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex 
*_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2061,9 +2061,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2181,9 +2181,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2301,9 +2301,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2421,9 +2421,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2541,9 +2541,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2660,9 +2660,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2783,9 +2783,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2905,9 +2905,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -2970,9 +2970,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3034,9 +3034,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3098,9 +3098,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3162,9 +3162,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3226,9 +3226,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3290,9 +3290,9 @@ namespace cytnx { const unsigned long 
long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3354,9 +3354,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3418,9 +3418,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3530,9 +3530,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3654,9 +3654,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3776,9 +3776,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3841,9 +3841,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3905,9 +3905,9 @@ 
namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -3969,9 +3969,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4033,9 +4033,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4097,9 +4097,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4161,9 +4161,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4225,9 +4225,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4289,9 +4289,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if 
(len % 512) NBlocks += 1; @@ -4402,9 +4402,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4526,9 +4526,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4649,9 +4649,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4714,9 +4714,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4778,9 +4778,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4843,9 +4843,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -4908,9 +4908,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; 
@@ -4973,9 +4973,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5038,9 +5038,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5103,9 +5103,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5168,9 +5168,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5280,9 +5280,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int64 *_Lin = (cytnx_int64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int64 *_Lin = (cytnx_int64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5404,9 +5404,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5527,9 +5527,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5591,9 +5591,9 @@ namespace cytnx { const 
unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5655,9 +5655,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5720,9 +5720,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5785,9 +5785,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5850,9 +5850,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5915,9 +5915,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -5980,9 +5980,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6045,9 +6045,9 @@ namespace cytnx { const std::vector &shape, const 
std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6157,9 +6157,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint64 *_Lin = (cytnx_uint64 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6282,9 +6282,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6405,9 +6405,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6469,9 +6469,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6533,9 +6533,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6598,9 +6598,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6663,9 +6663,9 @@ namespace cytnx { const 
std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6728,9 +6728,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6793,9 +6793,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6858,9 +6858,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -6923,9 +6923,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7035,9 +7035,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int32 *_Lin = (cytnx_int32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int32 *_Lin = (cytnx_int32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7160,9 +7160,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7283,9 +7283,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector 
&invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7347,9 +7347,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7411,9 +7411,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7476,9 +7476,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7541,9 +7541,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7606,9 +7606,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7671,9 +7671,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7737,9 +7737,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) 
{ - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7802,9 +7802,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -7914,9 +7914,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint32 *_Lin = (cytnx_uint32 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8039,9 +8039,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8162,9 +8162,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8226,9 +8226,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8290,9 +8290,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8355,9 +8355,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, 
const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8420,9 +8420,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8485,9 +8485,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8550,9 +8550,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8616,9 +8616,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8681,9 +8681,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8793,9 +8793,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_int16 *_Lin = (cytnx_int16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_int16 *_Lin = (cytnx_int16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -8918,9 +8918,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_uint16 
*_Lin = (cytnx_uint16 *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9041,9 +9041,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9105,9 +9105,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9169,9 +9169,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9234,9 +9234,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9299,9 +9299,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9364,9 +9364,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9429,9 +9429,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - 
cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9495,9 +9495,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9560,9 +9560,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9672,9 +9672,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_uint16 *_Lin = (cytnx_uint16 *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9789,9 +9789,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -9904,9 +9904,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10016,9 +10016,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10127,9 +10127,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool 
*)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10238,9 +10238,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int64 *_out = (cytnx_int64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int64 *_Rin = (cytnx_int64 *)Rin->Mem; + cytnx_int64 *_out = (cytnx_int64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int64 *_Rin = (cytnx_int64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10349,9 +10349,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint64 *_out = (cytnx_uint64 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->Mem; + cytnx_uint64 *_out = (cytnx_uint64 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint64 *_Rin = (cytnx_uint64 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10460,9 +10460,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int32 *_out = (cytnx_int32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int32 *_Rin = (cytnx_int32 *)Rin->Mem; + cytnx_int32 *_out = (cytnx_int32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int32 *_Rin = (cytnx_int32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10571,9 +10571,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint32 *_out = (cytnx_uint32 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->Mem; + cytnx_uint32 *_out = (cytnx_uint32 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint32 *_Rin = (cytnx_uint32 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10683,9 +10683,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_int16 *_out = (cytnx_int16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_int16 *_Rin = (cytnx_int16 *)Rin->Mem; + cytnx_int16 *_out = (cytnx_int16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_int16 *_Rin = (cytnx_int16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10794,9 +10794,9 @@ namespace cytnx { const unsigned long long &len, const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_uint16 *_out = (cytnx_uint16 *)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->Mem; + cytnx_uint16 *_out = (cytnx_uint16 *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_uint16 *_Rin = (cytnx_uint16 *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; @@ -10859,9 +10859,9 @@ namespace cytnx { const std::vector &shape, const std::vector &invmapper_L, const std::vector &invmapper_R) { - cytnx_bool *_out = (cytnx_bool 
*)out->Mem; - cytnx_bool *_Lin = (cytnx_bool *)Lin->Mem; - cytnx_bool *_Rin = (cytnx_bool *)Rin->Mem; + cytnx_bool *_out = (cytnx_bool *)out->data(); + cytnx_bool *_Lin = (cytnx_bool *)Lin->data(); + cytnx_bool *_Rin = (cytnx_bool *)Rin->data(); cytnx_uint32 NBlocks = len / 512; if (len % 512) NBlocks += 1; diff --git a/src/backend/linalg_internal_gpu/cuSum_internal.cu b/src/backend/linalg_internal_gpu/cuSum_internal.cu index 914d5f28..ce9ced8a 100644 --- a/src/backend/linalg_internal_gpu/cuSum_internal.cu +++ b/src/backend/linalg_internal_gpu/cuSum_internal.cu @@ -11,54 +11,61 @@ namespace cytnx { void cuSum_internal_cd(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_cd((cytnx_complex128 *)out->Mem, (cytnx_complex128 *)ten->Mem, - Nelem); + utils_internal::cuReduce_gpu_cd((cytnx_complex128 *)out->data(), + (cytnx_complex128 *)ten->data(), Nelem); } void cuSum_internal_cf(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_cf((cytnx_complex64 *)out->Mem, (cytnx_complex64 *)ten->Mem, - Nelem); + utils_internal::cuReduce_gpu_cf((cytnx_complex64 *)out->data(), + (cytnx_complex64 *)ten->data(), Nelem); } void cuSum_internal_d(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_d((cytnx_double *)out->Mem, (cytnx_double *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_d((cytnx_double *)out->data(), (cytnx_double *)ten->data(), + Nelem); } void cuSum_internal_f(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_f((cytnx_float *)out->Mem, (cytnx_float *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_f((cytnx_float *)out->data(), (cytnx_float *)ten->data(), Nelem); } void cuSum_internal_i64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_i64((cytnx_int64 *)out->Mem, (cytnx_int64 *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_i64((cytnx_int64 *)out->data(), (cytnx_int64 *)ten->data(), + Nelem); } void cuSum_internal_u64(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_u64((cytnx_uint64 *)out->Mem, (cytnx_uint64 *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_u64((cytnx_uint64 *)out->data(), (cytnx_uint64 *)ten->data(), + Nelem); } void cuSum_internal_i32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_i32((cytnx_int32 *)out->Mem, (cytnx_int32 *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_i32((cytnx_int32 *)out->data(), (cytnx_int32 *)ten->data(), + Nelem); } void cuSum_internal_u32(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_u32((cytnx_uint32 *)out->Mem, (cytnx_uint32 *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_u32((cytnx_uint32 *)out->data(), (cytnx_uint32 *)ten->data(), + Nelem); } void cuSum_internal_i16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_i16((cytnx_int16 *)out->Mem, (cytnx_int16 *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_i16((cytnx_int16 *)out->data(), (cytnx_int16 
*)ten->data(), + Nelem); } void cuSum_internal_u16(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, const char &type) { - utils_internal::cuReduce_gpu_u16((cytnx_uint16 *)out->Mem, (cytnx_uint16 *)ten->Mem, Nelem); + utils_internal::cuReduce_gpu_u16((cytnx_uint16 *)out->data(), (cytnx_uint16 *)ten->data(), + Nelem); } void cuSum_internal_b(boost::intrusive_ptr &out, const boost::intrusive_ptr &ten, const cytnx_uint64 &Nelem, diff --git a/src/backend/linalg_internal_gpu/cuSvd_internal.cu b/src/backend/linalg_internal_gpu/cuSvd_internal.cu index 318bfa73..bacb7b6a 100644 --- a/src/backend/linalg_internal_gpu/cuSvd_internal.cu +++ b/src/backend/linalg_internal_gpu/cuSvd_internal.cu @@ -18,8 +18,8 @@ namespace cytnx { cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 'S' in gesvd */ @@ -31,20 +31,20 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -59,7 +59,7 @@ namespace cytnx { checkCudaErrors(cusolverDnXgesvdp_bufferSize(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -84,7 +84,7 @@ namespace cytnx { cusolverDnXgesvdp(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -105,10 +105,10 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnXgesvdp': cuBlas INFO = ", info); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); @@ -128,8 +128,8 @@ namespace cytnx { cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? 
CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 'S' in gesvd */ @@ -141,21 +141,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -170,7 +170,7 @@ namespace cytnx { checkCudaErrors(cusolverDnXgesvdp_bufferSize(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -195,7 +195,7 @@ namespace cytnx { cusolverDnXgesvdp(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -216,10 +216,10 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnXgesvdp': cuBlas INFO = ", info); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); @@ -238,8 +238,8 @@ namespace cytnx { cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 
'S' in gesvd */ @@ -251,21 +251,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -280,7 +280,7 @@ namespace cytnx { checkCudaErrors(cusolverDnXgesvdp_bufferSize(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -305,7 +305,7 @@ namespace cytnx { cusolverDnXgesvdp(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -325,10 +325,10 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnXgesvdp': cuBlas INFO = ", info); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); @@ -347,8 +347,8 @@ namespace cytnx { cusolverEigMode_t jobz; // if U and vT are NULL ptr, then it will not be computed. - jobz = (U->dtype == Type.Void and vT->dtype == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR - : CUSOLVER_EIG_MODE_VECTOR; + jobz = (U->dtype() == Type.Void and vT->dtype() == Type.Void) ? CUSOLVER_EIG_MODE_NOVECTOR + : CUSOLVER_EIG_MODE_VECTOR; // const int econ = 0; /* i.e. 'A' in gesvd */ cytnx_int32 econ = 1; /* i.e. 
'S' in gesvd */ @@ -360,21 +360,21 @@ namespace cytnx { cuDoubleComplex *Mij; checkCudaErrors(cudaMalloc((void **)&Mij, M * N * sizeof(data_type))); checkCudaErrors( - cudaMemcpy(Mij, in->Mem, sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); + cudaMemcpy(Mij, in->data(), sizeof(data_type) * M * N, cudaMemcpyDeviceToDevice)); cytnx_int64 min = std::min(M, N); cytnx_int64 max = std::max(M, N); cytnx_int64 ldA = N, ldu = N, ldvT = M; void *UMem = nullptr, *vTMem = nullptr; - if (U->Mem) { - UMem = U->Mem; + if (U->data()) { + UMem = U->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&UMem, max * max * sizeof(data_type))); } - if (vT->Mem) { - vTMem = vT->Mem; + if (vT->data()) { + vTMem = vT->data(); } else { if (jobz == CUSOLVER_EIG_MODE_VECTOR) checkCudaErrors(cudaMalloc(&vTMem, max * max * sizeof(data_type))); @@ -389,7 +389,7 @@ namespace cytnx { checkCudaErrors(cusolverDnXgesvdp_bufferSize(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -414,7 +414,7 @@ namespace cytnx { cusolverDnXgesvdp(cusolverH, NULL, /* params */ jobz, econ, N, M, cuda_data_type, /* dataTypeA */ Mij, ldA, cuda_data_typeR, /* dataTypeS */ - S->Mem, cuda_data_type, /* dataTypeU */ + S->data(), cuda_data_type, /* dataTypeU */ vTMem, ldu, /* ldu */ cuda_data_type, /* dataTypeV */ UMem, ldvT, /* ldv */ @@ -434,10 +434,10 @@ namespace cytnx { "Error in cuBlas function 'cusolverDnXgesvdp': cuBlas INFO = ", info); checkCudaErrors(cudaFree(Mij)); - if (UMem != nullptr and U->dtype == Type.Void) { + if (UMem != nullptr and U->dtype() == Type.Void) { checkCudaErrors(cudaFree(UMem)); } - if (vTMem != nullptr and vT->dtype == Type.Void) { + if (vTMem != nullptr and vT->dtype() == Type.Void) { checkCudaErrors(cudaFree(vTMem)); } checkCudaErrors(cudaFree(devinfo)); diff --git a/src/backend/linalg_internal_gpu/cuTensordot_internal.cu b/src/backend/linalg_internal_gpu/cuTensordot_internal.cu index cfd35a6a..eefbb09a 100644 --- a/src/backend/linalg_internal_gpu/cuTensordot_internal.cu +++ b/src/backend/linalg_internal_gpu/cuTensordot_internal.cu @@ -76,9 +76,9 @@ namespace cytnx { labelOut[i] = i + idxl.size(); } - void *outPtr = out._impl->storage()._impl->Mem; - void *lPtr = Lin._impl->storage()._impl->Mem; - void *rPtr = Rin._impl->storage()._impl->Mem; + void *outPtr = out._impl->storage()._impl->data(); + void *lPtr = Lin._impl->storage()._impl->data(); + void *rPtr = Rin._impl->storage()._impl->data(); int nlabelOut = labelOut.size(); int nlabelL = labelL.size(); diff --git a/src/backend/linalg_internal_gpu/cuVectordot_internal.cu b/src/backend/linalg_internal_gpu/cuVectordot_internal.cu index 12a00595..4268f6ae 100644 --- a/src/backend/linalg_internal_gpu/cuVectordot_internal.cu +++ b/src/backend/linalg_internal_gpu/cuVectordot_internal.cu @@ -15,9 +15,9 @@ namespace cytnx { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - cuDoubleComplex *_out = (cuDoubleComplex *)out->Mem; - cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->Mem; - cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->Mem; + cuDoubleComplex *_out = (cuDoubleComplex *)out->data(); + cuDoubleComplex *_Lin = (cuDoubleComplex *)Lin->data(); + cuDoubleComplex *_Rin = (cuDoubleComplex *)Rin->data(); //_out[0] = make_cuDoubleComplex(0.,0.); unsigned long long remain = len; 
@@ -76,9 +76,9 @@ namespace cytnx { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - cuFloatComplex *_out = (cuFloatComplex *)out->Mem; - cuFloatComplex *_Lin = (cuFloatComplex *)Lin->Mem; - cuFloatComplex *_Rin = (cuFloatComplex *)Rin->Mem; + cuFloatComplex *_out = (cuFloatComplex *)out->data(); + cuFloatComplex *_Lin = (cuFloatComplex *)Lin->data(); + cuFloatComplex *_Rin = (cuFloatComplex *)Rin->data(); //_out[0] = make_cuFloatComplex(0.,0.); unsigned long long remain = len; @@ -135,9 +135,9 @@ namespace cytnx { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - cytnx_double *_out = (cytnx_double *)out->Mem; - cytnx_double *_Lin = (cytnx_double *)Lin->Mem; - cytnx_double *_Rin = (cytnx_double *)Rin->Mem; + cytnx_double *_out = (cytnx_double *)out->data(); + cytnx_double *_Lin = (cytnx_double *)Lin->data(); + cytnx_double *_Rin = (cytnx_double *)Rin->data(); //_out[0] = 0; unsigned long long remain = len; @@ -192,9 +192,9 @@ namespace cytnx { cublasHandle_t cublasH = NULL; checkCudaErrors(cublasCreate(&cublasH)); - cytnx_float *_out = (cytnx_float *)out->Mem; - cytnx_float *_Lin = (cytnx_float *)Lin->Mem; - cytnx_float *_Rin = (cytnx_float *)Rin->Mem; + cytnx_float *_out = (cytnx_float *)out->data(); + cytnx_float *_Lin = (cytnx_float *)Lin->data(); + cytnx_float *_Rin = (cytnx_float *)Rin->data(); //_out[0] = 0; unsigned long long remain = len; diff --git a/src/backend/linalg_internal_gpu/cudaMemcpyTruncation.cu b/src/backend/linalg_internal_gpu/cudaMemcpyTruncation.cu index df2fb47c..53374e24 100644 --- a/src/backend/linalg_internal_gpu/cudaMemcpyTruncation.cu +++ b/src/backend/linalg_internal_gpu/cudaMemcpyTruncation.cu @@ -41,7 +41,8 @@ namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and truc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + truc_dim - 1 >= mindim) { truc_dim--; } else { break; @@ -54,8 +55,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); @@ -64,10 +65,10 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex128 *)newU._impl->storage()._impl->Mem + src, - (cytnx_complex128 *)U._impl->storage()._impl->Mem + dest, - truc_dim * sizeof(cytnx_complex128), - cudaMemcpyDeviceToDevice)); + HANDLE_CUDA_ERROR( + cudaMemcpy((cytnx_complex128 *)newU._impl->storage()._impl->data() + src, + (cytnx_complex128 *)U._impl->storage()._impl->data() + dest, + truc_dim * sizeof(cytnx_complex128), cudaMemcpyDeviceToDevice)); src += truc_dim; dest += U.shape()[1]; } @@ -76,22 +77,22 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex128 *)newvT._impl->storage()._impl->Mem, - (cytnx_complex128 *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex128 *)newvT._impl->storage()._impl->data(), + (cytnx_complex128 *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_complex128), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -112,7 +113,8 @@ namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and truc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + truc_dim - 1 >= mindim) { truc_dim--; } else { break; @@ -125,8 +127,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); @@ -135,10 +137,10 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newU._impl->storage()._impl->Mem + src, - (cytnx_complex64 *)U._impl->storage()._impl->Mem + dest, - truc_dim * sizeof(cytnx_complex64), - cudaMemcpyDeviceToDevice)); + HANDLE_CUDA_ERROR( + cudaMemcpy((cytnx_complex64 *)newU._impl->storage()._impl->data() + src, + (cytnx_complex64 *)U._impl->storage()._impl->data() + dest, + truc_dim * sizeof(cytnx_complex64), cudaMemcpyDeviceToDevice)); src += truc_dim; dest += U.shape()[1]; } @@ -147,22 +149,22 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newvT._impl->storage()._impl->Mem, - (cytnx_complex64 *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_complex64 *)newvT._impl->storage()._impl->data(), + (cytnx_complex64 *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_complex64), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -183,7 +185,8 @@ namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and truc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + truc_dim - 1 >= mindim) { truc_dim--; } else { break; @@ -196,8 +199,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); @@ -206,8 +209,8 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newU._impl->storage()._impl->Mem + src, - (cytnx_double *)U._impl->storage()._impl->Mem + dest, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newU._impl->storage()._impl->data() + src, + (cytnx_double *)U._impl->storage()._impl->data() + dest, truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); src += truc_dim; @@ -218,22 +221,22 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newvT._impl->storage()._impl->Mem, - (cytnx_double *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newvT._impl->storage()._impl->data(), + (cytnx_double *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; @@ -254,7 +257,8 @@ namespace cytnx { } cytnx_uint64 truc_dim = Kdim; for (cytnx_int64 i = Kdim - 1; i >= 0; i--) { - if (((cytnx_double *)S._impl->storage()._impl->Mem)[i] < err and truc_dim - 1 >= mindim) { + if (((cytnx_double *)S._impl->storage()._impl->data())[i] < err and + truc_dim - 1 >= mindim) { truc_dim--; } else { break; @@ -267,8 +271,8 @@ namespace cytnx { // perform the manual truncation Tensor newS = Tensor({truc_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newS._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data(), truc_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); if (is_U) { Tensor newU = Tensor({U.shape()[0], truc_dim}, U.dtype(), U.device()); @@ -277,8 +281,8 @@ namespace cytnx { int dest = 0; // copy with strides. for (int i = 0; i < U.shape()[0]; i++) { - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newU._impl->storage()._impl->Mem + src, - (cytnx_float *)U._impl->storage()._impl->Mem + dest, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newU._impl->storage()._impl->data() + src, + (cytnx_float *)U._impl->storage()._impl->data() + dest, truc_dim * sizeof(cytnx_float), cudaMemcpyDeviceToDevice)); src += truc_dim; dest += U.shape()[1]; @@ -288,22 +292,22 @@ namespace cytnx { if (is_vT) { Tensor newvT = Tensor({truc_dim, vT.shape()[1]}, vT.dtype(), vT.device()); // simply copy a new one dropping the tail. 
- HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newvT._impl->storage()._impl->Mem, - (cytnx_float *)vT._impl->storage()._impl->Mem, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_float *)newvT._impl->storage()._impl->data(), + (cytnx_float *)vT._impl->storage()._impl->data(), vT.shape()[1] * truc_dim * sizeof(cytnx_float), cudaMemcpyDeviceToDevice)); vT = newvT; } if (return_err == 1) { Tensor newterr = Tensor({1}, S.dtype(), S.device()); - ((cytnx_double *)newterr._impl->storage()._impl->Mem)[0] = - ((cytnx_double *)S._impl->storage()._impl->Mem)[truc_dim]; + ((cytnx_double *)newterr._impl->storage()._impl->data())[0] = + ((cytnx_double *)S._impl->storage()._impl->data())[truc_dim]; terr = newterr; } else if (return_err) { cytnx_uint64 discared_dim = S.shape()[0] - truc_dim; Tensor newterr = Tensor({discared_dim}, S.dtype(), S.device()); - HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->Mem, - (cytnx_double *)S._impl->storage()._impl->Mem + truc_dim, + HANDLE_CUDA_ERROR(cudaMemcpy((cytnx_double *)newterr._impl->storage()._impl->data(), + (cytnx_double *)S._impl->storage()._impl->data() + truc_dim, discared_dim * sizeof(cytnx_double), cudaMemcpyDeviceToDevice)); terr = newterr; diff --git a/src/backend/random_internal_cpu/Normal_internal.cpp b/src/backend/random_internal_cpu/Normal_internal.cpp index 77835093..dbaec728 100644 --- a/src/backend/random_internal_cpu/Normal_internal.cpp +++ b/src/backend/random_internal_cpu/Normal_internal.cpp @@ -10,8 +10,8 @@ namespace cytnx { mt19937 eng(seed); std::normal_distribution distro(a, b); - double *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len * 2; i++) { + double *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size() * 2; i++) { rptr[i] = distro(eng); } } @@ -20,8 +20,8 @@ namespace cytnx { mt19937 eng(seed); std::normal_distribution distro(a, b); - float *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len * 2; i++) { + float *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size() * 2; i++) { rptr[i] = distro(eng); } } @@ -29,8 +29,8 @@ namespace cytnx { const unsigned int &seed) { mt19937 eng(seed); std::normal_distribution distro(a, b); - double *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len; i++) { + double *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size(); i++) { rptr[i] = distro(eng); } } @@ -38,8 +38,8 @@ namespace cytnx { const unsigned int &seed) { mt19937 eng(seed); std::normal_distribution distro(a, b); - float *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len; i++) { + float *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size(); i++) { rptr[i] = distro(eng); } } diff --git a/src/backend/random_internal_cpu/Uniform_internal.cpp b/src/backend/random_internal_cpu/Uniform_internal.cpp index 69ef0d5d..01b7e265 100644 --- a/src/backend/random_internal_cpu/Uniform_internal.cpp +++ b/src/backend/random_internal_cpu/Uniform_internal.cpp @@ -10,8 +10,8 @@ namespace cytnx { mt19937 eng(seed); std::uniform_real_distribution distro(a, b); - double *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len * 2; i++) { + double *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size() * 2; i++) { rptr[i] = distro(eng); } } @@ -20,8 +20,8 @@ namespace cytnx { mt19937 eng(seed); std::uniform_real_distribution distro(a, b); - float *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len * 2; i++) { + float *rptr = 
static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size() * 2; i++) { rptr[i] = distro(eng); } } @@ -29,8 +29,8 @@ namespace cytnx { const unsigned int &seed) { mt19937 eng(seed); std::uniform_real_distribution distro(a, b); - double *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len; i++) { + double *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size(); i++) { rptr[i] = distro(eng); } } @@ -38,8 +38,8 @@ namespace cytnx { const unsigned int &seed) { mt19937 eng(seed); std::uniform_real_distribution distro(a, b); - float *rptr = static_cast(in->Mem); - for (cytnx_uint64 i = 0; i < in->len; i++) { + float *rptr = static_cast(in->data()); + for (cytnx_uint64 i = 0; i < in->size(); i++) { rptr[i] = distro(eng); } } diff --git a/src/backend/random_internal_gpu/cuNormal_internal.cu b/src/backend/random_internal_gpu/cuNormal_internal.cu index c6af246a..2b18cb41 100644 --- a/src/backend/random_internal_gpu/cuNormal_internal.cu +++ b/src/backend/random_internal_gpu/cuNormal_internal.cu @@ -5,7 +5,7 @@ namespace cytnx { void cuRng_normal_cd(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - double *rptr = static_cast(in->Mem); + double *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -14,13 +14,13 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateNormalDouble(gen, rptr, in->len * 2, a, b); + curandGenerateNormalDouble(gen, rptr, in->size() * 2, a, b); curandDestroyGenerator(gen); } void cuRng_normal_cf(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - float *rptr = static_cast(in->Mem); + float *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -28,13 +28,13 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateNormal(gen, rptr, in->len * 2, a, b); + curandGenerateNormal(gen, rptr, in->size() * 2, a, b); curandDestroyGenerator(gen); } void cuRng_normal_d(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - double *rptr = static_cast(in->Mem); + double *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -42,13 +42,13 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateNormalDouble(gen, rptr, in->len, a, b); + curandGenerateNormalDouble(gen, rptr, in->size(), a, b); curandDestroyGenerator(gen); } void cuRng_normal_f(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - float *rptr = static_cast(in->Mem); + float *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -56,7 +56,7 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateNormal(gen, rptr, in->len, a, b); + curandGenerateNormal(gen, rptr, in->size(), a, b); curandDestroyGenerator(gen); } diff --git a/src/backend/random_internal_gpu/cuUniform_internal.cu b/src/backend/random_internal_gpu/cuUniform_internal.cu index c6a9ffb4..7ac14fd0 100644 --- a/src/backend/random_internal_gpu/cuUniform_internal.cu +++ b/src/backend/random_internal_gpu/cuUniform_internal.cu @@ -5,7 +5,7 @@ namespace cytnx { void cuRng_uniform_cd(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - double *rptr = 
static_cast(in->Mem); + double *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -14,13 +14,13 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateUniformDouble(gen, rptr, in->len * 2); + curandGenerateUniformDouble(gen, rptr, in->size() * 2); curandDestroyGenerator(gen); } void cuRng_uniform_cf(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - float *rptr = static_cast(in->Mem); + float *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -28,13 +28,13 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateUniform(gen, rptr, in->len * 2); + curandGenerateUniform(gen, rptr, in->size() * 2); curandDestroyGenerator(gen); } void cuRng_uniform_d(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - double *rptr = static_cast(in->Mem); + double *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -42,13 +42,13 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateUniformDouble(gen, rptr, in->len); + curandGenerateUniformDouble(gen, rptr, in->size()); curandDestroyGenerator(gen); } void cuRng_uniform_f(boost::intrusive_ptr &in, const double &a, const double &b, const unsigned int &seed) { - float *rptr = static_cast(in->Mem); + float *rptr = static_cast(in->data()); curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); @@ -56,7 +56,7 @@ namespace cytnx { curandSetPseudoRandomGeneratorSeed(gen, seed); // generate: - curandGenerateUniform(gen, rptr, in->len); + curandGenerateUniform(gen, rptr, in->size()); curandDestroyGenerator(gen); } diff --git a/src/backend/utils_internal_cpu/Cast_cpu.cpp b/src/backend/utils_internal_cpu/Cast_cpu.cpp index 62121522..4016f185 100644 --- a/src/backend/utils_internal_cpu/Cast_cpu.cpp +++ b/src/backend/utils_internal_cpu/Cast_cpu.cpp @@ -16,7 +16,7 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_complex128) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_complex128) * len_in); } void Cast_cpu_cdtcf(const boost::intrusive_ptr& in, @@ -27,8 +27,8 @@ namespace cytnx { out->Init(len_in); } - cytnx_complex128* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_complex128* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -43,8 +43,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_complex64* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_complex64* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -59,7 +59,7 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_complex64) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_complex64) * len_in); } void Cast_cpu_dtcd(const boost::intrusive_ptr& in, @@ -69,8 +69,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); 
out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); @@ -87,8 +87,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) @@ -104,7 +104,7 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_double) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_double) * len_in); } void Cast_cpu_dtf(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -113,8 +113,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -128,8 +128,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -143,8 +143,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -158,8 +158,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -173,8 +173,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -188,8 +188,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -203,8 +203,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_double* _in = 
static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -218,8 +218,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -234,8 +234,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) @@ -250,8 +250,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -265,8 +265,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -280,7 +280,7 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_float) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_float) * len_in); } void Cast_cpu_fti64(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -289,8 +289,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -304,8 +304,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -319,8 +319,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -334,8 +334,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_uint32* _out = 
static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -349,8 +349,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -364,8 +364,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -379,8 +379,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -395,8 +395,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) @@ -411,8 +411,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) @@ -427,8 +427,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -442,8 +442,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -457,7 +457,7 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_int64) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_int64) * len_in); } void Cast_cpu_i64tu64(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -466,8 +466,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < 
len_in; i++) { @@ -481,8 +481,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -496,8 +496,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -511,8 +511,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -526,8 +526,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -541,8 +541,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -558,8 +558,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -573,8 +573,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -588,8 +588,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -603,8 +603,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) 
for (unsigned long long i = 0; i < len_in; i++) { @@ -618,8 +618,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -633,7 +633,7 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_uint64) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_uint64) * len_in); } void Cast_cpu_u64ti32(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -642,8 +642,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -657,8 +657,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -672,8 +672,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -687,8 +687,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -702,8 +702,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -719,8 +719,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -734,8 +734,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for 
schedule(dynamic) @@ -750,8 +750,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -765,8 +765,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -780,8 +780,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -795,8 +795,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -810,7 +810,7 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_int32) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_int32) * len_in); } void Cast_cpu_i32tu32(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -819,8 +819,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -834,8 +834,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -849,8 +849,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -864,8 +864,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -880,8 +880,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - 
cytnx_uint32* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -895,8 +895,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) @@ -911,8 +911,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -926,8 +926,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -941,8 +941,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -956,8 +956,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -971,8 +971,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -986,7 +986,7 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_uint32) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_uint32) * len_in); } void Cast_cpu_u32tu16(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -995,8 +995,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1010,8 +1010,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); 
- cytnx_int16* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1025,8 +1025,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1041,8 +1041,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1056,8 +1056,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1071,8 +1071,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1086,8 +1086,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1101,8 +1101,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1116,8 +1116,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1131,8 +1131,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1146,8 +1146,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); 
out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1161,7 +1161,7 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_uint16) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_uint16) * len_in); } void Cast_cpu_u16ti16(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -1170,8 +1170,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1185,8 +1185,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1201,8 +1201,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1216,8 +1216,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1231,8 +1231,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1246,8 +1246,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1261,8 +1261,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1276,8 +1276,8 @@ namespace cytnx { out = boost::intrusive_ptr(new 
Uint64Storage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1291,8 +1291,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1306,8 +1306,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1321,8 +1321,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1336,7 +1336,7 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_int16) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_int16) * len_in); } void Cast_cpu_i16tb(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -1345,8 +1345,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1361,8 +1361,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_complex128* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_complex128* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex128) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1376,8 +1376,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_complex64* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_complex64* _out = static_cast(out->data()); memset(_out, 0, sizeof(cytnx_complex64) * len_in); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1391,8 +1391,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1406,8 +1406,8 @@ namespace cytnx { out = 
boost::intrusive_ptr(new FloatStorage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1421,8 +1421,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1436,8 +1436,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1451,8 +1451,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1466,8 +1466,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1481,8 +1481,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1496,8 +1496,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); #pragma omp parallel for schedule(dynamic) for (unsigned long long i = 0; i < len_in; i++) { @@ -1511,7 +1511,7 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in); } - memcpy(out->Mem, in->Mem, sizeof(cytnx_bool) * len_in); + memcpy(out->data(), in->data(), sizeof(cytnx_bool) * len_in); } } // namespace utils_internal diff --git a/src/backend/utils_internal_cpu/Movemem_cpu.cpp b/src/backend/utils_internal_cpu/Movemem_cpu.cpp index b19c529b..db8848ec 100644 --- a/src/backend/utils_internal_cpu/Movemem_cpu.cpp +++ b/src/backend/utils_internal_cpu/Movemem_cpu.cpp @@ -28,7 +28,7 @@ namespace cytnx { const bool is_inplace) { #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != Type.cy_typeid(T()), + in->dtype() != Type.cy_typeid(T()), "[DEBUG][internal error] in.dtype_str is [%s] but call MoveMemoryCpu with type %s", in->dtype_str().c_str(), Type.getname(Type.cy_typeid(T()))); #endif @@ -47,8 +47,8 @@ namespace cytnx { accu_new *= newshape[i]; } - T *des = (T 
*)malloc(in->cap * sizeof(T)); - T *src = static_cast(in->Mem); + T *des = (T *)malloc(in->capacity() * sizeof(T)); + T *src = static_cast(in->data()); #ifdef UNI_OMP std::vector> old_inds; @@ -109,11 +109,11 @@ namespace cytnx { out = new BoolStorage(); } if (is_inplace) { - memcpy(in->Mem, des, sizeof(T) * accu_old); + memcpy(in->data(), des, sizeof(T) * accu_old); free(des); return out; } else { - out->_Init_byptr(des, accu_old, in->device, true, in->cap); + out->_Init_byptr(des, accu_old, in->device(), true, in->capacity()); return out; } } @@ -126,19 +126,16 @@ namespace cytnx { const bool is_inplace) { #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != Type.cy_typeid(T()), + in->dtype() != Type.cy_typeid(T()), "[DEBUG][internal error] in.dtype_str is [%s] but call MoveMemoryCpu with type %s", in->dtype_str().c_str(), Type.getname(Type.cy_typeid(T()))); #endif - T *des = (T *)malloc(in->cap * sizeof(T)); - T *src = static_cast(in->Mem); + T *des = (T *)malloc(in->capacity() * sizeof(T)); + T *src = static_cast(in->data()); cytnx_uint64 accu_old = 1, accu_new = 1; #ifdef UNI_HPTT - #ifdef UNI_DEBUG - cytnx_error_msg(true, "[DEBUG][Internal prompt] USE HPTT%s", "\n"); - #endif if (in->size() > 64) { std::vector perm(mapper.begin(), mapper.end()); std::vector size(old_shape.begin(), old_shape.end()); @@ -263,11 +260,11 @@ namespace cytnx { out = new FloatStorage(); } if (is_inplace) { - memcpy(in->Mem, des, sizeof(T) * accu_old); + memcpy(in->data(), des, sizeof(T) * accu_old); free(des); return out; } else { - out->_Init_byptr(des, accu_old, in->device, true, in->cap); + out->_Init_byptr(des, accu_old, in->device(), true, in->capacity()); return out; } } diff --git a/src/backend/utils_internal_cpu/SetArange_cpu.cpp b/src/backend/utils_internal_cpu/SetArange_cpu.cpp index 29742263..8afcfa28 100644 --- a/src/backend/utils_internal_cpu/SetArange_cpu.cpp +++ b/src/backend/utils_internal_cpu/SetArange_cpu.cpp @@ -6,7 +6,7 @@ namespace cytnx { void SetArange_cpu_cd(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_complex128 *ptr = (cytnx_complex128 *)in->Mem; + cytnx_complex128 *ptr = (cytnx_complex128 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -17,7 +17,7 @@ namespace cytnx { void SetArange_cpu_cf(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_complex64 *ptr = (cytnx_complex64 *)in->Mem; + cytnx_complex64 *ptr = (cytnx_complex64 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -28,7 +28,7 @@ namespace cytnx { void SetArange_cpu_d(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_double *ptr = (cytnx_double *)in->Mem; + cytnx_double *ptr = (cytnx_double *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -38,7 +38,8 @@ namespace cytnx { void SetArange_cpu_f(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_float *ptr = (cytnx_float *)in->Mem; + cytnx_float *ptr = (cytnx_float *)in->data(); + #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { ptr[n] = start + n * step; @@ -47,7 +48,8 @@ namespace cytnx { void 
SetArange_cpu_i64(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_int64 *ptr = (cytnx_int64 *)in->Mem; + cytnx_int64 *ptr = (cytnx_int64 *)in->data(); + #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { ptr[n] = start + n * step; @@ -56,7 +58,7 @@ namespace cytnx { void SetArange_cpu_u64(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_uint64 *ptr = (cytnx_uint64 *)in->Mem; + cytnx_uint64 *ptr = (cytnx_uint64 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -66,7 +68,7 @@ namespace cytnx { void SetArange_cpu_i32(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_int32 *ptr = (cytnx_int32 *)in->Mem; + cytnx_int32 *ptr = (cytnx_int32 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -76,7 +78,7 @@ namespace cytnx { void SetArange_cpu_u32(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_uint32 *ptr = (cytnx_uint32 *)in->Mem; + cytnx_uint32 *ptr = (cytnx_uint32 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -86,7 +88,7 @@ namespace cytnx { void SetArange_cpu_i16(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_int16 *ptr = (cytnx_int16 *)in->Mem; + cytnx_int16 *ptr = (cytnx_int16 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -96,7 +98,7 @@ namespace cytnx { void SetArange_cpu_u16(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_uint16 *ptr = (cytnx_uint16 *)in->Mem; + cytnx_uint16 *ptr = (cytnx_uint16 *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { @@ -106,7 +108,7 @@ namespace cytnx { void SetArange_cpu_b(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_bool *ptr = (cytnx_bool *)in->Mem; + cytnx_bool *ptr = (cytnx_bool *)in->data(); #pragma omp parallel for schedule(dynamic) for (cytnx_uint64 n = 0; n < Nelem; n++) { diff --git a/src/backend/utils_internal_cpu/blocks_mvelems_cpu.cpp b/src/backend/utils_internal_cpu/blocks_mvelems_cpu.cpp index 59843c30..482e99a3 100644 --- a/src/backend/utils_internal_cpu/blocks_mvelems_cpu.cpp +++ b/src/backend/utils_internal_cpu/blocks_mvelems_cpu.cpp @@ -103,8 +103,8 @@ namespace cytnx { } auto dest_mem = - dest_blocks[dest_outer2inner_row[new_row].first]._impl->storage()._impl->Mem; - auto src_mem = src_blocks[b]._impl->storage()._impl->Mem; + dest_blocks[dest_outer2inner_row[new_row].first]._impl->storage()._impl->data(); + auto src_mem = src_blocks[b]._impl->storage()._impl->data(); cytnx_int64 dest_idx = (dest_outer2inner_row[new_row].second) * (dest_blocks[dest_outer2inner_row[new_row].first].shape()[1]) + dest_outer2inner_col[new_col].second; diff --git a/src/backend/utils_internal_gpu/cuCast_gpu.cu b/src/backend/utils_internal_gpu/cuCast_gpu.cu index 48c6a821..7adfefea 100644 --- 
a/src/backend/utils_internal_gpu/cuCast_gpu.cu +++ b/src/backend/utils_internal_gpu/cuCast_gpu.cu @@ -53,8 +53,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_complex128) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_complex128) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_cdtcf(const boost::intrusive_ptr& in, @@ -65,8 +65,8 @@ namespace cytnx { out->Init(len_in, alloc_device); } - cuDoubleComplex* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); + cuDoubleComplex* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -80,8 +80,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cuFloatComplex* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); + cuFloatComplex* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -95,8 +95,8 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_complex64) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_complex64) * len_in, + cudaMemcpyDeviceToDevice)); } //----------------------------- @@ -107,9 +107,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_double* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -123,9 +123,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_double* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -139,8 +139,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_double) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_double) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_dtf(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -149,8 +149,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -163,8 +163,8 @@ namespace cytnx { out = 
boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2r<<>>(_in, _out, len_in); @@ -176,8 +176,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -190,8 +190,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -204,8 +204,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -218,8 +218,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -232,8 +232,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -246,8 +246,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_double* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_double* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -261,9 +261,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_float* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -276,9 +276,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_float* _in = static_cast(in->data()); + cuFloatComplex* _out = 
static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -290,8 +290,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -304,8 +304,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_float) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_float) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_fti64(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -314,8 +314,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -328,8 +328,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -342,8 +342,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -356,8 +356,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -370,8 +370,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -384,8 +384,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -398,8 +398,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_float* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_float* _in = static_cast(in->data()); + cytnx_bool* _out 
= static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -414,9 +414,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_int64* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -428,9 +428,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_int64* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -443,8 +443,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -457,8 +457,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -471,8 +471,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_int64) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_int64) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_i64tu64(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, @@ -481,8 +481,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -495,8 +495,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -509,8 +509,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -523,8 +523,8 @@ namespace cytnx { out = 
boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -537,8 +537,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -551,8 +551,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_int64* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_int64* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -567,9 +567,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_uint64* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -581,9 +581,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_uint64* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -595,8 +595,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -609,8 +609,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -623,8 +623,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -637,8 +637,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_uint64) * len_in, cudaMemcpyDeviceToDevice)); + 
checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_uint64) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_u64ti32(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, @@ -647,8 +647,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -661,8 +661,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -675,8 +675,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -689,8 +689,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -703,8 +703,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_uint64* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_uint64* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -719,9 +719,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_int32* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -733,9 +733,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_int32* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -747,8 +747,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks 
+= 1; @@ -761,8 +761,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -775,8 +775,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -789,8 +789,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -803,8 +803,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_int32) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_int32) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_i32tu32(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, @@ -813,8 +813,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -827,8 +827,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -841,8 +841,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -855,8 +855,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_int32* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_int32* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -870,9 +870,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_uint32* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks 
+= 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -884,9 +884,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_uint32* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -898,8 +898,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -912,8 +912,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -926,8 +926,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -940,8 +940,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -954,8 +954,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -968,8 +968,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_uint32) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_uint32) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_u32tu16(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, @@ -978,8 +978,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -992,8 +992,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + 
cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1006,8 +1006,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_uint32* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_uint32* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1021,9 +1021,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_uint16* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -1035,9 +1035,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_uint16* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -1049,8 +1049,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1063,8 +1063,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1077,8 +1077,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1091,8 +1091,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1105,8 +1105,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1119,8 +1119,8 @@ namespace cytnx { out = 
boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1133,8 +1133,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_uint16) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_uint16) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_u16ti16(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, @@ -1143,8 +1143,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1157,8 +1157,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_uint16* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_uint16* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1172,9 +1172,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_int16* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -1186,9 +1186,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_int16* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -1200,8 +1200,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1214,8 +1214,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1228,8 +1228,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - 
cytnx_int64* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1242,8 +1242,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1256,8 +1256,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1270,8 +1270,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1284,8 +1284,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1298,8 +1298,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_int16) * len_in, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(out->data(), in->data(), sizeof(cytnx_int16) * len_in, + cudaMemcpyDeviceToDevice)); } void cuCast_gpu_i16tb(const boost::intrusive_ptr& in, boost::intrusive_ptr& out, const unsigned long long& len_in, @@ -1308,8 +1308,8 @@ namespace cytnx { out = boost::intrusive_ptr(new BoolStorage()); out->Init(len_in, alloc_device); } - cytnx_int16* _in = static_cast(in->Mem); - cytnx_bool* _out = static_cast(out->Mem); + cytnx_int16* _in = static_cast(in->data()); + cytnx_bool* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1323,9 +1323,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexDoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cuDoubleComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuDoubleComplex) * len_in); + cytnx_bool* _in = static_cast(in->data()); + cuDoubleComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuDoubleComplex) * len_in); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cd<<>>(_in, _out, len_in); @@ -1337,9 +1337,9 @@ namespace cytnx { out = boost::intrusive_ptr(new ComplexFloatStorage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cuFloatComplex* _out = static_cast(out->Mem); - cudaMemset(out->Mem, 0, sizeof(cuFloatComplex) * len_in); + cytnx_bool* _in = static_cast(in->data()); + cuFloatComplex* _out = static_cast(out->data()); + cudaMemset(out->data(), 0, sizeof(cuFloatComplex) * len_in); 
cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; cuCastElem_kernel_r2cf<<>>(_in, _out, len_in); @@ -1351,8 +1351,8 @@ namespace cytnx { out = boost::intrusive_ptr(new DoubleStorage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_double* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_double* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1365,8 +1365,8 @@ namespace cytnx { out = boost::intrusive_ptr(new FloatStorage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_float* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_float* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1379,8 +1379,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int64Storage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_int64* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_int64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1393,8 +1393,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint64Storage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_uint64* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_uint64* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1407,8 +1407,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int32Storage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_int32* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_int32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1421,8 +1421,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint32Storage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_uint32* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_uint32* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1435,8 +1435,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Uint16Storage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_uint16* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_uint16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1449,8 +1449,8 @@ namespace cytnx { out = boost::intrusive_ptr(new Int16Storage()); out->Init(len_in, alloc_device); } - cytnx_bool* _in = static_cast(in->Mem); - cytnx_int16* _out = static_cast(out->Mem); + cytnx_bool* _in = static_cast(in->data()); + cytnx_int16* _out = static_cast(out->data()); cytnx_uint64 NBlocks = len_in / 512; if (len_in % 512) NBlocks += 1; @@ -1464,7 +1464,7 @@ namespace cytnx { out->Init(len_in, alloc_device); } checkCudaErrors( - cudaMemcpy(out->Mem, in->Mem, sizeof(cytnx_bool) * len_in, cudaMemcpyDeviceToDevice)); + cudaMemcpy(out->data(), in->data(), sizeof(cytnx_bool) * len_in, cudaMemcpyDeviceToDevice)); } } // namespace utils_internal diff --git a/src/backend/utils_internal_gpu/cuMovemem_gpu.cu b/src/backend/utils_internal_gpu/cuMovemem_gpu.cu index dd6cc6a9..8ceb203c 100644 
--- a/src/backend/utils_internal_gpu/cuMovemem_gpu.cu +++ b/src/backend/utils_internal_gpu/cuMovemem_gpu.cu @@ -109,10 +109,10 @@ namespace cytnx { unsigned int dtype_T = Type_class::cy_typeid(proxy); #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != dtype_T, + in->dtype() != dtype_T, "[DEBUG][internal error] in.dtype_str is [%s] but call cuMovemem_gpu with type %s", in->dtype_str().c_str(), Type.getname(dtype_T)); - cytnx_error_msg(in->device == Device.cpu, "%s", + cytnx_error_msg(in->device() == Device.cpu, "%s", "[DEBUG][internal error] in.device is on cpu but all cuda function."); #endif @@ -140,12 +140,12 @@ namespace cytnx { cuT *dtmp; cytnx_uint64 Nelem = accu_old; - cudaSetDevice(in->device); // ensure the following allocation on the same device as src. + cudaSetDevice(in->device()); // ensure the following allocation on the same device as src. checkCudaErrors( cudaMalloc((void **)&dshifter_old, sizeof(cytnx_uint64) * shifter_old.size())); checkCudaErrors(cudaMalloc((void **)&dperm_shifter_new, sizeof(cytnx_uint64) * permuted_shifter_new.size())); - dtmp = (cuT *)cuMalloc_gpu(sizeof(cuT) * in->cap); + dtmp = (cuT *)cuMalloc_gpu(sizeof(cuT) * in->capacity()); /// copy psn-vec/so-vec to device checkCudaErrors(cudaMemcpy(dperm_shifter_new, &permuted_shifter_new[0], @@ -161,7 +161,7 @@ namespace cytnx { NBlocks += 1; } cuMovemem_kernel<<>>( - dtmp, (cuT *)in->Mem, dshifter_old, dperm_shifter_new, old_shape.size(), Nelem); + dtmp, (cuT *)in->data(), dshifter_old, dperm_shifter_new, old_shape.size(), Nelem); /// house keeping: checkCudaErrors(cudaFree(dshifter_old)); @@ -170,12 +170,12 @@ namespace cytnx { boost::intrusive_ptr out = __SII.USIInit[dtype_T](); if (is_inplace) { /// cpy back: - checkCudaErrors(cudaMemcpy(in->Mem, dtmp, sizeof(T) * Nelem, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(in->data(), dtmp, sizeof(T) * Nelem, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaFree(dtmp)); return out; } else { - out->_Init_byptr(dtmp, Nelem, in->device, true, in->cap); + out->_Init_byptr(dtmp, Nelem, in->device(), true, in->capacity()); return out; } } @@ -191,16 +191,16 @@ namespace cytnx { unsigned int dtype_T = Type_class::cy_typeid(proxy); #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != dtype_T, + in->dtype() != dtype_T, "[DEBUG][internal error] in.dtype_str is [%s] but call cuMovemem_cutt with type %s", in->dtype_str().c_str(), Type.getname(dtype_T)); - cytnx_error_msg(in->device == Device.cpu, "%s", + cytnx_error_msg(in->device() == Device.cpu, "%s", "[DEBUG][internal error] in.device is on cpu but all cuda function."); #endif cuT *dtmp; - dtmp = (cuT *)cuMalloc_gpu(sizeof(cuT) * in->cap); - cytnx_uint64 Nelem = in->len; + dtmp = (cuT *)cuMalloc_gpu(sizeof(cuT) * in->capacity()); + cytnx_uint64 Nelem = in->size(); std::vector perm(mapper.begin(), mapper.end()); std::vector size(old_shape.begin(), old_shape.end()); std::reverse(size.begin(), size.end()); // matching API CUTT @@ -208,18 +208,18 @@ namespace cytnx { cuttHandle plan; cuttPlan(&plan, perm.size(), size.data(), perm.data(), sizeof(cuT), 0); - cuttExecute(plan, in->Mem, dtmp); + cuttExecute(plan, in->data(), dtmp); cuttDestroy(plan); boost::intrusive_ptr out = __SII.USIInit[dtype_T](); if (is_inplace) { /// cpy back: - checkCudaErrors(cudaMemcpy(in->Mem, dtmp, sizeof(T) * Nelem, cudaMemcpyDeviceToDevice)); + checkCudaErrors(cudaMemcpy(in->data(), dtmp, sizeof(T) * Nelem, cudaMemcpyDeviceToDevice)); cudaFree(dtmp); return in; } else { - out->_Init_byptr(dtmp, Nelem, in->device, true, in->cap); + 
out->_Init_byptr(dtmp, Nelem, in->device(), true, in->capacity()); return out; } } @@ -236,15 +236,16 @@ namespace cytnx { unsigned int dtype_T = Type_class::cy_typeid(DType()); #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != dtype_T, + in->dtype() != dtype_T, "[DEBUG][internal error] in.dtype_str is [%s] but call cuMovemem_cutt with type %s", in->dtype_str().c_str(), Type.getname(dtype_T)); - cytnx_error_msg(in->device == Device.cpu, "%s", + cytnx_error_msg(in->device() == Device.cpu, "%s", "[DEBUG][internal error] in.device is on cpu but all cuda function."); #endif - CudaType *dtmp = reinterpret_cast(cuMalloc_gpu(sizeof(CudaType) * in->cap)); - cytnx_uint64 Nelem = in->len; + CudaType *dtmp = + reinterpret_cast(cuMalloc_gpu(sizeof(CudaType) * in->capacity())); + cytnx_uint64 Nelem = in->size(); std::vector perm(mapper.begin(), mapper.end()); std::vector size(old_shape.begin(), old_shape.end()); @@ -311,7 +312,7 @@ namespace cytnx { checkCudaErrors( cutensorCreatePlan(handle, &plan, desc, planPref, 0 /* workspaceSizeLimit */)); - checkCudaErrors(cutensorPermute(handle, plan, &one, reinterpret_cast(in->Mem), + checkCudaErrors(cutensorPermute(handle, plan, &one, reinterpret_cast(in->data()), dtmp, 0 /* stream */)); checkCudaErrors(cutensorDestroyTensorDescriptor(descA)); @@ -323,12 +324,13 @@ namespace cytnx { boost::intrusive_ptr out = __SII.USIInit[dtype_T](); if (is_inplace) { /// cpy back: - checkCudaErrors(cudaMemcpy(in->Mem, dtmp, sizeof(DType) * Nelem, cudaMemcpyDeviceToDevice)); + checkCudaErrors( + cudaMemcpy(in->data(), dtmp, sizeof(DType) * Nelem, cudaMemcpyDeviceToDevice)); cudaFree(dtmp); return out; } else { - out->_Init_byptr(dtmp, Nelem, in->device, true, in->cap); + out->_Init_byptr(dtmp, Nelem, in->device(), true, in->capacity()); return out; } } diff --git a/src/backend/utils_internal_gpu/cuSetArange_gpu.cu b/src/backend/utils_internal_gpu/cuSetArange_gpu.cu index 52fb5ac1..4f3c8f19 100644 --- a/src/backend/utils_internal_gpu/cuSetArange_gpu.cu +++ b/src/backend/utils_internal_gpu/cuSetArange_gpu.cu @@ -31,7 +31,7 @@ namespace cytnx { void cuSetArange_gpu_cd(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cuDoubleComplex *ptr = (cuDoubleComplex *)in->Mem; + cuDoubleComplex *ptr = (cuDoubleComplex *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -40,7 +40,7 @@ namespace cytnx { void cuSetArange_gpu_cf(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cuFloatComplex *ptr = (cuFloatComplex *)in->Mem; + cuFloatComplex *ptr = (cuFloatComplex *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -49,7 +49,7 @@ namespace cytnx { void cuSetArange_gpu_d(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_double *ptr = (cytnx_double *)in->Mem; + cytnx_double *ptr = (cytnx_double *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -58,7 +58,7 @@ namespace cytnx { void cuSetArange_gpu_f(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_float *ptr = (cytnx_float *)in->Mem; + cytnx_float *ptr = (cytnx_float *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -67,7 +67,7 @@ namespace cytnx { void 
cuSetArange_gpu_i64(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_int64 *ptr = (cytnx_int64 *)in->Mem; + cytnx_int64 *ptr = (cytnx_int64 *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -76,7 +76,7 @@ namespace cytnx { void cuSetArange_gpu_u64(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_uint64 *ptr = (cytnx_uint64 *)in->Mem; + cytnx_uint64 *ptr = (cytnx_uint64 *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -85,7 +85,7 @@ namespace cytnx { void cuSetArange_gpu_i32(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_int32 *ptr = (cytnx_int32 *)in->Mem; + cytnx_int32 *ptr = (cytnx_int32 *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -94,7 +94,7 @@ namespace cytnx { void cuSetArange_gpu_u32(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_uint32 *ptr = (cytnx_uint32 *)in->Mem; + cytnx_uint32 *ptr = (cytnx_uint32 *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -103,7 +103,7 @@ namespace cytnx { void cuSetArange_gpu_i16(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_int16 *ptr = (cytnx_int16 *)in->Mem; + cytnx_int16 *ptr = (cytnx_int16 *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -112,7 +112,7 @@ namespace cytnx { void cuSetArange_gpu_u16(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_uint16 *ptr = (cytnx_uint16 *)in->Mem; + cytnx_uint16 *ptr = (cytnx_uint16 *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; @@ -121,7 +121,7 @@ namespace cytnx { void cuSetArange_gpu_b(boost::intrusive_ptr &in, const cytnx_double &start, const cytnx_double &end, const cytnx_double &step, const cytnx_uint64 &Nelem) { - cytnx_bool *ptr = (cytnx_bool *)in->Mem; + cytnx_bool *ptr = (cytnx_bool *)in->data(); cytnx_uint64 NBlocks = Nelem / 512; if (Nelem % 512) NBlocks += 1; diff --git a/src/backend/utils_internal_gpu/cuTNPerm_gpu.cu b/src/backend/utils_internal_gpu/cuTNPerm_gpu.cu index 459aa547..5c7cee60 100644 --- a/src/backend/utils_internal_gpu/cuTNPerm_gpu.cu +++ b/src/backend/utils_internal_gpu/cuTNPerm_gpu.cu @@ -24,7 +24,7 @@ namespace cytnx { const std::vector &invmapper, const bool is_inplace) { #ifdef UNI_DEBUG - cytnx_error_msg(in->dtype != Type.ComplexDouble, + cytnx_error_msg(in->dtype() != Type.ComplexDouble, "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with " "type ComplexDouble", in->dtype_str().c_str()); @@ -120,7 +120,7 @@ namespace cytnx { const bool is_inplace) { #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != Type.ComplexFloat, + in->dtype() != Type.ComplexFloat, "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type ComplexFloat", in->dtype_str().c_str()); cytnx_error_msg(in->device == Device.cpu, "%s", @@ -214,7 +214,7 @@ namespace cytnx { const bool is_inplace) { #ifdef UNI_DEBUG cytnx_error_msg( - in->dtype != Type.Double, + in->dtype() != Type.Double, "[DEBUG][internal error] 
in.dtype_str is [%s] but call cuTNPerm_gpu with type Double",
        in->dtype_str().c_str());
      cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -309,7 +309,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Float,
+        in->dtype() != Type.Float,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Float",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -402,7 +402,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Int64,
+        in->dtype() != Type.Int64,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Int64",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -494,7 +494,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Uint64,
+        in->dtype() != Type.Uint64,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Uint64",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -585,7 +585,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Int32,
+        in->dtype() != Type.Int32,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Int32",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -678,7 +678,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Uint32,
+        in->dtype() != Type.Uint32,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Uint32",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -769,7 +769,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Uint16,
+        in->dtype() != Type.Uint16,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Uint16",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -859,7 +859,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Int16,
+        in->dtype() != Type.Int16,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Int16",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
@@ -949,7 +949,7 @@ namespace cytnx {
                             const bool is_inplace) {
   #ifdef UNI_DEBUG
       cytnx_error_msg(
-        in->dtype != Type.Bool,
+        in->dtype() != Type.Bool,
         "[DEBUG][internal error] in.dtype_str is [%s] but call cuTNPerm_gpu with type Bool",
         in->dtype_str().c_str());
       cytnx_error_msg(in->device == Device.cpu, "%s",
diff --git a/src/linalg/Add.cpp b/src/linalg/Add.cpp
index c56393f8..e5f7bd5d 100644
--- a/src/linalg/Add.cpp
+++ b/src/linalg/Add.cpp
@@ -406,10 +406,8 @@ namespace cytnx {
 
     template <>
     Tensor Add(const Scalar &lc, const Tensor &Rt) {
-      Storage Cnst;  // create a shallow container without allocate. Using base!
-
-      Cnst._impl->Mem = lc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, lc.dtype());
+      Cnst.set_item(0, lc);
 
       Tensor out;
       out._impl = Rt._impl->_clone_meta_only();
@@ -431,10 +431,6 @@ namespace cytnx {
                         "\n");
 #endif
       }
-
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
diff --git a/src/linalg/Cpr.cpp b/src/linalg/Cpr.cpp
index 69286812..e324209e 100644
--- a/src/linalg/Cpr.cpp
+++ b/src/linalg/Cpr.cpp
@@ -347,11 +347,8 @@ namespace cytnx {
 
     template <>
     Tensor Cpr(const Scalar &lc, const Tensor &Rt) {
-      Storage Cnst;
-      // Cnst.at(0) = lc;
-
-      Cnst._impl->Mem = lc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, lc.dtype());
+      Cnst.set_item(0, lc);
 
       Tensor out(Rt.shape(), Type.Bool, Rt.device());
 
@@ -370,7 +367,6 @@ namespace cytnx {
                         "\n");
 #endif
       }
-      Cnst._impl->Mem = nullptr;
       return out;
     }
 
diff --git a/src/linalg/Det.cpp b/src/linalg/Det.cpp
index 938d95e6..2833a658 100644
--- a/src/linalg/Det.cpp
+++ b/src/linalg/Det.cpp
@@ -28,7 +28,7 @@ namespace cytnx {
     }
 
     if (Tl.device() == Device.cpu) {
-      cytnx::linalg_internal::lii.Det_ii[_tl.dtype()](out._impl->storage()._impl->Mem,
+      cytnx::linalg_internal::lii.Det_ii[_tl.dtype()](out._impl->storage()._impl->data(),
                                                       _tl._impl->storage()._impl, Tl.shape()[0]);
 
       return out;
@@ -38,7 +38,7 @@ namespace cytnx {
       // cytnx_error_msg(true, "[Det] on GPU Developing!%s", "\n");
       checkCudaErrors(cudaSetDevice(Tl.device()));
       cytnx::linalg_internal::lii.cuDet_ii[_tl.dtype()](
-        out._impl->storage()._impl->Mem, _tl._impl->storage()._impl, Tl.shape()[0]);
+        out._impl->storage()._impl->data(), _tl._impl->storage()._impl, Tl.shape()[0]);
 
       return out;
 #else
diff --git a/src/linalg/Div.cpp b/src/linalg/Div.cpp
index edeccf60..ea819630 100644
--- a/src/linalg/Div.cpp
+++ b/src/linalg/Div.cpp
@@ -399,10 +399,8 @@ namespace cytnx {
 
     template <>
     Tensor Div(const Scalar &lc, const Tensor &Rt) {
-      Storage Cnst;  // create a shallow container without allocate. Using base!
-
-      Cnst._impl->Mem = lc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, lc.dtype());
+      Cnst.set_item(0, lc);
 
       Tensor out;
       out._impl = Rt._impl->_clone_meta_only();
@@ -424,10 +422,6 @@ namespace cytnx {
                         "\n");
 #endif
       }
-
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
@@ -751,10 +745,8 @@ namespace cytnx {
 
     template <>
     Tensor Div(const Tensor &Lt, const Scalar &rc) {
-      Storage Cnst;  // create a shallow container without allocate. Using base!
-
-      Cnst._impl->Mem = rc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, rc.dtype());
+      Cnst.set_item(0, rc);
 
       Tensor out;
       out._impl = Lt._impl->_clone_meta_only();
@@ -777,9 +769,6 @@ namespace cytnx {
 #endif
       }
 
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
diff --git a/src/linalg/Gemm_Batch.cpp b/src/linalg/Gemm_Batch.cpp
index 3bbf08a4..22be5ea7 100644
--- a/src/linalg/Gemm_Batch.cpp
+++ b/src/linalg/Gemm_Batch.cpp
@@ -238,9 +238,9 @@ namespace cytnx {
       void *a_array[tmp_a_tensors.size()], *b_array[tmp_b_tensors.size()],
         *c_array[c_tensors.size()];
       for (cytnx_uint64 i = 0; i < a_tensors.size(); i++) {
-        a_array[i] = tmp_a_tensors[i].storage()._impl->Mem;
-        b_array[i] = tmp_b_tensors[i].storage()._impl->Mem;
-        c_array[i] = c_tensors[i].storage()._impl->Mem;
+        a_array[i] = tmp_a_tensors[i].storage()._impl->data();
+        b_array[i] = tmp_b_tensors[i].storage()._impl->data();
+        c_array[i] = c_tensors[i].storage()._impl->data();
       }
       vector ms(vec_cast(m_array)), ns(vec_cast(n_array)), ks(vec_cast(k_array));
diff --git a/src/linalg/Mod.cpp b/src/linalg/Mod.cpp
index a59488af..04c0cce9 100644
--- a/src/linalg/Mod.cpp
+++ b/src/linalg/Mod.cpp
@@ -392,11 +392,8 @@ namespace cytnx {
 
     template <>
     Tensor Mod(const Scalar &lc, const Tensor &Rt) {
-      Storage Cnst;
-
-      // Cnst.at(0) = lc;
-      Cnst._impl->Mem = lc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, lc.dtype());
+      Cnst.set_item(0, lc);
 
       Tensor out;
       out._impl = Rt._impl->_clone_meta_only();  //(Rt.shape(),Type.ComplexDouble,Rt.device());
@@ -420,8 +417,6 @@ namespace cytnx {
 #endif
       }
 
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
@@ -744,11 +739,8 @@ namespace cytnx {
 
     template <>
     Tensor Mod(const Tensor &Lt, const Scalar &rc) {
-      Storage Cnst;
-      // Cnst.at(0) = rc;
-
-      Cnst._impl->Mem = rc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, rc.dtype());
+      Cnst.set_item(0, rc);
 
       Tensor out;
       out._impl = Lt._impl->_clone_meta_only();
@@ -772,9 +764,6 @@ namespace cytnx {
 #endif
       }
 
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
diff --git a/src/linalg/Mul.cpp b/src/linalg/Mul.cpp
index 05a97aba..f0a96de5 100644
--- a/src/linalg/Mul.cpp
+++ b/src/linalg/Mul.cpp
@@ -401,9 +401,8 @@ namespace cytnx {
 
     template <>
     Tensor Mul(const Scalar &lc, const Tensor &Rt) {
-      Storage Cnst = Storage();  // create a shallow container without allocate. Using base!
-      Cnst._impl->Mem = lc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, lc.dtype());
+      Cnst.set_item(0, lc);
 
       Tensor out;
       out._impl = Rt._impl->_clone_meta_only();
@@ -426,9 +425,6 @@ namespace cytnx {
 #endif
       }
 
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
diff --git a/src/linalg/Norm.cpp b/src/linalg/Norm.cpp
index b2663a39..80113f3b 100644
--- a/src/linalg/Norm.cpp
+++ b/src/linalg/Norm.cpp
@@ -34,7 +34,7 @@ namespace cytnx {
     }
 
     if (Tl.device() == Device.cpu) {
-      cytnx::linalg_internal::lii.Norm_ii[_tl.dtype()](out._impl->storage()._impl->Mem,
+      cytnx::linalg_internal::lii.Norm_ii[_tl.dtype()](out._impl->storage()._impl->data(),
                                                        _tl._impl->storage()._impl);
 
       return out;
@@ -42,7 +42,7 @@ namespace cytnx {
     } else {
 #ifdef UNI_GPU
       checkCudaErrors(cudaSetDevice(Tl.device()));
-      cytnx::linalg_internal::lii.cuNorm_ii[_tl.dtype()](out._impl->storage()._impl->Mem,
+      cytnx::linalg_internal::lii.cuNorm_ii[_tl.dtype()](out._impl->storage()._impl->data(),
                                                          _tl._impl->storage()._impl);
 
       return out;
diff --git a/src/linalg/Sub.cpp b/src/linalg/Sub.cpp
index 6f25ddb3..0ac6e957 100644
--- a/src/linalg/Sub.cpp
+++ b/src/linalg/Sub.cpp
@@ -399,10 +399,8 @@ namespace cytnx {
 
     template <>
     Tensor Sub(const Scalar &lc, const Tensor &Rt) {
-      Storage Cnst;  // create a shallow container without allocate. Using base!
-
-      Cnst._impl->Mem = lc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, lc.dtype());
+      Cnst.set_item(0, lc);
 
       Tensor out;
       out._impl = Rt._impl->_clone_meta_only();
@@ -425,9 +423,6 @@ namespace cytnx {
 #endif
       }
 
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
@@ -758,10 +753,8 @@ namespace cytnx {
 
     template <>
     Tensor Sub(const Tensor &Lt, const Scalar &rc) {
-      Storage Cnst;  // create a shallow container without allocate. Using base!
-
-      Cnst._impl->Mem = rc._impl->get_raw_address();
-      Cnst._impl->len = 1;
+      Storage Cnst(1, rc.dtype());
+      Cnst.set_item(0, rc);
 
       Tensor out;
       out._impl = Lt._impl->_clone_meta_only();
@@ -783,10 +776,6 @@ namespace cytnx {
                         "\n");
 #endif
       }
-
-      // swap back to prevent also free by recycle mech.
-      Cnst._impl->Mem = nullptr;
-
       return out;
     }
 
diff --git a/src/utils/cutensornet.cu b/src/utils/cutensornet.cu
index fa0b3150..6c1edc52 100644
--- a/src/utils/cutensornet.cu
+++ b/src/utils/cutensornet.cu
@@ -149,7 +149,7 @@ namespace cytnx {
   }
 
   void cutensornet::setOutputMem(UniTensor &res) {
-    R_d = (void *)res.get_block_()._impl->storage()._impl->Mem;
+    R_d = (void *)res.get_block_()._impl->storage()._impl->data();
   }
 
   void cutensornet::setInputMem(std::vector &uts) {
@@ -158,14 +158,15 @@ namespace cytnx {
     tns = std::vector(uts.size());
     for (int idx = 0; idx < uts.size(); idx++) {
       if (uts[idx].is_contiguous())
-        rawDataIn_d[idx] = (void *)uts[idx].get_block_()._impl->storage()._impl->Mem;
+        rawDataIn_d[idx] = (void *)uts[idx].get_block_()._impl->storage()._impl->data();
       else
-        rawDataIn_d[idx] = (void *)uts[idx].get_block_().contiguous()._impl->storage()._impl->Mem;
+        rawDataIn_d[idx] =
+          (void *)uts[idx].get_block_().contiguous()._impl->storage()._impl->data();
       if (uts[idx].is_contiguous())
         tns[idx] = uts[idx];
       else
         tns[idx] = uts[idx].contiguous();
-      rawDataIn_d[idx] = tns[idx].get_block_()._impl->storage()._impl->Mem;
+      rawDataIn_d[idx] = tns[idx].get_block_()._impl->storage()._impl->data();
     }
   }
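
Note (appended after the last hunk, not part of the patch): a minimal sketch of the calling pattern these changes converge on. It assumes only calls that already appear in the diff, namely the Storage(1, dtype) constructor, Storage::set_item, and the virtual Storage_base accessors data() and size(); the helper names wrap_scalar and inspect are purely illustrative.

    #include "Tensor.hpp"  // cytnx headers, as in the hunks above

    namespace {
      using namespace cytnx;

      // Wrap a Scalar in a properly allocated one-element Storage instead of
      // aliasing the Scalar's raw address into the removed Storage_base::Mem member.
      Storage wrap_scalar(const Scalar &lc) {
        Storage cnst(1, lc.dtype());  // owns its buffer, so it is freed normally
        cnst.set_item(0, lc);
        return cnst;
      }

      // Query storage through the virtual accessors rather than the removed
      // public members (Mem / len / cap / dtype / device).
      void inspect(Tensor &t) {
        void *raw = t.storage()._impl->data();              // previously ->Mem
        unsigned long long n = t.storage()._impl->size();   // previously ->len
        (void)raw;
        (void)n;
      }
    }  // namespace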