added rdms #14

Open · wants to merge 1 commit into base: wip
264 changes: 264 additions & 0 deletions python/lattice_symmetries/rdms.py
@@ -0,0 +1,264 @@
from typing import Literal, overload

import numba
import numpy as np
import numpy.typing as npt
import scipy.linalg

from . import Basis


def index_to_spin(index: npt.NDArray[np.uint64], number_spins: int) -> npt.NDArray[np.bool_]:
    """Unpack basis-state indices into boolean spin configurations, one column
    per site, with site 0 stored in the least significant bit."""
    return (
        (index.reshape(-1, 1).astype(np.int64) & (1 << np.arange(number_spins).astype(np.int64)))
    ) > 0


def spin_to_index(
    spin: npt.NDArray[np.bool_ | np.int_ | np.uint], number_spins: int
) -> npt.NDArray[np.uint64]:
    """Pack boolean spin configurations back into basis-state indices."""
    a = 2 ** np.arange(number_spins, dtype=np.int64)
    return spin.dot(a)
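
# Example: with number_spins = 4, index 5 (0b0101) unpacks to
# [True, False, True, False] (site 0 first), and spin_to_index inverts the map:
#
#     spins = index_to_spin(np.array([5], dtype=np.uint64), 4)
#     assert spin_to_index(spins, 4)[0] == 5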


@numba.jit("uint64(uint64, uint64)", nogil=True, nopython=True)
def _binom(n, k):
r"""Compute the number of ways to choose k elements out of a pile of n.

:param n: the size of the pile of elements
:param k: the number of elements to take from the pile
:return: the number of ways to choose k elements out of a pile of n
"""
assert 0 <= n and n < 40
assert 0 <= k and k <= n

if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
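
# For instance, _binom(5, 2) == 10. Multiplying by (n - i) before dividing by
# (i + 1) keeps every intermediate quotient an exact integer.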


@numba.jit("uint64[:, :](uint64, uint64)", nogil=True, nopython=True)
def make_binom_cache(max_n, max_k):
    """Tabulate binomial coefficients: cache[i, j] = C(i, j) for j <= i, and 0
    otherwise. The table has max_n + 1 rows and max_k + 2 columns."""
    assert 0 < max_k and max_k <= max_n
    assert max_n < 40

cache = np.zeros((max_n + 1, max_k + 2), dtype=np.uint64)
for i in range(max_n + 1):
for j in range(min(i, max_k) + 2):
cache[i, j] = _binom(i, j) if j <= i else 0
return cache


@numba.jit("uint64(uint64)", nogil=True, nopython=True)
def _hamming_weight(n: int):
# See https://stackoverflow.com/a/9830282
n = (n & 0x5555555555555555) + ((n & 0xAAAAAAAAAAAAAAAA) >> 1)
n = (n & 0x3333333333333333) + ((n & 0xCCCCCCCCCCCCCCCC) >> 2)
n = (n & 0x0F0F0F0F0F0F0F0F) + ((n & 0xF0F0F0F0F0F0F0F0) >> 4)
n = (n & 0x00FF00FF00FF00FF) + ((n & 0xFF00FF00FF00FF00) >> 8)
n = (n & 0x0000FFFF0000FFFF) + ((n & 0xFFFF0000FFFF0000) >> 16)
n = (n & 0x00000000FFFFFFFF) + ((n & 0xFFFFFFFF00000000) >> 32)
return n


@numba.jit("uint64[:](uint64, uint64)", nogil=True, nopython=True)
def generate_binaries(length: int, hamming: int) -> npt.NDArray[np.uint64]:
    """Enumerate, in increasing order, all `length`-bit integers with the given
    Hamming weight, stepping from one to the next with Gosper's hack."""
    assert length > 0
    assert hamming >= 0 and hamming <= length
if hamming == 0:
return np.zeros(1, dtype=np.uint64)

size = _binom(length, hamming)
set_of_vectors = np.empty(size, dtype=np.uint64)
val = (1 << hamming) - 1
for i in range(size):
set_of_vectors[i] = val
c = val & -val
r = val + c
val = (((r ^ val) >> 2) // c) | r
return set_of_vectors
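
# Example: generate_binaries(4, 2) returns the six 4-bit integers with two set
# bits in increasing order,
#
#     [0b0011, 0b0101, 0b0110, 0b1001, 0b1010, 0b1100] == [3, 5, 6, 9, 10, 12]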


@numba.jit("uint64(uint64, uint64, uint64)", nogil=True, nopython=True)
def _merge(vec1: int, vec2: int, dim2: int) -> int:
    """Concatenate two bit strings: `vec1` occupies the high bits, `vec2` the
    low `dim2` bits, i.e. (vec1 << dim2) | vec2."""
    return (vec1 << dim2) | vec2


@numba.jit("uint64(uint64, uint64, uint64[:, :])", nogil=True, nopython=True)
def _get_index(x: int, dim: int, binom_cache: npt.NDArray[np.uint64]) -> int:
r"""Compute the lexicographic index of a binary number among those with
the same Hamming weight.

:param x: the binary number to find the index for
:param dim: the number of bits in the binary number
:param binom_cache: a 2D array of precomputed binomial coefficients
:return: the lexicographic index of the input binary number

This function uses a precomputed binomial coefficient cache for efficiency.
It is optimized with Numba's just-in-time compilation and assumes x is a
valid binary number with dim bits.
"""

n = 0
h = 0
if x & 1 == 1:
h += 1
x >>= 1
for i in range(1, dim):
if x & 1 == 1:
h += 1
n += binom_cache[i, h]
x >>= 1
return n
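
# Continuing the example above, _get_index maps each element of
# generate_binaries(4, 2) to its position in that sequence:
#
#     cache = make_binom_cache(4, 2)
#     assert _get_index(6, 4, cache) == 2  # 0b0110 is third in 3, 5, 6, ...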


@numba.jit(
"complex128(uint64, uint64, uint64, uint64, uint64, complex128[::1], uint64[:, :])",
nogil=True,
nopython=True,
)
def _density_matrix_element(
sector_dim: int,
dim: int,
hamming: int,
vec1: int,
vec2: int,
amplitudes: npt.NDArray[np.complex128],
binom_cache: npt.NDArray[np.uint64],
) -> complex:
    """Compute one element of the reduced density matrix between the subsystem
    configurations `vec1` and `vec2` by contracting over the environment. With
    the subsystem sites stored in the high bits, all amplitudes sharing a fixed
    subsystem configuration are contiguous, so the contraction is a single dot
    product over a slice."""
    sector_hamming = _hamming_weight(vec1)
    assert sector_hamming == _hamming_weight(vec2)

smallest_number = 2 ** (hamming - sector_hamming) - 1
k = dim - sector_dim
index1 = _get_index(_merge(vec1, smallest_number, k), dim, binom_cache)
index2 = _get_index(_merge(vec2, smallest_number, k), dim, binom_cache)
size_of_traced = _binom(k, hamming - sector_hamming)
matrix_element = np.dot(
amplitudes[index1 : index1 + size_of_traced].conj(),
amplitudes[index2 : index2 + size_of_traced],
)

return matrix_element
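
# Illustration: for dim = 4, hamming = 2, sector_dim = 2 and vec1 = 0b01, the
# merged states 0b0101 and 0b0110 sit at consecutive positions 1 and 2 of the
# fixed-weight basis (3, 5, 6, 9, 10, 12), so tracing out the environment is a
# dot product over the contiguous slice amplitudes[1:3].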


@numba.jit(
"Tuple((complex128[:, :], uint64[:]))(uint64, uint64, uint64, uint64, complex128[::1])",
nogil=True,
nopython=True,
parallel=False,
)
def _sector_density_matrix(
sector_dim: int,
dim: int,
sector_hamming: int,
hamming: int,
amplitudes: npt.NDArray[np.complex128],
) -> tuple[npt.NDArray[np.complex128], npt.NDArray[np.uint64]]:
    """Build the Hermitian block of the reduced density matrix for the sector
    with `sector_hamming` up spins on the subsystem, together with the
    subsystem basis states labelling its rows and columns."""
    assert sector_hamming <= hamming and sector_dim <= dim
    assert hamming - sector_hamming <= dim - sector_dim

sector_basis = generate_binaries(sector_dim, sector_hamming)
binom_cache = make_binom_cache(dim, hamming)
n = len(sector_basis)
matrix = np.empty((n, n), dtype=np.complex128)
for i in range(len(sector_basis)):
for j in range(i, len(sector_basis)):
matrix[i, j] = _density_matrix_element(
sector_dim,
dim,
hamming,
sector_basis[i],
sector_basis[j],
amplitudes,
binom_cache,
)
matrix[j, i] = np.conj(matrix[i, j])

return matrix, sector_basis


def _density_matrix_blocks(
    sub_dim: int, dim: int, hamming: int, amplitudes: npt.NDArray[np.complex128]
) -> tuple[list[npt.NDArray[np.complex128]], list[npt.NDArray[np.uint64]]]:
    """Compute all blocks of the reduced density matrix. Because the total
    Hamming weight is fixed, the matrix is block diagonal in the number of up
    spins on the subsystem; the loop below ranges over the achievable values."""
    matrices = []
    sector_basis_states = []
for sub_hamming in range(max(0, hamming - (dim - sub_dim)), min(hamming, sub_dim) + 1):
matrix, sector_basis = _sector_density_matrix(
sub_dim, dim, sub_hamming, hamming, amplitudes
)
matrices.append(matrix)
sector_basis_states.append(sector_basis)

return matrices, sector_basis_states


def _move_sites_to_back(
    wavefunction: npt.NDArray[np.complex128], basis: Basis, sites: list[int]
) -> npt.NDArray[np.complex128]:
    """Relabel the sites so that `sites` occupy the most significant bits, and
    reorder the amplitudes accordingly."""
    unpacked_states = index_to_spin(basis.states, basis.number_sites)
    permuted_sites = sorted(set(range(basis.number_sites)) - set(sites)) + sites
    assert set(permuted_sites) == set(range(basis.number_sites))
    assert len(permuted_sites) == basis.number_sites
    # np.argsort yields the inverse permutation, which is what reindexing the
    # amplitude array requires.
    permuted_unpacked_states = unpacked_states[:, np.argsort(permuted_sites)]
    permuted_states = spin_to_index(permuted_unpacked_states, basis.number_sites)
    return wavefunction[basis.index(permuted_states)]


def _reduced_density_matrix_blocks(
    wavefunction: npt.NDArray[np.complex128], basis: Basis, sites: list[int]
) -> tuple[list[npt.NDArray[np.complex128]], list[npt.NDArray[np.uint64]]]:
    if basis.hamming_weight is None:
        raise ValueError("Only bases with a fixed Hamming weight are supported.")
if basis.symmetries != []:
raise ValueError("Symmetries are not supported (yet)")
if basis.spin_inversion is not None:
raise ValueError("Spin inversion is not supported (yet)")

hamming_weight = basis.hamming_weight
n_sites = basis.number_sites
wavefunction = _move_sites_to_back(wavefunction, basis, sites)
return _density_matrix_blocks(len(sites), n_sites, hamming_weight, wavefunction)


@overload
def reduced_density_matrix(
wavefunction: npt.NDArray[np.complex128],
basis: Basis,
sites: list[int],
return_states: Literal[True] = True,
) -> tuple[npt.NDArray[np.complex128], npt.NDArray[np.uint64]]: ...


@overload
def reduced_density_matrix(
wavefunction: npt.NDArray[np.complex128],
basis: Basis,
sites: list[int],
    return_states: Literal[False],
) -> npt.NDArray[np.complex128]: ...


def reduced_density_matrix(
wavefunction: npt.NDArray[np.complex128],
basis: Basis,
sites: list[int],
return_states: bool = True,
) -> npt.NDArray[np.complex128] | tuple[npt.NDArray[np.complex128], npt.NDArray[np.uint64]]:
    """Compute the reduced density matrix of `wavefunction` on the given `sites`.

    When `return_states` is True, return the block-diagonal matrix together
    with the subsystem basis states labelling its rows; otherwise embed the
    blocks into the dense 2 ** len(sites) dimensional computational basis.
    """
matrices, sector_basis_states = _reduced_density_matrix_blocks(wavefunction, basis, sites)
if return_states:
return scipy.linalg.block_diag(*matrices), np.concatenate(sector_basis_states)

all_states = np.arange(2 ** len(sites), dtype=np.uint64)
full_matrix = np.zeros((2 ** len(sites), 2 ** len(sites)), dtype=np.complex128)
for matrix, states in zip(matrices, sector_basis_states):
state_idxs = np.searchsorted(all_states, states)
full_matrix[np.ix_(state_idxs, state_idxs)] = matrix
return full_matrix
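
For orientation, a minimal usage sketch (the four-site basis and uniform trial state are illustrative only; ls.rdms.reduced_density_matrix is the entry point exercised by the test below):

import lattice_symmetries as ls
import numpy as np

basis = ls.SpinBasis(number_spins=4, hamming_weight=2)
basis.build()
# A normalized state over the 6 fixed-magnetization basis states.
psi = np.ones(len(basis.states), dtype=np.complex128)
psi /= np.linalg.norm(psi)
# Block-diagonal RDM on sites 0 and 1, plus the subsystem states labelling it.
rho, states = ls.rdms.reduced_density_matrix(psi, basis, [0, 1])
assert np.isclose(np.trace(rho).real, 1.0)  # density matrices have unit trace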
46 changes: 46 additions & 0 deletions python/test/test_rdms.py
@@ -0,0 +1,46 @@
import itertools

import lattice_symmetries as ls
import numpy as np
import numpy.typing as npt
import scipy.sparse.linalg


def test_reduced_density_matrix_hamming():
    def rdms_reference(
        ground_state: npt.NDArray[np.float64], sites: list[int], n_spins: int
    ) -> npt.NDArray[np.float64]:
        # Reshape into one axis per spin; reversing the axes makes axis i
        # correspond to site i (site 0 is the least significant bit).
        ground_state_reshaped = ground_state.reshape((2,) * n_spins).transpose(
            tuple(range(n_spins - 1, -1, -1))
        )
        # Bring the kept sites to the front (most significant first) and
        # flatten, so that row r has bit k equal to the spin on sites[k].
        idxs = tuple(sorted(set(range(n_spins)) - set(sites)))
        ground_state_transposed = ground_state_reshaped.transpose(tuple(reversed(sites)) + idxs)
        ground_state_transposed = ground_state_transposed.reshape(
            (2 ** len(sites), 2 ** (n_spins - len(sites)))
        )
        # Trace out the environment: rho = psi psi^T for a real ground state.
        return ground_state_transposed @ ground_state_transposed.T

expr_str = "σᶻ₀ σᶻ₁ + σᶻ₀ σᶻ₃ + σᶻ₀ σᶻ₄ + σᶻ₀ σᶻ₁₂ + 2.0 σ⁺₀ σ⁻₁ + 2.0 σ⁺₀ σ⁻₃ + 2.0 σ⁺₀ σ⁻₄ + 2.0 σ⁺₀ σ⁻₁₂ + 2.0 σ⁻₀ σ⁺₁ + 2.0 σ⁻₀ σ⁺₃ + 2.0 σ⁻₀ σ⁺₄ + 2.0 σ⁻₀ σ⁺₁₂ + σᶻ₁ σᶻ₂ + σᶻ₁ σᶻ₅ + σᶻ₁ σᶻ₁₃ + 2.0 σ⁺₁ σ⁻₂ + 2.0 σ⁺₁ σ⁻₅ + 2.0 σ⁺₁ σ⁻₁₃ + 2.0 σ⁻₁ σ⁺₂ + 2.0 σ⁻₁ σ⁺₅ + 2.0 σ⁻₁ σ⁺₁₃ + σᶻ₂ σᶻ₃ + σᶻ₂ σᶻ₆ + σᶻ₂ σᶻ₁₄ + 2.0 σ⁺₂ σ⁻₃ + 2.0 σ⁺₂ σ⁻₆ + 2.0 σ⁺₂ σ⁻₁₄ + 2.0 σ⁻₂ σ⁺₃ + 2.0 σ⁻₂ σ⁺₆ + 2.0 σ⁻₂ σ⁺₁₄ + σᶻ₃ σᶻ₇ + σᶻ₃ σᶻ₁₅ + 2.0 σ⁺₃ σ⁻₇ + 2.0 σ⁺₃ σ⁻₁₅ + 2.0 σ⁻₃ σ⁺₇ + 2.0 σ⁻₃ σ⁺₁₅ + σᶻ₄ σᶻ₅ + σᶻ₄ σᶻ₇ + σᶻ₄ σᶻ₈ + 2.0 σ⁺₄ σ⁻₅ + 2.0 σ⁺₄ σ⁻₇ + 2.0 σ⁺₄ σ⁻₈ + 2.0 σ⁻₄ σ⁺₅ + 2.0 σ⁻₄ σ⁺₇ + 2.0 σ⁻₄ σ⁺₈ + σᶻ₅ σᶻ₆ + σᶻ₅ σᶻ₉ + 2.0 σ⁺₅ σ⁻₆ + 2.0 σ⁺₅ σ⁻₉ + 2.0 σ⁻₅ σ⁺₆ + 2.0 σ⁻₅ σ⁺₉ + σᶻ₆ σᶻ₇ + σᶻ₆ σᶻ₁₀ + 2.0 σ⁺₆ σ⁻₇ + 2.0 σ⁺₆ σ⁻₁₀ + 2.0 σ⁻₆ σ⁺₇ + 2.0 σ⁻₆ σ⁺₁₀ + σᶻ₇ σᶻ₁₁ + 2.0 σ⁺₇ σ⁻₁₁ + 2.0 σ⁻₇ σ⁺₁₁ + σᶻ₈ σᶻ₉ + σᶻ₈ σᶻ₁₁ + σᶻ₈ σᶻ₁₂ + 2.0 σ⁺₈ σ⁻₉ + 2.0 σ⁺₈ σ⁻₁₁ + 2.0 σ⁺₈ σ⁻₁₂ + 2.0 σ⁻₈ σ⁺₉ + 2.0 σ⁻₈ σ⁺₁₁ + 2.0 σ⁻₈ σ⁺₁₂ + σᶻ₉ σᶻ₁₀ + σᶻ₉ σᶻ₁₃ + 2.0 σ⁺₉ σ⁻₁₀ + 2.0 σ⁺₉ σ⁻₁₃ + 2.0 σ⁻₉ σ⁺₁₀ + 2.0 σ⁻₉ σ⁺₁₃ + σᶻ₁₀ σᶻ₁₁ + σᶻ₁₀ σᶻ₁₄ + 2.0 σ⁺₁₀ σ⁻₁₁ + 2.0 σ⁺₁₀ σ⁻₁₄ + 2.0 σ⁻₁₀ σ⁺₁₁ + 2.0 σ⁻₁₀ σ⁺₁₄ + σᶻ₁₁ σᶻ₁₅ + 2.0 σ⁺₁₁ σ⁻₁₅ + 2.0 σ⁻₁₁ σ⁺₁₅ + σᶻ₁₂ σᶻ₁₃ + σᶻ₁₂ σᶻ₁₅ + 2.0 σ⁺₁₂ σ⁻₁₃ + 2.0 σ⁺₁₂ σ⁻₁₅ + 2.0 σ⁻₁₂ σ⁺₁₃ + 2.0 σ⁻₁₂ σ⁺₁₅ + σᶻ₁₃ σᶻ₁₄ + 2.0 σ⁺₁₃ σ⁻₁₄ + 2.0 σ⁻₁₃ σ⁺₁₄ + σᶻ₁₄ σᶻ₁₅ + 2.0 σ⁺₁₄ σ⁻₁₅ + 2.0 σ⁻₁₄ σ⁺₁₅"
number_sites = 16
basis = ls.SpinBasis(
number_spins=number_sites,
hamming_weight=number_sites // 2,
)
basis_no_hamming = ls.SpinBasis(number_spins=number_sites, hamming_weight=None)
basis.build()
basis_no_hamming.build()
expression = ls.Expr(expr_str, particle="spin-1/2")
hamiltonian = ls.Operator(expression, basis)
hamiltonian_no_hamming = ls.Operator(expression, basis_no_hamming)
    ground_state = scipy.sparse.linalg.eigsh(hamiltonian, k=1, which="SA")[1].ravel()
    ground_state_no_hamming = scipy.sparse.linalg.eigsh(
        hamiltonian_no_hamming, k=1, which="SA"
    )[1].ravel()

for length in [1, 2, 3]:
for sites in itertools.combinations(range(basis.number_sites), length):
reference = rdms_reference(ground_state_no_hamming, list(sites), basis.number_sites)
rdm = ls.rdms.reduced_density_matrix(
ground_state.astype(np.complex128), basis, list(sites), return_states=False
)
assert np.allclose(reference, rdm)