From 5f1bad5eacf5ba19378975e113e87a8e5dcb3173 Mon Sep 17 00:00:00 2001
From: Mohammed Boky <57175861+Mboky@users.noreply.github.com>
Date: Tue, 5 Dec 2023 14:18:16 +0100
Subject: [PATCH] Fix non convergence in example of usage (#13)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Hi,

The current example of usage does not converge in the number of iterations specified:

```python
import netket as nk
import netket_fidelity as nkf

# Create the Hilbert space and the variational states |ψ⟩ and |ϕ⟩
hi = nk.hilbert.Spin(0.5, 4)
sampler = nk.sampler.MetropolisLocal(hilbert=hi, n_chains_per_rank=16)
model = nk.models.RBM(alpha=1, param_dtype=complex, use_visible_bias=False)
phi = nk.vqs.MCState(sampler=sampler, model=model, n_samples=100)
psi = nk.vqs.MCState(sampler=sampler, model=model, n_samples=100)

# Transformation U
U = nkf.operator.Hadamard(hi, 0)

# Create the driver
optimizer = nk.optimizer.Sgd(learning_rate=0.01)
te = nkf.driver.InfidelityOptimizer(phi, optimizer, U=U, U_dagger=U, variational_state=psi, is_unitary=True, cv_coeff=-1/2)

# Run the driver
te.run(n_iter=100)
```

This keeps yielding an infidelity around 0.5. Either adding more iterations or switching to the Adam optimizer fixes this; with Adam and the same number of iterations, the infidelity is consistently smaller than 0.001.

Co-authored-by: Mohammed Boky
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index c0add77..0ee7fba 100644
--- a/README.md
+++ b/README.md
@@ -61,7 +61,7 @@ psi = nk.vqs.MCState(sampler=sampler, model=model, n_samples=100)
 U = nkf.operator.Hadamard(hi, 0)
 
 # Create the driver
-optimizer = nk.optimizer.Sgd(learning_rate=0.01)
+optimizer = nk.optimizer.Adam(learning_rate=0.01)
 te = nkf.driver.InfidelityOptimizer(phi, optimizer, U=U, U_dagger=U, variational_state=psi, is_unitary=True, cv_coeff=-1/2)
 # Run the driver
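
For reference, here is a sketch of the usage example with the proposed Adam optimizer in place, plus a logger so the infidelity history can be inspected after the run. The `nk.logging.RuntimeLog` usage and the `print(log.data)` inspection are illustrative additions based on standard NetKet conventions; they are not part of the README example or of this patch.

```python
import netket as nk
import netket_fidelity as nkf

# Create the Hilbert space and the variational states |ψ⟩ and |ϕ⟩
hi = nk.hilbert.Spin(0.5, 4)
sampler = nk.sampler.MetropolisLocal(hilbert=hi, n_chains_per_rank=16)
model = nk.models.RBM(alpha=1, param_dtype=complex, use_visible_bias=False)
phi = nk.vqs.MCState(sampler=sampler, model=model, n_samples=100)
psi = nk.vqs.MCState(sampler=sampler, model=model, n_samples=100)

# Transformation U
U = nkf.operator.Hadamard(hi, 0)

# Create the driver with Adam instead of Sgd, as proposed in this patch
optimizer = nk.optimizer.Adam(learning_rate=0.01)
te = nkf.driver.InfidelityOptimizer(
    phi, optimizer, U=U, U_dagger=U,
    variational_state=psi, is_unitary=True, cv_coeff=-1/2,
)

# Run the driver, logging the optimisation so convergence can be checked
log = nk.logging.RuntimeLog()
te.run(n_iter=100, out=log)

# Inspect the logged quantities; the exact key holding the infidelity
# depends on the driver, so check the printed keys in your version
print(log.data)
```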