Skip to content

Commit

Permalink
Merge pull request #1207 from khalatepradnya/align-with-main
Browse files Browse the repository at this point in the history
[experimental/python] Align with mainline
  • Loading branch information
khalatepradnya authored Feb 10, 2024
2 parents a7961d4 + c8c52c0 commit a75f1f6
Show file tree
Hide file tree
Showing 44 changed files with 1,605 additions and 197 deletions.
1 change: 1 addition & 0 deletions .github/workflows/config/spelling_allowlist.txt
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ MSB
Max-Cut
MyST
NGC
NVCF
NVIDIA
NVQIR
OQC
Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/publishing.yml
Original file line number Diff line number Diff line change
Expand Up @@ -463,7 +463,6 @@ jobs:
name: CUDA Quantum installer
if: ${{ toJson(fromJson(needs.assets.outputs.installers).info_files) != '[]' }}
needs: assets
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
Expand All @@ -478,6 +477,8 @@ jobs:
info_file: ${{ fromJson(needs.assets.outputs.installers).info_files }}
fail-fast: false

runs-on: ${{ (contains(matrix.info_file, 'arm') && 'linux-arm64-cpu8') || 'linux-amd64-cpu8' }}

steps:
- name: Checkout repository
uses: actions/checkout@v4
Expand Down
1 change: 1 addition & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ if(NOT CURL_LIBRARY AND EXISTS "$ENV{CURL_INSTALL_PREFIX}/lib/libcurl.a")
SET(CURL_INCLUDE_DIR "$ENV{CURL_INSTALL_PREFIX}/include")
SET(CURL_CONFIG_EXECUTABLE "$ENV{CURL_INSTALL_PREFIX}/bin/curl-config")
SET(CMAKE_USE_SYSTEM_CURL TRUE)
SET(CURL_NO_CURL_CMAKE ON)
endif()
if(NOT CUDAQ_EXTERNAL_NVQIR_SIMS)
SET(CUDAQ_EXTERNAL_NVQIR_SIMS $ENV{CUDAQ_EXTERNAL_NVQIR_SIMS})
Expand Down
4 changes: 0 additions & 4 deletions docker/build/assets.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,6 @@ RUN dnf install -y --nobest --setopt=install_weak_deps=False wget git unzip

## [CUDA]
RUN source /cuda-quantum/scripts/configure_build.sh install-cuda
## [cuQuantum]
RUN source /cuda-quantum/scripts/configure_build.sh install-cuquantum
## [cuTensor]
RUN source /cuda-quantum/scripts/configure_build.sh install-cutensor
## [Compiler Toolchain]
RUN source /cuda-quantum/scripts/configure_build.sh install-gcc

Expand Down
2 changes: 2 additions & 0 deletions docs/sphinx/api/languages/cpp_api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,8 @@ Common
.. doxygenclass:: cudaq::complex_matrix
:members:

.. doxygenclass:: cudaq::Trace

.. doxygenclass:: cudaq::Resources

.. doxygentypedef:: cudaq::complex_matrix::value_type
Expand Down
37 changes: 0 additions & 37 deletions docs/sphinx/data_center_install.rst
Original file line number Diff line number Diff line change
Expand Up @@ -172,43 +172,6 @@ install CUDA 11.8:
:start-after: [>CUDAInstall]
:end-before: [<CUDAInstall]

cuQuantum
+++++++++++++++++++++++++++++++

Each version of CUDA Quantum is compatible only with a specific cuQuantum version.
As of CUDA Quantum 0.6, this is version 23.10. Newer versions of cuQuantum (if they exist)
might be compatible but have not been tested.

Make sure the environment variable `CUDA_ARCH_FOLDER` is set to either `x86_64`
or `sbsa` (for ARM64) depending on your processor architecture, and `CUDA_VERSION`
is set to the installed CUDA version.
Install cuQuantum version 23.10 using the following commands:

.. literalinclude:: ../../scripts/configure_build.sh
:language: bash
:dedent:
:start-after: [>cuQuantumInstall]
:end-before: [<cuQuantumInstall]

cuTensor
+++++++++++++++++++++++++++++++

Depending on how you installed CUDA, the cuTensor library is usually not included
in the installation. This library is used by some of the simulator backends.
Please check the cuQuantum documentation to ensure you choose a version that is
compatible with the used cuQuantum version, such as version 1.7.

Make sure the environment variable `CUDA_ARCH_FOLDER` is set to either `x86_64`
or `sbsa` (for ARM64) depending on your processor architecture, and `CUDA_VERSION`
is set to the installed CUDA version.
Install cuTensor version 1.7 using the following commands:

.. literalinclude:: ../../scripts/configure_build.sh
:language: bash
:dedent:
:start-after: [>cuTensorInstall]
:end-before: [<cuTensorInstall]

Toolchain
+++++++++++++++++++++++++++++++

Expand Down
49 changes: 49 additions & 0 deletions docs/sphinx/examples/cpp/providers/nvcf_sample.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
// Compile and run with:
// ```
// nvq++ --target nvcf nvcf_sample.cpp -o out.x
// ./out.x
// ```
// Assumes a valid NVCF API key and function ID have been stored in environment
// variables or `~/.nvcf_config` file. Alternatively, they can be set in the
// command line like below.
// ```
// nvq++ --target nvcf --nvcf-api-key <YOUR API KEY> --nvcf-function-id \
// <NVCF function Id> nvcf_sample.cpp -o out.x
// ./out.x
// ```
// Please refer to the documentation for information on how to obtain your
// NVCF API key and function ID.

#include <cudaq.h>
#include <iostream>

// Define a simple quantum kernel to execute on NVCF.
struct ghz {
  // Prepare a maximally entangled (GHZ) state across 25 qubits: a Hadamard
  // on the first qubit followed by a chain of CNOTs, yielding
  // (|00...0> + |11...1>)/sqrt(2).
  auto operator()() __qpu__ {
    constexpr int NUM_QUBITS = 25;
    cudaq::qvector q(NUM_QUBITS);
    h(q[0]);
    for (int i = 0; i < NUM_QUBITS - 1; i++) {
      x<cudaq::ctrl>(q[i], q[i + 1]);
    }
    // Measure all qubits. The sampled bitstrings are collected by the
    // enclosing `cudaq::sample` call, so the return value of `mz` does not
    // need to be stored (avoids an unused-variable warning).
    mz(q);
  }
};

int main() {
  // Asynchronous submission: returns a handle immediately so classical
  // work can continue while NVCF executes the kernel remotely.
  auto handle = cudaq::sample_async(ghz{});
  // ... classical code to execute in the meantime ...
  std::cout << "Waiting for NVCF result...\n";

  // Block on the handle until the remote job completes, then print the
  // measurement counts.
  auto asyncResults = handle.get();
  asyncResults.dump();

  // Synchronous submission: waits for the job result to be returned
  // before proceeding.
  auto syncResults = cudaq::sample(ghz{});
  syncResults.dump();
}
32 changes: 32 additions & 0 deletions docs/sphinx/examples/cpp/providers/nvcf_state.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// Compile and run with:
// ```
// nvq++ --target nvcf --nvcf-backend tensornet nvcf_state.cpp -o out.x
// ./out.x
// ```
// Assumes a valid NVCF API key and function ID have been stored in environment
// variables or `~/.nvcf_config` file. Alternatively, they can be set in the
// command line like below.
// ```
// nvq++ --target nvcf --nvcf-backend tensornet --nvcf-api-key <YOUR API KEY> \
// --nvcf-function-id <NVCF function Id> nvcf_state.cpp -o out.x
// ./out.x
// ```
// Please refer to the documentation for information on how to obtain your
// NVCF API key and function ID.

#include "cudaq/algorithms/state.h"
#include <cudaq.h>
#include <iostream>

int main() {
  // Build a 20-qubit GHZ-state circuit with the dynamic kernel builder.
  constexpr std::size_t numQubits = 20;
  auto ghzKernel = cudaq::make_kernel();
  auto qubits = ghzKernel.qalloc(numQubits);
  ghzKernel.h(qubits[0]);
  for (std::size_t i = 0; i + 1 < numQubits; ++i)
    ghzKernel.x<cudaq::ctrl>(qubits[i], qubits[i + 1]);

  // Retrieve the full state vector from the remote simulation and print
  // the amplitudes of the all-zeros and all-ones basis states.
  auto state = cudaq::get_state(ghzKernel);
  std::cout << "Amplitude(00..00) = " << state[0] << "\n";
  std::cout << "Amplitude(11..11) = " << state[(1ULL << numQubits) - 1]
            << "\n";
}
60 changes: 60 additions & 0 deletions docs/sphinx/examples/cpp/providers/nvcf_vqe.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
// Compile and run with:
// ```
// nvq++ --target nvcf --nvcf-nqpus 3 nvcf_vqe.cpp -o out.x
// ./out.x
// ```
// Note: we set `nqpus` to 3 to establish 3 concurrent NVCF job submission
// pipes. Assumes a valid NVCF API key and function ID have been stored in
// environment variables or `~/.nvcf_config` file. Alternatively, they can be
// set in the command line like below.
// ```
// nvq++ --target nvcf --nvcf-nqpus 3 --nvcf-api-key <YOUR API KEY> \
// --nvcf-function-id <NVCF function Id> nvcf_vqe.cpp -o out.x
// ./out.x
// ```
// Please refer to the documentation for information on how to obtain your
// NVCF API key and function ID.

#include <cudaq.h>
#include <cudaq/algorithm.h>
#include <cudaq/gradients.h>
#include <cudaq/optimizers.h>
#include <iostream>

int main() {
  using namespace cudaq::spin;
  // Hamiltonian for the VQE problem; its minimum eigenvalue (ground-state
  // energy) is approximately -1.74886 (see the final printout below).
  cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) +
                     .21829 * z(0) - 6.125 * z(1);

  // Single-parameter, two-qubit ansatz: X on the first qubit, a
  // parameterized ry rotation on the second, and an entangling
  // controlled-X back onto the first.
  auto [ansatz, theta] = cudaq::make_kernel<double>();
  auto q = ansatz.qalloc();
  auto r = ansatz.qalloc();
  ansatz.x(q);
  ansatz.ry(theta, r);
  ansatz.x<cudaq::ctrl>(r, q);

  // Run VQE with a gradient-based optimizer.
  // Delegate cost function and gradient computation across different NVCF-based
  // QPUs. Note: depending on the user's account, there might be different
  // number of NVCF worker instances available. Hence, although we're making
  // concurrent job submissions across multiple QPUs, the speedup would be
  // determined by the number of NVCF worker instances.
  cudaq::optimizers::lbfgs optimizer;
  auto [opt_val, opt_params] = optimizer.optimize(
      /*dim=*/1, /*opt_function*/ [&](const std::vector<double> &params,
                                      std::vector<double> &grads) {
        // Queue asynchronous jobs to do energy evaluations across multiple
        // QPUs: qpu_id 0 evaluates the energy at the current parameter, while
        // qpu_ids 1 and 2 evaluate the two shifted points needed for the
        // parameter-shift gradient. All three jobs run concurrently.
        auto energy_future =
            cudaq::observe_async(/*qpu_id=*/0, ansatz, h, params[0]);
        const double paramShift = M_PI_2;
        auto plus_future = cudaq::observe_async(/*qpu_id=*/1, ansatz, h,
                                                params[0] + paramShift);
        auto minus_future = cudaq::observe_async(/*qpu_id=*/2, ansatz, h,
                                                 params[0] - paramShift);
        // Parameter-shift rule: gradient = (E(+pi/2) - E(-pi/2)) / 2.
        // Each .get() blocks until the corresponding remote job completes.
        grads[0] = (plus_future.get().expectation() -
                    minus_future.get().expectation()) /
                   2.0;
        return energy_future.get().expectation();
      });
  std::cout << "Minimum energy = " << opt_val << " (expected -1.74886).\n";
}
29 changes: 29 additions & 0 deletions docs/sphinx/examples/python/providers/nvcf_sample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import cudaq

# This example assumes the NVCF API key and Function ID have been set in the
# `~/.nvcf_config` file or in environment variables.
# If not, you can set the API Key and Function ID environment variables in the
# Python script with:
# ```
# os.environ["NVCF_API_KEY"] = "<YOUR NVCF API KEY>"
# os.environ["NVCF_FUNCTION_ID"] = "<YOUR NVCF FUNCTION ID>"
# ```
# Alternatively, the `api_key` and `function_id` values can be passed to the
# target directly:
# ```
# cudaq.set_target("nvcf",
#                  backend="tensornet",
#                  api_key="<YOUR NVCF API KEY>",
#                  function_id="<YOUR NVCF FUNCTION ID>")
# ```
cudaq.set_target("nvcf", backend="tensornet")

num_qubits = 50
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(num_qubits)
# Place qubits in superposition state.
kernel.h(qubits[0])
# Chain CNOTs to entangle all qubits (GHZ-style circuit).
for i in range(num_qubits - 1):
    kernel.cx(qubits[i], qubits[i + 1])
# Measure.
kernel.mz(qubits)

# Submit to NVCF and sample 100 shots of the measurement distribution.
counts = cudaq.sample(kernel, shots_count=100)
print(counts)
27 changes: 27 additions & 0 deletions docs/sphinx/examples/python/providers/nvcf_state.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import cudaq

# This example assumes the NVCF API key and Function Id have been set in the `~/.nvcf_config` file/environment variables.
# If not, you can set the API Key and Function ID environment variables in the Python script with:
# ```
# os.environ["NVCF_API_KEY"] = "<YOUR NVCF API KEY>"`
# os.environ["NVCF_FUNCTION_ID"] = "<YOUR NVCF FUNCTION ID>"
# ```
# Alternatively, the `api_key` and `function_id` values can be passed to the target directly,
# ```
# cudaq.set_target("nvcf",
# api_key="<YOUR NVCF API KEY>"
# function_id="<YOUR NVCF FUNCTION ID>")
# ```
cudaq.set_target("nvcf")

num_qubits = 20
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(num_qubits)
# Place qubits in GHZ state.
kernel.h(qubits[0])
for i in range(num_qubits - 1):
kernel.cx(qubits[i], qubits[i + 1])

state = cudaq.get_state(kernel)
print("Amplitude(00..00) =", state[0])
print("Amplitude(11..11) =", state[2**num_qubits - 1])
62 changes: 62 additions & 0 deletions docs/sphinx/examples/python/providers/nvcf_vqe.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
import cudaq
from cudaq import spin
import math

# This example assumes the NVCF API key and Function ID have been set in the
# `~/.nvcf_config` file or in environment variables.
# If not, you can set the API Key and Function ID environment variables in the
# Python script with:
# ```
# os.environ["NVCF_API_KEY"] = "<YOUR NVCF API KEY>"
# os.environ["NVCF_FUNCTION_ID"] = "<YOUR NVCF FUNCTION ID>"
# ```
# Alternatively, the `api_key` and `function_id` values can be passed to the
# target directly:
# ```
# cudaq.set_target("nvcf",
#                  nqpus=3,
#                  api_key="<YOUR NVCF API KEY>",
#                  function_id="<YOUR NVCF FUNCTION ID>")
# ```
# `nqpus=3` establishes 3 concurrent NVCF job-submission pipes (virtual QPUs).
cudaq.set_target("nvcf", nqpus=3)

print("Number of QPUs:", cudaq.get_target().num_qpus())
# Note: depending on the user's account, there might be different
# number of NVCF worker instances available. Hence, although we're making
# concurrent job submissions across multiple QPUs, the speedup would be
# determined by the number of NVCF worker instances.
# Create the parameterized ansatz
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])

# Define its spin Hamiltonian.
hamiltonian = (5.907 - 2.1433 * spin.x(0) * spin.x(1) -
               2.1433 * spin.y(0) * spin.y(1) + 0.21829 * spin.z(0) -
               6.125 * spin.z(1))


def opt_gradient(parameter_vector):
    """Cost/gradient callback for the optimizer.

    Submits three concurrent jobs to NVCF: the energy at the current
    parameter (qpu_id=0) and the two +/- pi/2 shifted evaluations
    (qpu_id=1, 2) used by the parameter-shift rule.

    Returns a tuple of (energy, [gradient]).
    """
    # Evaluate energy and gradient on different remote QPUs
    # (i.e., concurrent job submissions to NVCF)
    energy_future = cudaq.observe_async(kernel,
                                        hamiltonian,
                                        parameter_vector[0],
                                        qpu_id=0)
    plus_future = cudaq.observe_async(kernel,
                                      hamiltonian,
                                      parameter_vector[0] + 0.5 * math.pi,
                                      qpu_id=1)
    minus_future = cudaq.observe_async(kernel,
                                       hamiltonian,
                                       parameter_vector[0] - 0.5 * math.pi,
                                       qpu_id=2)
    # Each .get() blocks until the corresponding remote job completes.
    return (energy_future.get().expectation(), [
        (plus_future.get().expectation() - minus_future.get().expectation()) /
        2.0
    ])


optimizer = cudaq.optimizers.LBFGS()
optimal_value, optimal_parameters = optimizer.optimize(1, opt_gradient)
print("Ground state energy =", optimal_value)
print("Optimal parameters =", optimal_parameters)
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@
# Set the target to execute on and query the number of QPUs in the system;
# The number of QPUs is equal to the number of (auto-)launched server instances.
cudaq.set_target("remote-mqpu",
remote_execution=True,
backend=backend,
auto_launch=str(servers) if servers.isdigit() else "",
url="" if servers.isdigit() else servers)
Expand Down
4 changes: 4 additions & 0 deletions python/runtime/cudaq/target/py_runtime_target.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,8 @@ void bindRuntimeTarget(py::module &mod, LinkedLibraryHolder &holder) {
strValue = value.cast<py::bool_>() ? "true" : "false";
else if (py::isinstance<py::str>(value))
strValue = value.cast<std::string>();
else if (py::isinstance<py::int_>(value))
strValue = std::to_string(value.cast<int>());
else
throw std::runtime_error(
"QPU kwargs config value must be cast-able to a string.");
Expand All @@ -100,6 +102,8 @@ void bindRuntimeTarget(py::module &mod, LinkedLibraryHolder &holder) {
strValue = value.cast<py::bool_>() ? "true" : "false";
else if (py::isinstance<py::str>(value))
strValue = value.cast<std::string>();
else if (py::isinstance<py::int_>(value))
strValue = std::to_string(value.cast<int>());
else
throw std::runtime_error(
"QPU kwargs config value must be cast-able to a string.");
Expand Down
1 change: 0 additions & 1 deletion python/tests/remote/test_remote_platform.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
@pytest.fixture(scope="session", autouse=True)
def startUpMockServer():
cudaq.set_target("remote-mqpu",
remote_execution=True,
auto_launch=str(num_qpus))
yield
cudaq.reset_target()
Expand Down
Loading

0 comments on commit a75f1f6

Please sign in to comment.