♻️ lift limitation on number of qubits supported in mapper
Signed-off-by: burgholzer <burgholzer@me.com>
burgholzer committed Sep 9, 2024
1 parent 9ce6f97 commit db990fa
Showing 8 changed files with 88 additions and 74 deletions.
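
The commit follows a single pattern: every fixed-size std::array<std::int16_t, MAX_DEVICE_QUBITS> member or parameter becomes a std::vector<std::int16_t> (or std::vector<std::optional<std::size_t>>) sized at runtime from Architecture::getNqubits(). The following is a minimal, compilable sketch of that pattern, not the project's actual classes; the Architecture stub is reduced to the one getter used here, and DEFAULT_POSITION is assumed to be the -1 sentinel that the logging code below writes for unassigned qubits.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::int16_t DEFAULT_POSITION = -1; // assumed "unassigned" sentinel

// Reduced stand-in for the real Architecture class (illustration only).
class Architecture {
public:
  explicit Architecture(const std::uint16_t n) : nqubits(n) {}
  [[nodiscard]] std::uint16_t getNqubits() const { return nqubits; }

private:
  std::uint16_t nqubits;
};

// Before: std::array<std::int16_t, MAX_DEVICE_QUBITS> qubits{}, locations{};
// After:  vectors sized from the device, so any qubit count works.
class MapperSketch {
public:
  explicit MapperSketch(const Architecture& arch)
      : qubits(arch.getNqubits(), DEFAULT_POSITION),
        locations(arch.getNqubits(), DEFAULT_POSITION) {}
  [[nodiscard]] std::size_t size() const { return qubits.size(); }

private:
  std::vector<std::int16_t> qubits;    // qubits[physical]  = logical
  std::vector<std::int16_t> locations; // locations[logical] = physical
};

int main() {
  const Architecture arch(433); // well beyond the old 128-qubit bound
  const MapperSketch mapper(arch);
  std::cout << mapper.size() << " qubit slots\n"; // 433
  return 0;
}

Sizing the vectors from the device description removes the hard 128-qubit ceiling while keeping the per-qubit access pattern (qubits.at(physical), locations.at(logical)) unchanged.
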
2 changes: 0 additions & 2 deletions include/sc/Architecture.hpp
@@ -37,8 +37,6 @@ constexpr std::uint32_t COST_TELEPORTATION =
2 * COST_CNOT_GATE + COST_MEASUREMENT + 4 * COST_SINGLE_QUBIT_GATE;
constexpr std::uint32_t COST_DIRECTION_REVERSE = 4 * COST_SINGLE_QUBIT_GATE;

constexpr std::uint16_t MAX_DEVICE_QUBITS = 128;

class Architecture {
public:
class Properties {
12 changes: 5 additions & 7 deletions include/sc/DataLogger.hpp
@@ -43,19 +43,17 @@ class DataLogger {
void logSearchNode(std::size_t layer, std::size_t nodeId,
std::size_t parentId, double costFixed, double costHeur,
double lookaheadPenalty,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& qubits,
bool validMapping, const std::vector<Exchange>& swaps,
std::size_t depth);
const std::vector<std::int16_t>& qubits, bool validMapping,
const std::vector<Exchange>& swaps, std::size_t depth);
void logFinalizeLayer(
std::size_t layer, const qc::CompoundOperation& ops,
const std::vector<std::uint16_t>& singleQubitMultiplicity,
const std::map<std::pair<std::uint16_t, std::uint16_t>,
std::pair<std::uint16_t, std::uint16_t>>&
twoQubitMultiplicity,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& initialLayout,
std::size_t finalNodeId, double finalCostFixed, double finalCostHeur,
double finalLookaheadPenalty,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& finalLayout,
const std::vector<std::int16_t>& initialLayout, std::size_t finalNodeId,
double finalCostFixed, double finalCostHeur, double finalLookaheadPenalty,
const std::vector<std::int16_t>& finalLayout,
const std::vector<Exchange>& finalSwaps, std::size_t finalSearchDepth);
void splitLayer();
void logMappingResult(MappingResults& result);
18 changes: 9 additions & 9 deletions include/sc/Mapper.hpp
@@ -130,14 +130,14 @@ class Mapper {
*
* The inverse of `locations`
*/
std::array<std::int16_t, MAX_DEVICE_QUBITS> qubits{};
std::vector<std::int16_t> qubits;
/**
* @brief containing the physical qubit currently mapped to each logical
* qubit. `locations[logical_qubit] = physical_qubit`
*
* The inverse of `qubits`
*/
std::array<std::int16_t, MAX_DEVICE_QUBITS> locations{};
std::vector<std::int16_t> locations;

MappingResults results;

@@ -200,10 +200,10 @@ class Mapper {
* @param collect2qBlocks if true, gates are collected in 2Q-blocks, and
* layering is performed on these blocks
*/
void processDisjointQubitLayer(
std::array<std::optional<std::size_t>, MAX_DEVICE_QUBITS>& lastLayer,
const std::optional<std::uint16_t>& control, std::uint16_t target,
qc::Operation* gate);
void
processDisjointQubitLayer(std::vector<std::optional<std::size_t>>& lastLayer,
const std::optional<std::uint16_t>& control,
std::uint16_t target, qc::Operation* gate);

/**
* Similar to processDisjointQubitLayer, but instead of treating each gate
@@ -216,7 +216,7 @@
* @param gate the gate to be added to the layer
*/
void processDisjoint2qBlockLayer(
std::array<std::optional<std::size_t>, MAX_DEVICE_QUBITS>& lastLayer,
std::vector<std::optional<std::size_t>>& lastLayer,
const std::optional<std::uint16_t>& control, std::uint16_t target,
qc::Operation* gate);

@@ -365,8 +365,8 @@ class Mapper {
architecture->reset();
qc.reset();
layers.clear();
qubits.fill(DEFAULT_POSITION);
locations.fill(DEFAULT_POSITION);
qubits.clear();
locations.clear();

results = MappingResults();
}
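
The qubits and locations members stay inverses of one another; only their storage changes. A self-contained sketch of that invariant with the new vector storage (the place helper is illustrative and not part of the mapper):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative helper: keep the two layout vectors consistent, with
// qubits[physical] = logical and locations[logical] = physical.
void place(std::vector<std::int16_t>& qubits,
           std::vector<std::int16_t>& locations, const std::uint16_t logical,
           const std::uint16_t physical) {
  qubits.at(physical) = static_cast<std::int16_t>(logical);
  locations.at(logical) = static_cast<std::int16_t>(physical);
}

int main() {
  const std::size_t nqubits = 4; // taken from the architecture at runtime
  std::vector<std::int16_t> qubits(nqubits, -1);
  std::vector<std::int16_t> locations(nqubits, -1);
  place(qubits, locations, /*logical=*/2, /*physical=*/0);
  std::cout << "qubits[0]=" << qubits[0] << ", locations[2]=" << locations[2]
            << "\n"; // qubits[0]=2, locations[2]=0
  return 0;
}
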
19 changes: 8 additions & 11 deletions include/sc/heuristic/HeuristicMapper.hpp
@@ -51,14 +51,14 @@ class HeuristicMapper : public Mapper {
*
* The inverse of `locations`
*/
std::array<std::int16_t, MAX_DEVICE_QUBITS> qubits{};
std::vector<std::int16_t> qubits;
/**
* containing the physical qubit currently mapped to each logical qubit.
* `locations[logical_qubit] = physical_qubit`
*
* The inverse of `qubits`
*/
std::array<std::int16_t, MAX_DEVICE_QUBITS> locations{};
std::vector<std::int16_t> locations;
/** current fixed cost
*
* non-fidelity-aware: cost of all swaps used in the node
@@ -88,17 +88,14 @@
* architecture */
bool validMapping = true;

explicit Node() {
qubits.fill(DEFAULT_POSITION);
locations.fill(DEFAULT_POSITION);
};
explicit Node(std::size_t nodeId) : id(nodeId) {
qubits.fill(DEFAULT_POSITION);
locations.fill(DEFAULT_POSITION);
explicit Node(std::uint16_t nqubits, const std::size_t nodeId)
: id(nodeId) {
qubits.resize(nqubits, DEFAULT_POSITION);
locations.resize(nqubits, DEFAULT_POSITION);
};
Node(std::size_t nodeId, std::size_t parentId,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& q,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& loc,
const std::vector<std::int16_t>& q,
const std::vector<std::int16_t>& loc,
const std::vector<Exchange>& sw = {},
const std::set<Edge>& valid2QGates = {},
const double initCostFixed = 0,
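
Search nodes are now sized for the concrete device when they are constructed instead of carrying a fixed 128-entry layout. A reduced, compilable stand-in for the new constructor (the real Node also tracks costs, swaps, and validity; the -1 value of DEFAULT_POSITION is assumed):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::int16_t DEFAULT_POSITION = -1; // assumed sentinel

// Reduced stand-in for HeuristicMapper::Node (only the layout members).
struct Node {
  std::size_t id = 0;
  std::vector<std::int16_t> qubits;
  std::vector<std::int16_t> locations;

  explicit Node(const std::uint16_t nqubits, const std::size_t nodeId)
      : id(nodeId) {
    // Size the per-node layout for the concrete device instead of relying
    // on a compile-time MAX_DEVICE_QUBITS bound.
    qubits.resize(nqubits, DEFAULT_POSITION);
    locations.resize(nqubits, DEFAULT_POSITION);
  }
};

int main() {
  const Node root(/*nqubits=*/433, /*nodeId=*/0); // beyond the old 128 limit
  std::cout << root.qubits.size() << " physical qubit slots\n"; // 433
  return 0;
}
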
50 changes: 35 additions & 15 deletions src/sc/DataLogger.cpp
@@ -99,10 +99,9 @@ void DataLogger::logFinalizeLayer(
const std::map<std::pair<std::uint16_t, std::uint16_t>,
std::pair<std::uint16_t, std::uint16_t>>&
twoQubitMultiplicity,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& initialLayout,
std::size_t finalNodeId, double finalCostFixed, double finalCostHeur,
double finalLookaheadPenalty,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& finalLayout,
const std::vector<std::int16_t>& initialLayout, std::size_t finalNodeId,
double finalCostFixed, double finalCostHeur, double finalLookaheadPenalty,
const std::vector<std::int16_t>& finalLayout,
const std::vector<Exchange>& finalSwaps, std::size_t finalSearchDepth) {
if (deactivated) {
return;
@@ -142,16 +141,28 @@
}
json["single_qubit_multiplicity"] = singleQubitMultiplicity;
auto& initialLayoutJSON = json["initial_layout"];
for (std::size_t i = 0; i < nqubits; ++i) {
initialLayoutJSON[i] = initialLayout.at(i);
if (initialLayout.empty()) {
for (std::size_t i = 0; i < nqubits; ++i) {
initialLayoutJSON[i] = -1;
}
} else {
for (std::size_t i = 0; i < nqubits; ++i) {
initialLayoutJSON[i] = initialLayout.at(i);
}
}
json["final_node_id"] = finalNodeId;
json["final_cost_fixed"] = finalCostFixed;
json["final_cost_heur"] = finalCostHeur;
json["final_lookahead_penalty"] = finalLookaheadPenalty;
auto& finalLayoutJSON = json["final_layout"];
for (std::size_t i = 0; i < nqubits; ++i) {
finalLayoutJSON[i] = finalLayout.at(i);
if (finalLayout.empty()) {
for (std::size_t i = 0; i < nqubits; ++i) {
finalLayoutJSON[i] = -1;
}
} else {
for (std::size_t i = 0; i < nqubits; ++i) {
finalLayoutJSON[i] = finalLayout.at(i);
}
}
if (finalSwaps.empty()) {
json["final_swaps"] = nlohmann::basic_json<>::array();
@@ -197,11 +208,13 @@ void DataLogger::splitLayer() {
std::to_string(splitIndex) + ".json");
}

void DataLogger::logSearchNode(
std::size_t layerIndex, std::size_t nodeId, std::size_t parentId,
double costFixed, double costHeur, double lookaheadPenalty,
const std::array<std::int16_t, MAX_DEVICE_QUBITS>& qubits,
bool validMapping, const std::vector<Exchange>& swaps, std::size_t depth) {
void DataLogger::logSearchNode(std::size_t layerIndex, std::size_t nodeId,
std::size_t parentId, double costFixed,
double costHeur, double lookaheadPenalty,
const std::vector<std::int16_t>& qubits,
bool validMapping,
const std::vector<Exchange>& swaps,
std::size_t depth) {
if (deactivated) {
return;
}
@@ -219,8 +232,15 @@ void DataLogger::logSearchNode(
}
of << nodeId << ";" << parentId << ";" << costFixed << ";" << costHeur << ";"
<< lookaheadPenalty << ";" << validMapping << ";" << depth << ";";
for (std::size_t i = 0; i < nqubits; ++i) {
of << qubits.at(i) << ",";

if (!qubits.empty()) {
for (std::size_t i = 0; i < nqubits; ++i) {
of << qubits.at(i) << ",";
}
} else {
for (std::size_t i = 0; i < nqubits; ++i) {
of << "-1,";
}
}
if (nqubits > 0) {
of.seekp(-1, std::ios_base::cur); // remove last comma
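
An empty layout vector is treated as "no layout known yet" and is written out as -1 placeholders so the log still contains one entry per qubit. A standalone sketch of that serialization rule, using a string stream in place of the logger's output file (the helper name is illustrative):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Sketch of the placeholder rule used when logging layouts.
std::string serializeLayout(const std::vector<std::int16_t>& layout,
                            const std::size_t nqubits) {
  std::ostringstream os;
  for (std::size_t i = 0; i < nqubits; ++i) {
    // empty vector -> unknown layout -> emit -1 for every qubit
    os << (layout.empty() ? -1 : layout.at(i));
    if (i + 1 < nqubits) {
      os << ",";
    }
  }
  return os.str();
}

int main() {
  std::cout << serializeLayout({}, 3) << "\n";        // -1,-1,-1
  std::cout << serializeLayout({2, 0, 1}, 3) << "\n"; // 2,0,1
  return 0;
}
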
40 changes: 17 additions & 23 deletions src/sc/Mapper.cpp
@@ -38,8 +38,8 @@ void Mapper::initResults() {

Mapper::Mapper(qc::QuantumComputation quantumComputation, Architecture& arch)
: qc(std::move(quantumComputation)), architecture(&arch) {
qubits.fill(DEFAULT_POSITION);
locations.fill(DEFAULT_POSITION);
qubits.resize(architecture->getNqubits(), DEFAULT_POSITION);
locations.resize(architecture->getNqubits(), DEFAULT_POSITION);

// strip away qubits that are not used in the circuit
qc.stripIdleQubits(true, true);
@@ -48,7 +48,7 @@ Mapper::Mapper(qc::QuantumComputation quantumComputation, Architecture& arch)
}

void Mapper::processDisjointQubitLayer(
std::array<std::optional<std::size_t>, MAX_DEVICE_QUBITS>& lastLayer,
std::vector<std::optional<std::size_t>>& lastLayer,
const std::optional<std::uint16_t>& control, const std::uint16_t target,
qc::Operation* gate) {
std::size_t layer = 0;
@@ -83,7 +83,7 @@
}

void Mapper::processDisjoint2qBlockLayer(
std::array<std::optional<std::size_t>, MAX_DEVICE_QUBITS>& lastLayer,
std::vector<std::optional<std::size_t>>& lastLayer,
const std::optional<std::uint16_t>& control, const std::uint16_t target,
qc::Operation* gate) {
std::size_t layer = 0;
@@ -132,7 +132,8 @@

void Mapper::createLayers() {
const auto& config = results.config;
std::array<std::optional<std::size_t>, MAX_DEVICE_QUBITS> lastLayer{};
auto lastLayer = std::vector<std::optional<std::size_t>>(
architecture->getNqubits(), std::nullopt);

auto qubitsInLayer = std::set<std::uint16_t>{};

Expand Down Expand Up @@ -447,28 +448,24 @@ void Mapper::finalizeMappedCircuit() {
if (architecture->getNqubits() > qcMapped.getNqubits()) {
for (auto logicalQubit = qcMapped.getNqubits();
logicalQubit < architecture->getNqubits(); ++logicalQubit) {
std::optional<qc::Qubit> physicalQubit = std::nullopt;
auto physicalQubit = static_cast<qc::Qubit>(logicalQubit);

// check if the corresponding physical qubit is already in use
if (qcMapped.initialLayout.find(static_cast<qc::Qubit>(logicalQubit)) !=
qcMapped.initialLayout.end()) {
// get the next unused physical qubit
for (physicalQubit = 0; *physicalQubit < architecture->getNqubits();
++(*physicalQubit)) {
if (qcMapped.initialLayout.find(*physicalQubit) ==
for (physicalQubit = 0; physicalQubit < architecture->getNqubits();
++(physicalQubit)) {
if (qcMapped.initialLayout.find(physicalQubit) ==
qcMapped.initialLayout.end()) {
break;
}
}
} else {
physicalQubit = static_cast<qc::Qubit>(logicalQubit);
}

assert(physicalQubit.has_value());

// the added logical qubits are not used in the circuit itself, so they
// are regarded garbage
qcMapped.addAncillaryQubit(*physicalQubit, std::nullopt);
qcMapped.addAncillaryQubit(physicalQubit, std::nullopt);
}
}
// unify quantum registers
@@ -484,28 +481,25 @@ void Mapper::placeRemainingArchitectureQubits() {
if (qc.getNqubits() < architecture->getNqubits()) {
for (auto logical = qc.getNqubits(); logical < architecture->getNqubits();
++logical) {
std::optional<qc::Qubit> physical = std::nullopt;
auto physical = static_cast<qc::Qubit>(logical);

// check if the corresponding physical qubit is already in use
if (qcMapped.initialLayout.find(static_cast<qc::Qubit>(logical)) !=
qcMapped.initialLayout.end()) {
// get the next unused physical qubit
for (physical = 0; *physical < architecture->getNqubits();
++(*physical)) {
if (qcMapped.initialLayout.find(*physical) ==
for (physical = 0; physical < architecture->getNqubits();
++(physical)) {
if (qcMapped.initialLayout.find(physical) ==
qcMapped.initialLayout.end()) {
break;
}
}
} else {
physical = static_cast<qc::Qubit>(logical);
}

assert(physical.has_value());
qubits.at(*physical) = static_cast<std::int16_t>(logical);
qubits.at(physical) = static_cast<std::int16_t>(logical);

// mark architecture qubit as ancillary and garbage
qcMapped.initialLayout[*physical] = static_cast<qc::Qubit>(logical);
qcMapped.initialLayout[physical] = static_cast<qc::Qubit>(logical);
qcMapped.setLogicalQubitAncillary(static_cast<qc::Qubit>(logical));
qcMapped.setLogicalQubitGarbage(static_cast<qc::Qubit>(logical));
}
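
finalizeMappedCircuit and placeRemainingArchitectureQubits now work with a plain qubit index that defaults to the identity placement and only scans for a free physical qubit when that slot is already taken, rather than going through std::optional. A reduced sketch of that lookup, with Qubit and the map standing in for qc::Qubit and the circuit's initialLayout:

#include <cstdint>
#include <iostream>
#include <map>

using Qubit = std::uint32_t; // stand-in for qc::Qubit

// Sketch: prefer physical == logical, otherwise take the first free slot.
Qubit nextFreePhysical(const std::map<Qubit, Qubit>& initialLayout,
                       const Qubit logical, const Qubit nqubits) {
  auto physical = logical;
  if (initialLayout.find(logical) != initialLayout.end()) {
    for (physical = 0; physical < nqubits; ++physical) {
      if (initialLayout.find(physical) == initialLayout.end()) {
        break;
      }
    }
  }
  return physical;
}

int main() {
  const std::map<Qubit, Qubit> layout{{0, 1}, {1, 0}};
  std::cout << nextFreePhysical(layout, 2, 4) << "\n"; // 2 (identity is free)
  std::cout << nextFreePhysical(layout, 1, 4) << "\n"; // 2 (first unused slot)
  return 0;
}
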
4 changes: 2 additions & 2 deletions src/sc/heuristic/HeuristicMapper.cpp
@@ -567,8 +567,8 @@ HeuristicMapper::Node HeuristicMapper::aStarMap(size_t layer, bool reverse) {
singleQubitMultiplicities.at(layer);
const TwoQubitMultiplicity& twoQubitMultiplicity =
twoQubitMultiplicities.at(layer);
Node node(nextNodeId++);
Node bestDoneNode(0);
Node node(architecture->getNqubits(), nextNodeId++);
Node bestDoneNode(architecture->getNqubits(), 0);
bool validMapping = false;

mapUnmappedGates(layer);
17 changes: 12 additions & 5 deletions test/sc/heuristic/test_heuristic.cpp
@@ -815,7 +815,8 @@ TEST_P(TestHeuristics, HeuristicProperties) {
auto results = ibmqYorktownMapper->getResults();
for (std::size_t i = 0; i < results.layerHeuristicBenchmark.size(); ++i) {
allNodes.emplace_back(
results.layerHeuristicBenchmark.at(i).generatedNodes);
results.layerHeuristicBenchmark.at(i).generatedNodes,
HeuristicMapper::Node{ibmqYorktown.getNqubits(), 0});
layerNames.emplace_back("on ibmq_yorktown in layer " +
std::to_string(i));
parseNodesFromDatalog(settings.dataLoggingPath, i, allNodes.back());
@@ -831,7 +832,8 @@
auto results = ibmqLondonMapper->getResults();
for (std::size_t i = 0; i < results.layerHeuristicBenchmark.size(); ++i) {
allNodes.emplace_back(
results.layerHeuristicBenchmark.at(i).generatedNodes);
results.layerHeuristicBenchmark.at(i).generatedNodes,
HeuristicMapper::Node{ibmqLondon.getNqubits(), 0});
layerNames.emplace_back("on ibmq_london in layer " + std::to_string(i));
parseNodesFromDatalog(settings.dataLoggingPath, i, allNodes.back());
finalSolutionIds.push_back(
@@ -848,7 +850,8 @@
auto results = ibmQX5Mapper->getResults();
for (std::size_t i = 0; i < results.layerHeuristicBenchmark.size(); ++i) {
allNodes.emplace_back(
results.layerHeuristicBenchmark.at(i).generatedNodes);
results.layerHeuristicBenchmark.at(i).generatedNodes,
HeuristicMapper::Node{ibmQX5.getNqubits(), 0});
layerNames.emplace_back("on ibmQX5 in layer " + std::to_string(i));
parseNodesFromDatalog(settings.dataLoggingPath, i, allNodes.back());
finalSolutionIds.push_back(
@@ -909,7 +912,9 @@
}
std::vector<std::int16_t> finalLayout{};
std::copy(finalSolutionNode.qubits.begin(),
finalSolutionNode.qubits.begin() + finalLayoutLastIndex + 1,
finalSolutionNode.qubits.begin() +
static_cast<std::vector<int16_t>::difference_type>(
finalLayoutLastIndex + 1),
std::back_inserter(finalLayout));
EXPECT_EQ(finalLayout, OPTIMAL_SOLUTIONS.at(circuitName).at(i))
<< "Heuristic " << toString(settings.heuristic)
@@ -1180,6 +1185,7 @@ TEST(Functionality, DataLoggerAfterClose) {
auto dataLogger = std::make_unique<DataLogger>(dataLoggingPath, arch, qc);
const qc::CompoundOperation compOp{};
Exchange teleport(0, 2, 1, qc::OpType::Teleportation);

dataLogger->logSearchNode(0, 0, 0, 0., 0., 0., {}, false, {{teleport}}, 0);
dataLogger->logSearchNode(1, 0, 0, 0., 0., 0., {}, false, {}, 0);
dataLogger->splitLayer();
@@ -1447,7 +1453,8 @@ TEST(Functionality, DataLogger) {
architecture.getNqubits());

std::vector<HeuristicMapper::Node> nodes{
results.layerHeuristicBenchmark.at(i).generatedNodes};
results.layerHeuristicBenchmark.at(i).generatedNodes,
HeuristicMapper::Node{architecture.getNqubits(), 0}};
parseNodesFromDatalog(settings.dataLoggingPath, i, nodes);

if (finalNodeId >= nodes.size() ||
Expand Down
