From 814abc90b3e936b69cc067b037a34c42bbf3e157 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Thu, 30 Nov 2023 17:52:26 +0100 Subject: [PATCH 01/34] starting SolutionReceiver --- .../generic/solution_receiver/models.py | 75 +++++++++++++++++++ .../generic/solution_receiver/process.py | 58 ++++++++++++++ 2 files changed, 133 insertions(+) create mode 100644 src/lava/lib/optimization/solvers/generic/solution_receiver/models.py create mode 100644 src/lava/lib/optimization/solvers/generic/solution_receiver/process.py diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py new file mode 100644 index 00000000..8fc1900f --- /dev/null +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -0,0 +1,75 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import numpy as np +from lava.lib.optimization.solvers.generic.monitoring_processes\ + .solution_readout.process import SolutionReadout +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import ( + PyLoihiProcessModel, + PyAsyncProcessModel +) +from lava.magma.core.model.py.ports import PyInPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.resources import CPU +from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol + +from lava.lib.optimization.solvers.generic.solution_receiver.process import \ + SolutionReceiver + + +@implements(SolutionReceiver, protocol=AsyncProtocol) +@requires(CPU) +class SolutionReceiverPyModel(PyAsyncProcessModel): + """CPU model for the SolutionReadout process. + The process receives two types of messages, an updated cost and the + state of + the solver network representing the current candidate solution to an + OptimizationProblem. Additionally, a target cost can be defined by the + user, once this cost is reached by the solver network, this process + will request the runtime service to pause execution. 
+ """ + + best_state: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) + best_timestep: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) + best_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) + + state_in: PyInPort = LavaPyType( + PyInPort.VEC_DENSE, np.int32, precision=32 + ) + cost_integrator_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, + precision=32) + + def run_async(self): + self.best_cost = self.cost_integrator_in.recv() + + self.timestep = self.cost_integrator_in.recv() + + compressed_states = self.state_in.recv() + + self.best_state = self._decompress_state(compressed_states) + + @staticmethod + def _decompress_state(self, compressed_states): + """Add info!""" + + boolean_array = (compressed_states[:, None] & ( + 1 << np.arange(31, -1, -1))) != 0 + + # reshape into a 1D array + boolean_array.reshape(-1) + + return boolean_array.astype(np.int8) + + +def test_code(): + + # Assuming you have a 32-bit integer numpy array + original_array = np.array([4294967295, 2147483647, 0, 8983218], + dtype=np.uint32) + + # Use bitwise AND operation to convert each integer to a boolean array + boolean_array = (original_array[:, None] & (1 << np.arange(31, -1, -1))) != 0 + + # Display the result + print(boolean_array) \ No newline at end of file diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py new file mode 100644 index 00000000..bdbbdc12 --- /dev/null +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -0,0 +1,58 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import numpy as np +import typing as ty + +from lava.magma.core.process.ports.ports import InPort +from lava.magma.core.process.process import AbstractProcess, LogConfig +from lava.magma.core.process.variable import Var + + +class SolutionReceiver(AbstractProcess): + """Process to readout solution from SNN and make it available on host. + + Parameters + ---------- + shape: The shape of the set of nodes, or process, which state will be read. + target_cost: cost value at which, once attained by the network, + this process will stop execution. + name: Name of the Process. Default is 'Process_ID', where ID is an + integer value that is determined automatically. + log_config: Configuration options for logging. + time_steps_per_algorithmic_step: the number of iteration steps that a + single algorithmic step requires. This value is required to decode the + variable values from the spk_hist of a process. + + Attributes + ---------- + read_solution: InPort + A message received on this ports signifies the process + should call read on its RefPort. + ref_port: RefPort + A reference port to a variable in another process which state + will be remotely accessed upon read request. Here, it reads the + current variables assignment by a solver to an optimization problem. + target_cost: Var + Cost value at which, once attained by the network. 
+ + """ + + def __init__( + self, + shape: ty.Tuple[int, ...], + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, + ) -> None: + super().__init__( + shape=shape, + name=name, + log_config=log_config, + ) + num_spike_integrators = np.ceil(shape[0] / 32.).astype(int) + + self.best_state = Var(shape=shape, init=0) + self.best_timestep = Var(shape=(1,), init=0) + self.best_cost = Var(shape=(1,), init=0) + self.cost_integrator_in = InPort(shape=(1,)) + self.state_in = InPort(shape=(num_spike_integrators,)) From 313881a82f667c2621b2fa710ec6cb3130451852 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 5 Dec 2023 13:56:18 +0100 Subject: [PATCH 02/34] transferring unit test from spikeIO --- .../generic/solution_receiver/models.py | 125 ++++++++++++++++-- .../generic/solution_receiver/process.py | 93 ++++++++++++- 2 files changed, 201 insertions(+), 17 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 8fc1900f..b018a5c3 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -11,12 +11,18 @@ ) from lava.magma.core.model.py.ports import PyInPort from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.model.sub.model import AbstractSubProcessModel from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol from lava.lib.optimization.solvers.generic.solution_receiver.process import \ - SolutionReceiver - + ( + SolutionReceiver, SpikeIntegrator +) +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from lava.proc.sparse.process import Sparse +from lava.utils.weightutils import SignMode +from lava.proc import embedded_io as eio @implements(SolutionReceiver, protocol=AsyncProtocol) @requires(CPU) @@ -37,31 +43,41 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): state_in: PyInPort = LavaPyType( PyInPort.VEC_DENSE, np.int32, precision=32 ) - cost_integrator_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, + cost_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, precision=32) + timestep_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, + precision=32) def run_async(self): - self.best_cost = self.cost_integrator_in.recv() + buffer = self.cost_in.recv() + + self.best_cost =buffer - self.timestep = self.cost_integrator_in.recv() + print(self.best_cost) + self.best_timestep = self.timestep_in.recv() + print(self.best_timestep) compressed_states = self.state_in.recv() - self.best_state = self._decompress_state(compressed_states) + print(compressed_states) + + buffer = self._decompress_state(compressed_states).copy() + print(buffer) + self.best_state[:] = buffer[0] + print(self.best_state) + + self._req_pause = True @staticmethod - def _decompress_state(self, compressed_states): + def _decompress_state(compressed_states): """Add info!""" - boolean_array = (compressed_states[:, None] & ( 1 << np.arange(31, -1, -1))) != 0 - # reshape into a 1D array boolean_array.reshape(-1) - return boolean_array.astype(np.int8) - +""" def test_code(): # Assuming you have a 32-bit integer numpy array @@ -72,4 +88,89 @@ def test_code(): boolean_array = (original_array[:, None] & (1 << np.arange(31, -1, -1))) != 0 # Display the result - print(boolean_array) \ No newline at end of file + print(boolean_array) +""" + +@implements(proc=SolutionReadout, 
protocol=LoihiProtocol) +@requires(CPU) +class SolutionReadoutModel(AbstractSubProcessModel): + """Model for the SolutionReadout process. + + """ + + def __init__(self, proc): + + # Define the dense input layer + num_variables = np.prod(proc.proc_params.get("shape")) + num_spike_integrators = np.ceil(num_variables / 32.).astype(int) + + weights = self._get_input_weights(num_variables, num_spike_integrators) + + self.synapses_in = Sparse(weights=weights, + sign_mode=SignMode.EXCITATORY, + num_weight_bits=1, + num_message_bits=32) + + self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators)) + + self.out_adapter_cost_integrator = eio.spike.NxToPyAdapter( + shape=(1,), + num_message_bits=32) + self.out_adapter_best_state = eio.spike.NxToPyAdapter( + shape=(num_spike_integrators,), + num_message_bits=32) + + self.solution_receiver = SolutionReceiver( + shape=(1,), + best_cost_init = self.best_cost.get(), + best_state_init = self.best_state.get(), + best_timestep_init = self.best_timestep.get()) + + # Connect the parent InPort to the InPort of the child-Process. + proc.in_ports.states_in.connect(self.synapses_in.s_in) + proc.in_ports.cost_integrator_in.connect( + self.out_adapter_cost_integrator.inp) + + # Connect intermediate ports + self.synapses_in.connect(self.spike_integrators.state_in) + self.spike_integrators.state_out.connect( + self.out_adapter_best_state.inp) + self.out_adapter_best_state.out.connect(self.solution_receiver.state_in) + + self.out_adapter_cost_integrator.out.connect( + self.solution_receiver.cost_integrator_in) + + # Create aliases for variables + proc.vars.best_state.alias(self.solution_receiver.best_state) + proc.vars.best_timestep.alias(self.solution_receiver.best_timestep) + proc.vars.best_cost.alias(self.solution_receiver.best_cost) + + @staticmethod + def _get_input_weights(num_vars, num_spike_int, num_vars_per_int = 32): + """To be verified. 
Deprecated due to efficiency""" + weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) + for spike_integrator in range(num_spike_int - 1): + variable_start = num_vars_per_int*spike_integrator + weights[spike_integrator, variable_start:variable_start + num_vars_per_int] = 1 + + # The last spike integrator might be connected by less than 32 neurons + # This happens when mod(num_variables, num_vars_per_int) != 0 + weights[-1, num_vars_per_int*(spike_integrator + 1): -1] = 1 + + return weights + + @staticmethod + def _get_input_weights_index(num_vars, num_spike_int, num_vars_per_int=32): + """To be verified""" + weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) + + # Compute the indices for setting the values to 1 + indices = np.arange(0, num_vars_per_int * (num_spike_int - 1), num_vars_per_int) + + # Set the values to 1 using array indexing + weights[:num_spike_int-1, indices:indices + num_vars_per_int] = 1 + + # Set the values for the last spike integrator + weights[-1, num_vars_per_int * (num_spike_int - 1):num_vars] = 1 + + return weights diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index bdbbdc12..acdc552e 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -4,11 +4,90 @@ import numpy as np import typing as ty -from lava.magma.core.process.ports.ports import InPort +from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.process import AbstractProcess, LogConfig from lava.magma.core.process.variable import Var +class SpikeIntegrator(AbstractProcess): + """GradedVec + Graded spike vector layer. Accumulates and forwards 32bit spikes. + + Parameters + ---------- + shape: tuple(int) + number and topology of neurons + """ + + def __init__( + self, + shape: ty.Tuple[int, ...]) -> None: + + super().__init__(shape=shape) + + self.a_in = InPort(shape=shape) + self.s_out = OutPort(shape=shape) + + +class SolutionReadout(AbstractProcess): + r"""Process which implementation holds the solution readout layer + on the solver of an optimization problem. + + Attributes + ---------- + a_in: InPort + The addition of all inputs (per dynamical system) at this timestep + will be received by this port. + s_out: OutPort + The payload to be exchanged between the underlying dynamical systems + when these fire. + local_cost: OutPort + The cost components per dynamical system underlying these + variables, i.e., c_i = sum_j{Q_{ij} \cdot x_i} will be sent through + this port. The cost integrator will then complete the cost computation + by adding all contributions, i.e., x^T \cdot Q \cdot x = sum_i{c_i}. + variable_assignment: Var + Holds the current value assigned to the variables by + the solver network. + """ + + def __init__( + self, + shape_in: ty.Tuple[int, ...], + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, + ) -> None: + """ + Parameters + ---------- + shape: tuple + A tuple of the form (number of variables, domain size). + cost_diagonal: npty.ArrayLike + The diagonal of the coefficient of the quadratic term on the cost + function. + cost_off_diagonal: npty.ArrayLike + The off-diagonal of the coefficient of the quadratic term on the + cost function. + hyperparameters: dict, optional + name: str, optional + Name of the Process. 
Default is 'Process_ID', where ID is an integer + value that is determined automatically. + log_config: LogConfig, optional + Configuration options for logging.z""" + super().__init__( + shape=shape_in, + name=name, + log_config=log_config, + ) + + num_variables = np.prod(shape_in) + self.states_in = InPort(shape=(num_variables,)) + self.cost_integrator_in = InPort((1,)) + self.best_state = Var(shape=(num_variables,), init=0) + self.best_timestep = Var(shape=(1,), init=0) + self.best_cost = Var(shape=(1,), init=0) + + class SolutionReceiver(AbstractProcess): """Process to readout solution from SNN and make it available on host. @@ -41,6 +120,9 @@ class SolutionReceiver(AbstractProcess): def __init__( self, shape: ty.Tuple[int, ...], + best_cost_init: int, + best_state_init: int, + best_timestep_init: int, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: @@ -51,8 +133,9 @@ def __init__( ) num_spike_integrators = np.ceil(shape[0] / 32.).astype(int) - self.best_state = Var(shape=shape, init=0) - self.best_timestep = Var(shape=(1,), init=0) - self.best_cost = Var(shape=(1,), init=0) - self.cost_integrator_in = InPort(shape=(1,)) + self.best_state = Var(shape=shape, init=best_state_init) + self.best_timestep = Var(shape=(1,), init=best_timestep_init) + self.best_cost = Var(shape=(1,), init=best_cost_init) + self.cost_in = InPort(shape=(1,)) + self.timestep_in = InPort(shape=(1,)) self.state_in = InPort(shape=(num_spike_integrators,)) From 597e9d5fadaf4844aac7aa9edb991572a3ef8b71 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 5 Dec 2023 16:16:54 +0100 Subject: [PATCH 03/34] spikeIO working via LMT --- .../generic/solution_receiver/models.py | 53 +++++++++++-------- .../generic/solution_receiver/process.py | 6 ++- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index b018a5c3..9481b257 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -39,6 +39,7 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): best_state: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) best_timestep: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) best_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) + num_message_bits: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) state_in: PyInPort = LavaPyType( PyInPort.VEC_DENSE, np.int32, precision=32 @@ -49,30 +50,36 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): precision=32) def run_async(self): - buffer = self.cost_in.recv() + num_message_bits = self.num_message_bits[0] - self.best_cost =buffer + buffer_cost = 0 + buffer_timestep = 0 + while buffer_cost == 0: + buffer_cost = self.cost_in.recv() + buffer_timestep = self.timestep_in.recv() - print(self.best_cost) + # CProcModel currently has integer overflow + if buffer_cost > 0: + buffer_cost -= 2**num_message_bits - self.best_timestep = self.timestep_in.recv() - print(self.best_timestep) - compressed_states = self.state_in.recv() + self.best_cost = buffer_cost + self.best_timestep = buffer_timestep - print(compressed_states) + compressed_states = np.zeros(self.state_in.shape) + while not np.any(compressed_states): + compressed_states = self.state_in.recv() + + buffer = self._decompress_state(compressed_states, num_message_bits).copy() - buffer = self._decompress_state(compressed_states).copy() 
- print(buffer) self.best_state[:] = buffer[0] - print(self.best_state) self._req_pause = True @staticmethod - def _decompress_state(compressed_states): + def _decompress_state(compressed_states, num_message_bits): """Add info!""" boolean_array = (compressed_states[:, None] & ( - 1 << np.arange(31, -1, -1))) != 0 + 1 << np.arange(num_message_bits - 1, -1, -1))) != 0 # reshape into a 1D array boolean_array.reshape(-1) return boolean_array.astype(np.int8) @@ -99,26 +106,29 @@ class SolutionReadoutModel(AbstractSubProcessModel): """ def __init__(self, proc): + num_message_bits = proc.proc_params.get("num_message_bits") # Define the dense input layer num_variables = np.prod(proc.proc_params.get("shape")) - num_spike_integrators = np.ceil(num_variables / 32.).astype(int) + num_spike_integrators = np.ceil(num_variables / num_message_bits).astype(int) - weights = self._get_input_weights(num_variables, num_spike_integrators) + weights = self._get_input_weights(num_vars=num_variables, + num_spike_int=num_spike_integrators, + num_spike_integrators=num_message_bits) self.synapses_in = Sparse(weights=weights, sign_mode=SignMode.EXCITATORY, num_weight_bits=1, - num_message_bits=32) + num_message_bits=num_message_bits) - self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators)) + self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) self.out_adapter_cost_integrator = eio.spike.NxToPyAdapter( shape=(1,), - num_message_bits=32) + num_message_bits=num_message_bits) self.out_adapter_best_state = eio.spike.NxToPyAdapter( shape=(num_spike_integrators,), - num_message_bits=32) + num_message_bits=num_message_bits) self.solution_receiver = SolutionReceiver( shape=(1,), @@ -146,21 +156,22 @@ def __init__(self, proc): proc.vars.best_cost.alias(self.solution_receiver.best_cost) @staticmethod - def _get_input_weights(num_vars, num_spike_int, num_vars_per_int = 32): + def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): """To be verified. 
Deprecated due to efficiency""" weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) for spike_integrator in range(num_spike_int - 1): variable_start = num_vars_per_int*spike_integrator weights[spike_integrator, variable_start:variable_start + num_vars_per_int] = 1 - # The last spike integrator might be connected by less than 32 neurons + # The last spike integrator might be connected by less than + # num_vars_per_int neurons # This happens when mod(num_variables, num_vars_per_int) != 0 weights[-1, num_vars_per_int*(spike_integrator + 1): -1] = 1 return weights @staticmethod - def _get_input_weights_index(num_vars, num_spike_int, num_vars_per_int=32): + def _get_input_weights_index(num_vars, num_spike_int, num_vars_per_int): """To be verified""" weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index acdc552e..ea3fc49b 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -54,6 +54,7 @@ class SolutionReadout(AbstractProcess): def __init__( self, shape_in: ty.Tuple[int, ...], + num_message_bits = 24, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: @@ -76,6 +77,7 @@ def __init__( Configuration options for logging.z""" super().__init__( shape=shape_in, + num_message_bits=num_message_bits, name=name, log_config=log_config, ) @@ -123,6 +125,7 @@ def __init__( best_cost_init: int, best_state_init: int, best_timestep_init: int, + num_message_bits: int = 24, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: @@ -131,11 +134,12 @@ def __init__( name=name, log_config=log_config, ) - num_spike_integrators = np.ceil(shape[0] / 32.).astype(int) + num_spike_integrators = np.ceil(shape[0] / num_message_bits).astype(int) self.best_state = Var(shape=shape, init=best_state_init) self.best_timestep = Var(shape=(1,), init=best_timestep_init) self.best_cost = Var(shape=(1,), init=best_cost_init) + self.num_message_bits = Var(shape=(1,), init=num_message_bits) self.cost_in = InPort(shape=(1,)) self.timestep_in = InPort(shape=(1,)) self.state_in = InPort(shape=(num_spike_integrators,)) From 3b70b6d526d3fb3a7ac3dcdb8f9e830626e21645 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 5 Dec 2023 17:58:12 +0100 Subject: [PATCH 04/34] starting unittest for SolutionReadout --- .../solvers/generic/solution_receiver/models.py | 15 ++++++++++----- .../solvers/generic/solution_receiver/process.py | 3 ++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 9481b257..7e89638a 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -123,7 +123,10 @@ def __init__(self, proc): self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) - self.out_adapter_cost_integrator = eio.spike.NxToPyAdapter( + self.out_adapter_cost_in = eio.spike.NxToPyAdapter( + shape=(1,), + num_message_bits=num_message_bits) + self.out_adapter_timestep_in = eio.spike.NxToPyAdapter( shape=(1,), num_message_bits=num_message_bits) self.out_adapter_best_state = eio.spike.NxToPyAdapter( @@ -138,8 +141,8 @@ def 
__init__(self, proc): # Connect the parent InPort to the InPort of the child-Process. proc.in_ports.states_in.connect(self.synapses_in.s_in) - proc.in_ports.cost_integrator_in.connect( - self.out_adapter_cost_integrator.inp) + proc.in_ports.cost_in.connect(self.out_adapter_cost_in.inp) + proc.in_ports.timestep_in.connect(self.out_adapter_timestep_in.inp) # Connect intermediate ports self.synapses_in.connect(self.spike_integrators.state_in) @@ -147,8 +150,10 @@ def __init__(self, proc): self.out_adapter_best_state.inp) self.out_adapter_best_state.out.connect(self.solution_receiver.state_in) - self.out_adapter_cost_integrator.out.connect( - self.solution_receiver.cost_integrator_in) + self.out_adapter_cost_in.out.connect( + self.solution_receiver.cost_in) + self.out_adapter_timestep_in.out.connect( + self.solution_receiver.timestep_in) # Create aliases for variables proc.vars.best_state.alias(self.solution_receiver.best_state) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index ea3fc49b..8c18c80b 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -84,7 +84,8 @@ def __init__( num_variables = np.prod(shape_in) self.states_in = InPort(shape=(num_variables,)) - self.cost_integrator_in = InPort((1,)) + self.cost_in = InPort((1,)) + self.timestep_in = InPort((1,)) self.best_state = Var(shape=(num_variables,), init=0) self.best_timestep = Var(shape=(1,), init=0) self.best_cost = Var(shape=(1,), init=0) From 2646faf92b0c8630f38b1d09ee9ba831c5c4979e Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Thu, 7 Dec 2023 05:02:16 -0800 Subject: [PATCH 05/34] Remove NxToPy adapters from SolutionReadout --- .../generic/solution_receiver/models.py | 22 +++---------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 7e89638a..620e6f52 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -123,16 +123,6 @@ def __init__(self, proc): self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) - self.out_adapter_cost_in = eio.spike.NxToPyAdapter( - shape=(1,), - num_message_bits=num_message_bits) - self.out_adapter_timestep_in = eio.spike.NxToPyAdapter( - shape=(1,), - num_message_bits=num_message_bits) - self.out_adapter_best_state = eio.spike.NxToPyAdapter( - shape=(num_spike_integrators,), - num_message_bits=num_message_bits) - self.solution_receiver = SolutionReceiver( shape=(1,), best_cost_init = self.best_cost.get(), @@ -141,19 +131,13 @@ def __init__(self, proc): # Connect the parent InPort to the InPort of the child-Process. 
proc.in_ports.states_in.connect(self.synapses_in.s_in) - proc.in_ports.cost_in.connect(self.out_adapter_cost_in.inp) - proc.in_ports.timestep_in.connect(self.out_adapter_timestep_in.inp) + proc.in_ports.cost_in.connect(self.solution_receiver.cost_in) + proc.in_ports.timestep_in.connect(self.solution_receiver.timestep_in) # Connect intermediate ports self.synapses_in.connect(self.spike_integrators.state_in) self.spike_integrators.state_out.connect( - self.out_adapter_best_state.inp) - self.out_adapter_best_state.out.connect(self.solution_receiver.state_in) - - self.out_adapter_cost_in.out.connect( - self.solution_receiver.cost_in) - self.out_adapter_timestep_in.out.connect( - self.solution_receiver.timestep_in) + self.solution_receiver.state_in) # Create aliases for variables proc.vars.best_state.alias(self.solution_receiver.best_state) From 02def670caae7cce57d62d1df0d3815b32db1c4d Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Thu, 7 Dec 2023 05:51:57 -0800 Subject: [PATCH 06/34] Implement SolutionReceiver with a single port --- .../generic/solution_receiver/models.py | 35 +++++-------------- .../generic/solution_receiver/process.py | 6 ++-- 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 620e6f52..d300b73e 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -41,37 +41,20 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): best_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) num_message_bits: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) - state_in: PyInPort = LavaPyType( + results_in: PyInPort = LavaPyType( PyInPort.VEC_DENSE, np.int32, precision=32 ) - cost_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, - precision=32) - timestep_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, - precision=32) def run_async(self): num_message_bits = self.num_message_bits[0] - buffer_cost = 0 - buffer_timestep = 0 - while buffer_cost == 0: - buffer_cost = self.cost_in.recv() - buffer_timestep = self.timestep_in.recv() - - # CProcModel currently has integer overflow - if buffer_cost > 0: - buffer_cost -= 2**num_message_bits - - self.best_cost = buffer_cost - self.best_timestep = buffer_timestep - - compressed_states = np.zeros(self.state_in.shape) - while not np.any(compressed_states): - compressed_states = self.state_in.recv() - - buffer = self._decompress_state(compressed_states, num_message_bits).copy() - - self.best_state[:] = buffer[0] + results_buffer = 0 + while not np.any(results_buffer): + results_buffer = self.results_in.recv() + + self.best_cost = results_buffer[0] + self.best_timestep = results_buffer[1] + self.best_state[:] = self._decompress_state(results_buffer[2:], 24)[:self.best_state.shape[0]] self._req_pause = True @@ -82,7 +65,7 @@ def _decompress_state(compressed_states, num_message_bits): 1 << np.arange(num_message_bits - 1, -1, -1))) != 0 # reshape into a 1D array boolean_array.reshape(-1) - return boolean_array.astype(np.int8) + return boolean_array.astype(np.int8).flatten() """ def test_code(): diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index 8c18c80b..9cfaf7a8 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ 
b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -135,12 +135,10 @@ def __init__( name=name, log_config=log_config, ) - num_spike_integrators = np.ceil(shape[0] / num_message_bits).astype(int) + num_spike_integrators = 2 + np.ceil(shape[0] / num_message_bits).astype(int) self.best_state = Var(shape=shape, init=best_state_init) self.best_timestep = Var(shape=(1,), init=best_timestep_init) self.best_cost = Var(shape=(1,), init=best_cost_init) self.num_message_bits = Var(shape=(1,), init=num_message_bits) - self.cost_in = InPort(shape=(1,)) - self.timestep_in = InPort(shape=(1,)) - self.state_in = InPort(shape=(num_spike_integrators,)) + self.results_in = InPort(shape=(num_spike_integrators,)) From 20e26757ed3356b59c9dc38664c0c4fa5eb176db Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Fri, 8 Dec 2023 08:25:20 -0800 Subject: [PATCH 07/34] Incremental update to spikeio readout --- .../solvers/generic/solution_reader/models.py | 2 +- .../generic/solution_receiver/models.py | 102 +++++++++++++----- .../solution_readout/test_solution_readout.py | 2 +- 3 files changed, 78 insertions(+), 28 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_reader/models.py b/src/lava/lib/optimization/solvers/generic/solution_reader/models.py index 4008eb80..bef081f4 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_reader/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_reader/models.py @@ -30,7 +30,7 @@ def __init__(self, proc): time_steps_per_algorithmic_step=time_steps_per_algorithmic_step, ) self.read_gate.cost_out.connect(self.solution_readout.cost_in) - self.read_gate.solution_out.connect(self.solution_readout.read_solution) + self.read_gate.solution_out.connect(self.solution_readout.state_in) self.read_gate.send_pause_request.connect( self.solution_readout.timestep_in ) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index d300b73e..0e21ec9f 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -20,9 +20,10 @@ SolutionReceiver, SpikeIntegrator ) from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.proc.sparse.process import Sparse +from lava.proc.sparse.process import Sparse, DelaySparse from lava.utils.weightutils import SignMode from lava.proc import embedded_io as eio +from scipy.sparse import csr_matrix @implements(SolutionReceiver, protocol=AsyncProtocol) @requires(CPU) @@ -51,6 +52,7 @@ def run_async(self): results_buffer = 0 while not np.any(results_buffer): results_buffer = self.results_in.recv() + print(results_buffer, ".....................................") self.best_cost = results_buffer[0] self.best_timestep = results_buffer[1] @@ -93,34 +95,68 @@ def __init__(self, proc): # Define the dense input layer num_variables = np.prod(proc.proc_params.get("shape")) - num_spike_integrators = np.ceil(num_variables / num_message_bits).astype(int) - - weights = self._get_input_weights(num_vars=num_variables, - num_spike_int=num_spike_integrators, - num_spike_integrators=num_message_bits) - - self.synapses_in = Sparse(weights=weights, - sign_mode=SignMode.EXCITATORY, - num_weight_bits=1, - num_message_bits=num_message_bits) + num_spike_integrators = 2 + np.ceil(num_variables / num_message_bits).astype(int) + + weights_state_in = self._get_input_weights( + 
num_vars=num_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits + ) + delays = csr_matrix(np.zeros((num_spike_integrators, num_variables), dtype=np.int8)) + delays[0,0] = 2 + print("weights_state_in ", weights_state_in) + self.synapses_state_in = DelaySparse( + weights=weights_state_in, + delays=delays, + sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits + ) + + weights_cost_in = self._get_cost_in_weights( + num_spike_int=num_spike_integrators, + ) + print("weights_cost_in", weights_cost_in) + self.synapses_cost_in = DelaySparse( + weights=weights_cost_in, + delays=weights_cost_in, + sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=32 + ) + + weights_timestep_in = self._get_timestep_in_weights( + num_spike_int=num_spike_integrators, + ) + print("weights_timestep_in", weights_timestep_in) + self.synapses_timestep_in = DelaySparse( + weights=weights_timestep_in, + delays=weights_timestep_in, + sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=32 + ) self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) self.solution_receiver = SolutionReceiver( - shape=(1,), - best_cost_init = self.best_cost.get(), - best_state_init = self.best_state.get(), - best_timestep_init = self.best_timestep.get()) + shape=(num_variables,), + best_cost_init = proc.best_cost.get(), + best_state_init = proc.best_state.get(), + best_timestep_init = proc.best_timestep.get() + ) # Connect the parent InPort to the InPort of the child-Process. - proc.in_ports.states_in.connect(self.synapses_in.s_in) - proc.in_ports.cost_in.connect(self.solution_receiver.cost_in) - proc.in_ports.timestep_in.connect(self.solution_receiver.timestep_in) + proc.in_ports.states_in.connect(self.synapses_state_in.s_in) + proc.in_ports.cost_in.connect(self.synapses_cost_in.s_in) + proc.in_ports.timestep_in.connect(self.synapses_timestep_in.s_in) # Connect intermediate ports - self.synapses_in.connect(self.spike_integrators.state_in) - self.spike_integrators.state_out.connect( - self.solution_receiver.state_in) + self.synapses_state_in.a_out.connect(self.spike_integrators.a_in) + self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) + self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) + self.spike_integrators.s_out.connect( + self.solution_receiver.results_in) # Create aliases for variables proc.vars.best_state.alias(self.solution_receiver.best_state) @@ -131,19 +167,21 @@ def __init__(self, proc): def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): """To be verified. 
Deprecated due to efficiency""" weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) - for spike_integrator in range(num_spike_int - 1): + print(f"{num_vars=}") + print(f"{num_spike_int=}") + print(f"{num_vars_per_int=}") + for spike_integrator in range(2, num_spike_int - 1): variable_start = num_vars_per_int*spike_integrator weights[spike_integrator, variable_start:variable_start + num_vars_per_int] = 1 - # The last spike integrator might be connected by less than # num_vars_per_int neurons # This happens when mod(num_variables, num_vars_per_int) != 0 - weights[-1, num_vars_per_int*(spike_integrator + 1): -1] = 1 + weights[-1, num_vars_per_int*(num_spike_int - 3):] = 1 - return weights + return csr_matrix(weights) @staticmethod - def _get_input_weights_index(num_vars, num_spike_int, num_vars_per_int): + def _get_state_in_weights_index(num_vars, num_spike_int, num_vars_per_int): """To be verified""" weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) @@ -157,3 +195,15 @@ def _get_input_weights_index(num_vars, num_spike_int, num_vars_per_int): weights[-1, num_vars_per_int * (num_spike_int - 1):num_vars] = 1 return weights + + @staticmethod + def _get_cost_in_weights(num_spike_int: int) -> csr_matrix: + weights = np.zeros((num_spike_int, 1), dtype=int) + weights[0,0] = 1 + return csr_matrix(weights) + + @staticmethod + def _get_timestep_in_weights(num_spike_int: int) -> csr_matrix: + weights = np.zeros((num_spike_int, 1), dtype=int) + weights[1,0] = 1 + return csr_matrix(weights) diff --git a/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py b/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py index 5fe8d281..20cab0b8 100644 --- a/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py +++ b/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py @@ -32,7 +32,7 @@ def setUp(self) -> None: integrator_last_bytes.s_out.connect(readgate.cost_in_last_bytes_0) integrator_first_byte.s_out.connect(readgate.cost_in_first_byte_0) readgate.solution_reader.connect_var(spiker.payload) - readgate.solution_out.connect(self.readout.read_solution) + readgate.solution_out.connect(self.readout.state_in) readgate.cost_out.connect(self.readout.cost_in) readgate.send_pause_request.connect(self.readout.timestep_in) From e80b96ed005ba681cce5dc420bc71ec8c2638fb1 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 12 Dec 2023 10:05:32 +0100 Subject: [PATCH 08/34] revising CostIntegrator --- .../generic/cost_integrator/process.py | 51 ++++++++++++++----- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py b/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py index 594539e5..567e18a9 100644 --- a/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py @@ -55,28 +55,51 @@ def __init__( self, *, shape: ty.Tuple[int, ...] 
= (1,), - min_cost: int = 0, # trivial solution, where all variables are 0 + target_cost: int = -2**31 + 1, + timeout: int = 2**24 - 1, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: - super().__init__(shape=shape, name=name, log_config=log_config) + + self._input_validation(target_cost=target_cost, + timeout=timeout) + + super().__init__(shape=shape, + target_cost = target_cost, + timeout = timeout, + name=name, + log_config=log_config) self.cost_in = InPort(shape=shape) - self.cost_out_last_bytes = OutPort(shape=shape) - self.cost_out_first_byte = OutPort(shape=shape) + self.control_states_out = OutPort(shape=shape) + self.best_cost_out = OutPort(shape=shape) + self.best_timestep_out = OutPort(shape=shape) + + # Counter for timesteps + self.timestep = Var(shape=shape, init=0) + # Storage for best current time step + self.best_timestep = Var(shape=shape, init=0) - # Current cost initiated to zero + # Var to store current cost # Note: Total cost = cost_first_byte << 24 + cost_last_bytes - self.cost_last_bytes = Var(shape=shape, init=0) + # last 24 bit of cost + self.cost_min_last_bytes = Var(shape=shape, init=0) # first 8 bit of cost - self.cost_first_byte = Var(shape=shape, init=0) + self.cost_min_first_byte = Var(shape=shape, init=0) + # Var to store best cost found to far # Note: Total min cost = cost_min_first_byte << 24 + cost_min_last_bytes - # Extract first 8 bit - cost_min_first_byte = np.right_shift(min_cost, 24) - cost_min_first_byte = max(-2 ** 7, min(cost_min_first_byte, 2 ** 7 - 1)) - # Extract last 24 bit - cost_min_last_bytes = min_cost & 2 ** 24 - 1 # last 24 bit of cost - self.cost_min_last_bytes = Var(shape=shape, init=cost_min_last_bytes) + self.cost_min_last_bytes = Var(shape=shape, init=0) # first 8 bit of cost - self.cost_min_first_byte = Var(shape=shape, init=cost_min_first_byte) + self.cost_min_first_byte = Var(shape=shape, init=0) + + @staticmethod + def _input_validation(target_cost, timeout) -> None: + + assert (target_cost and timeout), f"Both the target_cost and the " \ + f"timeout must be defined" + assert 0 > target_cost >= -2**31 + 1, \ + f"The target cost must in the range [-2**32 + 1, 0), " \ + f"but is {target_cost}." + assert 0 < timeout <= 2**24 - 1, f"The timeout must be in the range (" \ + f"0, 2**24 - 1], but is {timeout}." 
\ No newline at end of file From 341e778840ce40e96e006b4fb0a2ea365697c4b9 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 12 Dec 2023 14:36:20 +0100 Subject: [PATCH 09/34] ensure that run_async also stops if no solution found --- .../solvers/generic/solution_receiver/models.py | 13 +++++++++---- .../solvers/generic/solution_receiver/process.py | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 0e21ec9f..d6290356 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -50,16 +50,21 @@ def run_async(self): num_message_bits = self.num_message_bits[0] results_buffer = 0 - while not np.any(results_buffer): + while self._check_if_input(results_buffer): results_buffer = self.results_in.recv() - print(results_buffer, ".....................................") self.best_cost = results_buffer[0] - self.best_timestep = results_buffer[1] - self.best_state[:] = self._decompress_state(results_buffer[2:], 24)[:self.best_state.shape[0]] + self.best_timestep = results_buffer[1] - 1 + self.best_state[:] = self._decompress_state( + compressed_states=results_buffer[2:], + num_message_bits=num_message_bits)[:self.best_state.shape[0]] self._req_pause = True + @staticmethod + def _check_if_input(results_buffer): + return not results_buffer[1] > 0 + @staticmethod def _decompress_state(compressed_states, num_message_bits): """Add info!""" diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index 9cfaf7a8..6ba6c9a5 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -87,7 +87,7 @@ def __init__( self.cost_in = InPort((1,)) self.timestep_in = InPort((1,)) self.best_state = Var(shape=(num_variables,), init=0) - self.best_timestep = Var(shape=(1,), init=0) + self.best_timestep = Var(shape=(1,), init=1) self.best_cost = Var(shape=(1,), init=0) From f34707dced22452d40810c114b9dc218f0fac1b0 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Wed, 13 Dec 2023 14:52:47 +0100 Subject: [PATCH 10/34] passing unit tests for new CostIntegrator --- .../solvers/generic/cost_integrator/process.py | 8 ++++---- .../solvers/generic/solution_receiver/models.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py b/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py index 567e18a9..04cc940b 100644 --- a/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py @@ -82,9 +82,9 @@ def __init__( # Var to store current cost # Note: Total cost = cost_first_byte << 24 + cost_last_bytes # last 24 bit of cost - self.cost_min_last_bytes = Var(shape=shape, init=0) + self.cost_last_bytes = Var(shape=shape, init=0) # first 8 bit of cost - self.cost_min_first_byte = Var(shape=shape, init=0) + self.cost_first_byte = Var(shape=shape, init=0) # Var to store best cost found to far # Note: Total min cost = cost_min_first_byte << 24 + cost_min_last_bytes @@ -96,8 +96,8 @@ def __init__( @staticmethod def _input_validation(target_cost, timeout) -> None: - assert (target_cost 
and timeout), f"Both the target_cost and the " \ - f"timeout must be defined" + assert (target_cost is not None and timeout is not None), \ + f"Both the target_cost and the timeout must be defined" assert 0 > target_cost >= -2**31 + 1, \ f"The target cost must in the range [-2**32 + 1, 0), " \ f"but is {target_cost}." diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index d6290356..a469360f 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -54,7 +54,7 @@ def run_async(self): results_buffer = self.results_in.recv() self.best_cost = results_buffer[0] - self.best_timestep = results_buffer[1] - 1 + self.best_timestep = results_buffer[1] self.best_state[:] = self._decompress_state( compressed_states=results_buffer[2:], num_message_bits=num_message_bits)[:self.best_state.shape[0]] From 3808fc2744a0ee078e71bbececf9964b5756c6fd Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Thu, 14 Dec 2023 21:21:27 +0100 Subject: [PATCH 11/34] nebm_sa: first revision, untested --- src/lava/lib/optimization/solvers/generic/nebm/process.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/lava/lib/optimization/solvers/generic/nebm/process.py b/src/lava/lib/optimization/solvers/generic/nebm/process.py index 244e6541..14edd7cd 100644 --- a/src/lava/lib/optimization/solvers/generic/nebm/process.py +++ b/src/lava/lib/optimization/solvers/generic/nebm/process.py @@ -115,8 +115,10 @@ def __init__( self.a_in = InPort(shape=shape) self.delta_temperature_in = InPort(shape=shape) + self.control_cost_integrator = InPort(shape=shape) self.s_sig_out = OutPort(shape=shape) self.s_wta_out = OutPort(shape=shape) + self.best_state_out = OutPort(shape=shape) self.spk_hist = Var( shape=shape, init=(np.zeros(shape=shape) + init_value).astype(int) @@ -131,7 +133,11 @@ def __init__( np.random.randint(0, 2**8, size=shape), (refract_scaling or 0) ), ) - + # Storage for the best state. 
Will get updated whenever a better + # state was found + # Default is all zeros + self.best_state = Var(shape=shape, + init=np.zeros(shape=shape, dtype=int)) # Initial state determined in DiscreteVariables self.state = Var( shape=shape, From add481cc7803b4a2ba3673ee4abd25572f708eaf Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Fri, 15 Dec 2023 16:35:01 +0100 Subject: [PATCH 12/34] spikeIO running in unit test, but no msgs arriving yet --- .../generic/solution_receiver/models.py | 40 +++++++++++-------- .../generic/solution_receiver/process.py | 24 +++++++---- 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index a469360f..c8f36b40 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -50,11 +50,15 @@ def run_async(self): num_message_bits = self.num_message_bits[0] results_buffer = 0 + print("Here I am!") while self._check_if_input(results_buffer): results_buffer = self.results_in.recv() self.best_cost = results_buffer[0] self.best_timestep = results_buffer[1] + + # best states are returned with a delay of 1 timestep + results_buffer = self.results_in.recv() self.best_state[:] = self._decompress_state( compressed_states=results_buffer[2:], num_message_bits=num_message_bits)[:self.best_state.shape[0]] @@ -99,20 +103,18 @@ def __init__(self, proc): num_message_bits = proc.proc_params.get("num_message_bits") # Define the dense input layer - num_variables = np.prod(proc.proc_params.get("shape")) - num_spike_integrators = 2 + np.ceil(num_variables / num_message_bits).astype(int) + num_bin_variables = proc.proc_params.get("num_bin_variables") + num_spike_integrators = proc.proc_params.get("num_spike_integrators") + + connection_config = proc.proc_params.get("connection_config") weights_state_in = self._get_input_weights( - num_vars=num_variables, + num_vars=num_bin_variables, num_spike_int=num_spike_integrators, num_vars_per_int=num_message_bits ) - delays = csr_matrix(np.zeros((num_spike_integrators, num_variables), dtype=np.int8)) - delays[0,0] = 2 - print("weights_state_in ", weights_state_in) - self.synapses_state_in = DelaySparse( + self.synapses_state_in = Sparse( weights=weights_state_in, - delays=delays, sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=num_message_bits @@ -122,30 +124,31 @@ def __init__(self, proc): num_spike_int=num_spike_integrators, ) print("weights_cost_in", weights_cost_in) - self.synapses_cost_in = DelaySparse( + self.synapses_cost_in = Sparse( weights=weights_cost_in, - delays=weights_cost_in, sign_mode=SignMode.EXCITATORY, num_weight_bits=8, - num_message_bits=32 + num_message_bits=32, ) weights_timestep_in = self._get_timestep_in_weights( num_spike_int=num_spike_integrators, ) print("weights_timestep_in", weights_timestep_in) - self.synapses_timestep_in = DelaySparse( + self.synapses_timestep_in = Sparse( weights=weights_timestep_in, - delays=weights_timestep_in, sign_mode=SignMode.EXCITATORY, num_weight_bits=8, - num_message_bits=32 + num_message_bits=32, ) self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) self.solution_receiver = SolutionReceiver( - shape=(num_variables,), + shape=(1,), + num_variables = num_bin_variables, + num_spike_integrators = num_spike_integrators, + num_message_bits = num_message_bits, best_cost_init = proc.best_cost.get(), 
best_state_init = proc.best_state.get(), best_timestep_init = proc.best_timestep.get() @@ -160,8 +163,9 @@ def __init__(self, proc): self.synapses_state_in.a_out.connect(self.spike_integrators.a_in) self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) + self.spike_integrators.s_out.connect( - self.solution_receiver.results_in) + self.solution_receiver.results_in, connection_config) # Create aliases for variables proc.vars.best_state.alias(self.solution_receiver.best_state) @@ -183,6 +187,10 @@ def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): # This happens when mod(num_variables, num_vars_per_int) != 0 weights[-1, num_vars_per_int*(num_spike_int - 3):] = 1 + print("=" * 20) + print(f"{weights=}") + print("=" * 20) + return csr_matrix(weights) @staticmethod diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index 6ba6c9a5..34be40e5 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -8,6 +8,7 @@ from lava.magma.core.process.process import AbstractProcess, LogConfig from lava.magma.core.process.variable import Var +from lava.magma.core.process.ports.connection_config import ConnectionConfig class SpikeIntegrator(AbstractProcess): """GradedVec @@ -53,7 +54,9 @@ class SolutionReadout(AbstractProcess): def __init__( self, - shape_in: ty.Tuple[int, ...], + shape: ty.Tuple[int, ...], + connection_config: ConnectionConfig, + num_bin_variables: int, num_message_bits = 24, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, @@ -75,18 +78,23 @@ def __init__( value that is determined automatically. 
log_config: LogConfig, optional Configuration options for logging.z""" + + num_spike_integrators = 2 + np.ceil(num_bin_variables / num_message_bits).astype(int) + super().__init__( - shape=shape_in, + shape=shape, + num_spike_integrators=num_spike_integrators, + num_bin_variables=num_bin_variables, num_message_bits=num_message_bits, + connection_config=connection_config, name=name, log_config=log_config, ) - num_variables = np.prod(shape_in) - self.states_in = InPort(shape=(num_variables,)) + self.states_in = InPort(shape=(num_bin_variables,)) self.cost_in = InPort((1,)) self.timestep_in = InPort((1,)) - self.best_state = Var(shape=(num_variables,), init=0) + self.best_state = Var(shape=(num_bin_variables,), init=0) self.best_timestep = Var(shape=(1,), init=1) self.best_cost = Var(shape=(1,), init=0) @@ -123,8 +131,10 @@ class SolutionReceiver(AbstractProcess): def __init__( self, shape: ty.Tuple[int, ...], + num_variables: int, best_cost_init: int, best_state_init: int, + num_spike_integrators: int, best_timestep_init: int, num_message_bits: int = 24, name: ty.Optional[str] = None, @@ -132,12 +142,12 @@ def __init__( ) -> None: super().__init__( shape=shape, + num_variables=num_variables, name=name, log_config=log_config, ) - num_spike_integrators = 2 + np.ceil(shape[0] / num_message_bits).astype(int) - self.best_state = Var(shape=shape, init=best_state_init) + self.best_state = Var(shape=(num_variables,), init=best_state_init) self.best_timestep = Var(shape=(1,), init=best_timestep_init) self.best_cost = Var(shape=(1,), init=best_cost_init) self.num_message_bits = Var(shape=(1,), init=num_message_bits) From e840f925e3d8e3a09e0246d673c3e20a9533bffd Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Fri, 15 Dec 2023 19:42:15 +0100 Subject: [PATCH 13/34] spikeIO msgs arriving --- .../generic/solution_receiver/models.py | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index c8f36b40..dfd86d86 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -49,19 +49,27 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): def run_async(self): num_message_bits = self.num_message_bits[0] - results_buffer = 0 + results_buffer = np.zeros(self.results_in._shape) print("Here I am!") while self._check_if_input(results_buffer): + print("In while loop!") results_buffer = self.results_in.recv() - - self.best_cost = results_buffer[0] - self.best_timestep = results_buffer[1] + print("Finished while loop") + print(results_buffer) + self.best_cost, self.best_timestep, _ = self._decompress_state( + compressed_states=results_buffer, + num_message_bits=num_message_bits) + print(self.best_cost) + print(self.best_timestep) + print("-" * 20) # best states are returned with a delay of 1 timestep results_buffer = self.results_in.recv() - self.best_state[:] = self._decompress_state( - compressed_states=results_buffer[2:], - num_message_bits=num_message_bits)[:self.best_state.shape[0]] + _, _, self.best_state[:] = self._decompress_state( + compressed_states=results_buffer, + num_message_bits=num_message_bits) #[:self.best_state.shape[0]] + print(self.best_state) + print("Finished") self._req_pause = True @@ -72,11 +80,14 @@ def _check_if_input(results_buffer): @staticmethod def _decompress_state(compressed_states, num_message_bits): 
"""Add info!""" - boolean_array = (compressed_states[:, None] & ( + cost = compressed_states[0] + timestep = compressed_states[1] + + states = (compressed_states[2:, None] & ( 1 << np.arange(num_message_bits - 1, -1, -1))) != 0 # reshape into a 1D array - boolean_array.reshape(-1) - return boolean_array.astype(np.int8).flatten() + states.reshape(-1) + return cost, timestep, states.astype(np.int8).flatten() """ def test_code(): From c946c7d629c2cd10691fb458920357c53b72fc05 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Thu, 21 Dec 2023 21:25:02 +0100 Subject: [PATCH 14/34] unit tests passing for SolutionReceiver and SolutionReadout, Spiker32bit now supports signed --- .../generic/solution_receiver/models.py | 78 ++++++++++++------- .../generic/solution_receiver/process.py | 7 +- 2 files changed, 54 insertions(+), 31 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index dfd86d86..0668e551 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -9,6 +9,7 @@ PyLoihiProcessModel, PyAsyncProcessModel ) +from bitstring import Bits from lava.magma.core.model.py.ports import PyInPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.model.sub.model import AbstractSubProcessModel @@ -25,6 +26,7 @@ from lava.proc import embedded_io as eio from scipy.sparse import csr_matrix + @implements(SolutionReceiver, protocol=AsyncProtocol) @requires(CPU) class SolutionReceiverPyModel(PyAsyncProcessModel): @@ -48,29 +50,38 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): def run_async(self): num_message_bits = self.num_message_bits[0] + num_vars = self.best_state.shape[0] + print("+" * 20) + print(self.best_state) + print("+" * 20) results_buffer = np.zeros(self.results_in._shape) - print("Here I am!") + print("Starting reception") while self._check_if_input(results_buffer): print("In while loop!") results_buffer = self.results_in.recv() print("Finished while loop") - print(results_buffer) + print(f"{results_buffer=}") self.best_cost, self.best_timestep, _ = self._decompress_state( compressed_states=results_buffer, - num_message_bits=num_message_bits) - print(self.best_cost) - print(self.best_timestep) + num_message_bits=num_message_bits, + num_vars=num_vars) + print(f"{self.best_cost=}") + print(f"{self.best_timestep=}") print("-" * 20) # best states are returned with a delay of 1 timestep results_buffer = self.results_in.recv() - _, _, self.best_state[:] = self._decompress_state( + print("Received further results") + _, _, states = self._decompress_state( compressed_states=results_buffer, - num_message_bits=num_message_bits) #[:self.best_state.shape[0]] - print(self.best_state) + num_message_bits=num_message_bits, + num_vars=num_vars) #[:self.best_state.shape[0]] print("Finished") - + print(f"{self.best_state=}") + print(f"{states=}") + self.best_state = states + print(f"{self.best_state}") self._req_pause = True @staticmethod @@ -78,16 +89,22 @@ def _check_if_input(results_buffer): return not results_buffer[1] > 0 @staticmethod - def _decompress_state(compressed_states, num_message_bits): + def _decompress_state(compressed_states, num_message_bits, num_vars): """Add info!""" - cost = compressed_states[0] - timestep = compressed_states[1] - + cost = int(compressed_states[0]) + timestep = int(compressed_states[1]) states = (compressed_states[2:, 
None] & ( - 1 << np.arange(num_message_bits - 1, -1, -1))) != 0 + 1 << np.arange(0, num_message_bits))) != 0 + #1 << np.arange(num_message_bits - 1, -1, -1))) != 0 # reshape into a 1D array states.reshape(-1) - return cost, timestep, states.astype(np.int8).flatten() + # If n_vars is not a multiple of num_message_bits, then last entries + # must be cut off + states = states.astype(np.int8).flatten()[:num_vars] + return cost, timestep, states + + + """ def test_code(): @@ -122,22 +139,26 @@ def __init__(self, proc): weights_state_in = self._get_input_weights( num_vars=num_bin_variables, num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits + num_vars_per_int=num_message_bits, ) self.synapses_state_in = Sparse( weights=weights_state_in, - sign_mode=SignMode.EXCITATORY, + #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, - num_message_bits=num_message_bits + num_message_bits=num_message_bits, + weight_exp=0, ) + #CAREFUL! Weights are negated here, since CostIn will always be < 0 + # but SpikeIntegrators only deal with positive numbers. + # This is accounted for in self._decompress_state() weights_cost_in = self._get_cost_in_weights( num_spike_int=num_spike_integrators, ) - print("weights_cost_in", weights_cost_in) + #print("weights_cost_in", weights_cost_in) self.synapses_cost_in = Sparse( weights=weights_cost_in, - sign_mode=SignMode.EXCITATORY, + #sign_mode=SignMode.INHIBITORY, num_weight_bits=8, num_message_bits=32, ) @@ -145,10 +166,10 @@ def __init__(self, proc): weights_timestep_in = self._get_timestep_in_weights( num_spike_int=num_spike_integrators, ) - print("weights_timestep_in", weights_timestep_in) + #print("weights_timestep_in", weights_timestep_in) self.synapses_timestep_in = Sparse( weights=weights_timestep_in, - sign_mode=SignMode.EXCITATORY, + #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=32, ) @@ -186,10 +207,11 @@ def __init__(self, proc): @staticmethod def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): """To be verified. 
Deprecated due to efficiency""" + weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) - print(f"{num_vars=}") - print(f"{num_spike_int=}") - print(f"{num_vars_per_int=}") + #print(f"{num_vars=}") + #print(f"{num_spike_int=}") + #print(f"{num_vars_per_int=}") for spike_integrator in range(2, num_spike_int - 1): variable_start = num_vars_per_int*spike_integrator weights[spike_integrator, variable_start:variable_start + num_vars_per_int] = 1 @@ -198,9 +220,9 @@ def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): # This happens when mod(num_variables, num_vars_per_int) != 0 weights[-1, num_vars_per_int*(num_spike_int - 3):] = 1 - print("=" * 20) - print(f"{weights=}") - print("=" * 20) + #print("=" * 20) + #print(f"{weights=}") + #print("=" * 20) return csr_matrix(weights) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index 34be40e5..c7c76894 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -1,8 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2023 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import numpy as np import typing as ty +import numpy.typing as npty from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.process import AbstractProcess, LogConfig @@ -31,7 +32,7 @@ def __init__( class SolutionReadout(AbstractProcess): - r"""Process which implementation holds the solution readout layer + r"""Process which implements the solution readout layer on the solver of an optimization problem. Attributes @@ -133,7 +134,7 @@ def __init__( shape: ty.Tuple[int, ...], num_variables: int, best_cost_init: int, - best_state_init: int, + best_state_init: ty.Union[npty.ArrayLike, int], num_spike_integrators: int, best_timestep_init: int, num_message_bits: int = 24, From 8bcfbe91cb4c906528bc62a1b02daa14ebd4443a Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Fri, 22 Dec 2023 19:31:52 +0100 Subject: [PATCH 15/34] revised nebm_sa, old unit tests pass --- .../optimization/solvers/generic/solution_receiver/models.py | 4 ++-- .../optimization/solvers/generic/solution_receiver/process.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 0668e551..e9949e82 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -158,7 +158,6 @@ def __init__(self, proc): #print("weights_cost_in", weights_cost_in) self.synapses_cost_in = Sparse( weights=weights_cost_in, - #sign_mode=SignMode.INHIBITORY, num_weight_bits=8, num_message_bits=32, ) @@ -214,7 +213,8 @@ def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): #print(f"{num_vars_per_int=}") for spike_integrator in range(2, num_spike_int - 1): variable_start = num_vars_per_int*spike_integrator - weights[spike_integrator, variable_start:variable_start + num_vars_per_int] = 1 + weights[spike_integrator, variable_start:variable_start + + num_vars_per_int] = 1 # The last spike integrator might be connected by less than # num_vars_per_int neurons # This happens when mod(num_variables, num_vars_per_int) != 0 diff --git 
a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py index c7c76894..f980d966 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py @@ -58,7 +58,7 @@ def __init__( shape: ty.Tuple[int, ...], connection_config: ConnectionConfig, num_bin_variables: int, - num_message_bits = 24, + num_message_bits = 32, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: From 6dc1607a994bf5745b299002d54e757f99f8e59e Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Fri, 12 Jan 2024 04:17:52 -0800 Subject: [PATCH 16/34] Add synapses NEBM -> SpikeIntegrators --- .../generic/solution_receiver/models.py | 68 ++++++++++++++++--- 1 file changed, 59 insertions(+), 9 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index e9949e82..5c1eda10 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -136,19 +136,62 @@ def __init__(self, proc): connection_config = proc.proc_params.get("connection_config") - weights_state_in = self._get_input_weights( + weights_state_in_0 = self._get_input_weights( num_vars=num_bin_variables, num_spike_int=num_spike_integrators, num_vars_per_int=num_message_bits, + weight_exp=0 ) - self.synapses_state_in = Sparse( - weights=weights_state_in, + self.synapses_state_in_0 = Sparse( + weights=weights_state_in_0, #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=0, ) + weights_state_in_1 = self._get_input_weights( + num_vars=num_bin_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits, + weight_exp=8 + ) + self.synapses_state_in_1 = Sparse( + weights=weights_state_in_1, + #sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits, + weight_exp=8, + ) + + weights_state_in_2 = self._get_input_weights( + num_vars=num_bin_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits, + weight_exp=16 + ) + self.synapses_state_in_2 = Sparse( + weights=weights_state_in_2, + #sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits, + weight_exp=16, + ) + + weights_state_in_3 = self._get_input_weights( + num_vars=num_bin_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits, + weight_exp=24 + ) + self.synapses_state_in_3 = Sparse( + weights=weights_state_in_3, + #sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits, + weight_exp=24, + ) + #CAREFUL! Weights are negated here, since CostIn will always be < 0 # but SpikeIntegrators only deal with positive numbers. # This is accounted for in self._decompress_state() @@ -186,12 +229,18 @@ def __init__(self, proc): ) # Connect the parent InPort to the InPort of the child-Process. 
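# Reference sketch (illustrative only, not part of the patch): why four
# Sparse blocks are used. An 8-bit synaptic weight cannot carry the factors
# 2**0 .. 2**31 needed to pack 32 binary variables into one 32-bit
# SpikeIntegrator word, so each block carries 2**0 .. 2**7 and relies on
# weight_exp in {0, 8, 16, 24} to shift its byte into place (this assumes
# Sparse scales its weights by 2**weight_exp, which is what the patch relies
# on). The sizes and values below are made up for illustration.
import numpy as np

states = np.random.randint(0, 2, size=32)        # one packed word's worth
packed = 0
for weight_exp in (0, 8, 16, 24):
    w = np.zeros(32, dtype=np.uint8)
    w[weight_exp:weight_exp + 8] = np.power(2, np.arange(8))
    packed += int(w @ states) << weight_exp      # 2**weight_exp applied by Sparse
assert packed == int((states * (1 << np.arange(32))).sum())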
- proc.in_ports.states_in.connect(self.synapses_state_in.s_in) + proc.in_ports.states_in.connect(self.synapses_state_in_0.s_in) + proc.in_ports.states_in.connect(self.synapses_state_in_1.s_in) + proc.in_ports.states_in.connect(self.synapses_state_in_2.s_in) + proc.in_ports.states_in.connect(self.synapses_state_in_3.s_in) proc.in_ports.cost_in.connect(self.synapses_cost_in.s_in) proc.in_ports.timestep_in.connect(self.synapses_timestep_in.s_in) # Connect intermediate ports - self.synapses_state_in.a_out.connect(self.spike_integrators.a_in) + self.synapses_state_in_0.a_out.connect(self.spike_integrators.a_in) + self.synapses_state_in_1.a_out.connect(self.spike_integrators.a_in) + self.synapses_state_in_2.a_out.connect(self.spike_integrators.a_in) + self.synapses_state_in_3.a_out.connect(self.spike_integrators.a_in) self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) @@ -204,7 +253,7 @@ def __init__(self, proc): proc.vars.best_cost.alias(self.solution_receiver.best_cost) @staticmethod - def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): + def _get_input_weights(num_vars, num_spike_int, num_vars_per_int, weight_exp): """To be verified. Deprecated due to efficiency""" weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) @@ -212,13 +261,14 @@ def _get_input_weights(num_vars, num_spike_int, num_vars_per_int): #print(f"{num_spike_int=}") #print(f"{num_vars_per_int=}") for spike_integrator in range(2, num_spike_int - 1): - variable_start = num_vars_per_int*spike_integrator + variable_start = 32 * (spike_integrator - 2) + weight_exp weights[spike_integrator, variable_start:variable_start + - num_vars_per_int] = 1 + 9] = np.power(2, np.arange(8)) # The last spike integrator might be connected by less than # num_vars_per_int neurons # This happens when mod(num_variables, num_vars_per_int) != 0 - weights[-1, num_vars_per_int*(num_spike_int - 3):] = 1 + variable_start = 32 * (num_spike_int - 3) + weight_exp + weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1]-variable_start)) #print("=" * 20) #print(f"{weights=}") From 2784092315b24f0b5cb9a50b40b7c1317addba87 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 16 Jan 2024 15:13:38 +0100 Subject: [PATCH 17/34] SolutionReadout functional and tested --- .../solvers/generic/solution_receiver/models.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py index 5c1eda10..879b40c4 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py @@ -192,9 +192,6 @@ def __init__(self, proc): weight_exp=24, ) - #CAREFUL! Weights are negated here, since CostIn will always be < 0 - # but SpikeIntegrators only deal with positive numbers. - # This is accounted for in self._decompress_state() weights_cost_in = self._get_cost_in_weights( num_spike_int=num_spike_integrators, ) @@ -256,18 +253,20 @@ def __init__(self, proc): def _get_input_weights(num_vars, num_spike_int, num_vars_per_int, weight_exp): """To be verified. 
Deprecated due to efficiency""" - weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) + weights = np.zeros((num_spike_int, num_vars), dtype=np.uint8) #print(f"{num_vars=}") #print(f"{num_spike_int=}") #print(f"{num_vars_per_int=}") + # The first two SpikeIntegrators receive best_cost and best_timestep for spike_integrator in range(2, num_spike_int - 1): - variable_start = 32 * (spike_integrator - 2) + weight_exp + variable_start = num_vars_per_int * (spike_integrator - 2) + weight_exp weights[spike_integrator, variable_start:variable_start + - 9] = np.power(2, np.arange(8)) + 8] = np.power(2, + np.arange(8)) # The last spike integrator might be connected by less than # num_vars_per_int neurons # This happens when mod(num_variables, num_vars_per_int) != 0 - variable_start = 32 * (num_spike_int - 3) + weight_exp + variable_start = num_vars_per_int * (num_spike_int - 3) + weight_exp weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1]-variable_start)) #print("=" * 20) From d17e08573d7f25eb69a8c1e85f315b47aab27752 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Thu, 18 Jan 2024 12:36:32 +0100 Subject: [PATCH 18/34] Renamend SolutionReadout --- .../models.py | 36 ++++--------------- .../process.py | 2 +- 2 files changed, 8 insertions(+), 30 deletions(-) rename src/lava/lib/optimization/solvers/generic/{solution_receiver => solution_readout_ethernet}/models.py (90%) rename src/lava/lib/optimization/solvers/generic/{solution_receiver => solution_readout_ethernet}/process.py (99%) diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py b/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py similarity index 90% rename from src/lava/lib/optimization/solvers/generic/solution_receiver/models.py rename to src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py index 879b40c4..59d0af47 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py @@ -2,8 +2,7 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import numpy as np -from lava.lib.optimization.solvers.generic.monitoring_processes\ - .solution_readout.process import SolutionReadout +from lava.lib.optimization.solvers.generic.solution_readout_ethernet.process import SolutionReadoutEthernet from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import ( PyLoihiProcessModel, @@ -16,7 +15,7 @@ from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol -from lava.lib.optimization.solvers.generic.solution_receiver.process import \ +from lava.lib.optimization.solvers.generic.solution_readout_ethernet.process import \ ( SolutionReceiver, SpikeIntegrator ) @@ -52,36 +51,21 @@ def run_async(self): num_message_bits = self.num_message_bits[0] num_vars = self.best_state.shape[0] - print("+" * 20) - print(self.best_state) - print("+" * 20) results_buffer = np.zeros(self.results_in._shape) - print("Starting reception") while self._check_if_input(results_buffer): - print("In while loop!") results_buffer = self.results_in.recv() - print("Finished while loop") - print(f"{results_buffer=}") self.best_cost, self.best_timestep, _ = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, num_vars=num_vars) - print(f"{self.best_cost=}") - print(f"{self.best_timestep=}") - print("-" * 20) # best 
states are returned with a delay of 1 timestep results_buffer = self.results_in.recv() - print("Received further results") _, _, states = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, num_vars=num_vars) #[:self.best_state.shape[0]] - print("Finished") - print(f"{self.best_state=}") - print(f"{states=}") self.best_state = states - print(f"{self.best_state}") self._req_pause = True @staticmethod @@ -120,9 +104,9 @@ def test_code(): print(boolean_array) """ -@implements(proc=SolutionReadout, protocol=LoihiProtocol) +@implements(proc=SolutionReadoutEthernet, protocol=LoihiProtocol) @requires(CPU) -class SolutionReadoutModel(AbstractSubProcessModel): +class SolutionReadoutEthernetModel(AbstractSubProcessModel): """Model for the SolutionReadout process. """ @@ -195,7 +179,7 @@ def __init__(self, proc): weights_cost_in = self._get_cost_in_weights( num_spike_int=num_spike_integrators, ) - #print("weights_cost_in", weights_cost_in) + self.synapses_cost_in = Sparse( weights=weights_cost_in, num_weight_bits=8, @@ -205,7 +189,7 @@ def __init__(self, proc): weights_timestep_in = self._get_timestep_in_weights( num_spike_int=num_spike_integrators, ) - #print("weights_timestep_in", weights_timestep_in) + self.synapses_timestep_in = Sparse( weights=weights_timestep_in, #sign_mode=SignMode.EXCITATORY, @@ -254,9 +238,7 @@ def _get_input_weights(num_vars, num_spike_int, num_vars_per_int, weight_exp): """To be verified. Deprecated due to efficiency""" weights = np.zeros((num_spike_int, num_vars), dtype=np.uint8) - #print(f"{num_vars=}") - #print(f"{num_spike_int=}") - #print(f"{num_vars_per_int=}") + # The first two SpikeIntegrators receive best_cost and best_timestep for spike_integrator in range(2, num_spike_int - 1): variable_start = num_vars_per_int * (spike_integrator - 2) + weight_exp @@ -269,10 +251,6 @@ def _get_input_weights(num_vars, num_spike_int, num_vars_per_int, weight_exp): variable_start = num_vars_per_int * (num_spike_int - 3) + weight_exp weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1]-variable_start)) - #print("=" * 20) - #print(f"{weights=}") - #print("=" * 20) - return csr_matrix(weights) @staticmethod diff --git a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py b/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/process.py similarity index 99% rename from src/lava/lib/optimization/solvers/generic/solution_receiver/process.py rename to src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/process.py index f980d966..f6f28586 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_receiver/process.py +++ b/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/process.py @@ -31,7 +31,7 @@ def __init__( self.s_out = OutPort(shape=shape) -class SolutionReadout(AbstractProcess): +class SolutionReadoutEthernet(AbstractProcess): r"""Process which implements the solution readout layer on the solver of an optimization problem. 
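For reference between patches: the unpacking that _decompress_state performs on the spike-integrator buffer can be reproduced with plain numpy. The layout follows the patch (cost first, then timestep, then one packed 32-bit word per 32 binary variables, least significant bit first); the concrete sizes and values below are illustrative only, a sketch rather than part of the series.

import numpy as np

num_message_bits, num_vars = 32, 40                      # hypothetical sizes
buffer = np.array([-1234, 57, 0b1011, 0b0101], dtype=np.int32)

cost, timestep = int(buffer[0]), int(buffer[1])
states = (buffer[2:, None] & (1 << np.arange(num_message_bits))) != 0
states = states.astype(np.int8).flatten()[:num_vars]     # LSB first, trimmed

assert (cost, timestep) == (-1234, 57)
assert list(states[:4]) == [1, 1, 0, 1]                  # bits of 0b1011, LSB first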
From e35212debee5225f11086808093e664091f26f07 Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Fri, 19 Jan 2024 06:33:01 -0800 Subject: [PATCH 19/34] Bug fix in SolutionReadout Co-authored-by: Philipp Stratmann --- .../solution_readout_ethernet/models.py | 124 +++++++++--------- 1 file changed, 64 insertions(+), 60 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py b/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py index 59d0af47..4b5b78a9 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py @@ -120,6 +120,8 @@ def __init__(self, proc): connection_config = proc.proc_params.get("connection_config") + self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) + weights_state_in_0 = self._get_input_weights( num_vars=num_bin_variables, num_spike_int=num_spike_integrators, @@ -134,52 +136,66 @@ def __init__(self, proc): weight_exp=0, ) - weights_state_in_1 = self._get_input_weights( - num_vars=num_bin_variables, - num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, - weight_exp=8 - ) - self.synapses_state_in_1 = Sparse( - weights=weights_state_in_1, - #sign_mode=SignMode.EXCITATORY, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=8, - ) - - weights_state_in_2 = self._get_input_weights( - num_vars=num_bin_variables, - num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, - weight_exp=16 - ) - self.synapses_state_in_2 = Sparse( - weights=weights_state_in_2, - #sign_mode=SignMode.EXCITATORY, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=16, - ) - - weights_state_in_3 = self._get_input_weights( - num_vars=num_bin_variables, - num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, - weight_exp=24 - ) - self.synapses_state_in_3 = Sparse( - weights=weights_state_in_3, - #sign_mode=SignMode.EXCITATORY, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=24, - ) + proc.in_ports.states_in.connect(self.synapses_state_in_0.s_in) + self.synapses_state_in_0.a_out.connect(self.spike_integrators.a_in) + if num_bin_variables > 8: + weights_state_in_1 = self._get_input_weights( + num_vars=num_bin_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits, + weight_exp=8 + ) + self.synapses_state_in_1 = Sparse( + weights=weights_state_in_1, + #sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits, + weight_exp=8, + ) + + proc.in_ports.states_in.connect(self.synapses_state_in_1.s_in) + self.synapses_state_in_1.a_out.connect(self.spike_integrators.a_in) + + if num_bin_variables > 16: + weights_state_in_2 = self._get_input_weights( + num_vars=num_bin_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits, + weight_exp=16 + ) + self.synapses_state_in_2 = Sparse( + weights=weights_state_in_2, + #sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits, + weight_exp=16, + ) + + proc.in_ports.states_in.connect(self.synapses_state_in_2.s_in) + self.synapses_state_in_2.a_out.connect(self.spike_integrators.a_in) + + if num_bin_variables > 24: + weights_state_in_3 = self._get_input_weights( + num_vars=num_bin_variables, + num_spike_int=num_spike_integrators, + num_vars_per_int=num_message_bits, + weight_exp=24 + ) + self.synapses_state_in_3 = Sparse( + 
weights=weights_state_in_3, + #sign_mode=SignMode.EXCITATORY, + num_weight_bits=8, + num_message_bits=num_message_bits, + weight_exp=24, + ) + proc.in_ports.states_in.connect(self.synapses_state_in_3.s_in) + self.synapses_state_in_3.a_out.connect(self.spike_integrators.a_in) + + # Connect the CostIntegrator weights_cost_in = self._get_cost_in_weights( num_spike_int=num_spike_integrators, ) - self.synapses_cost_in = Sparse( weights=weights_cost_in, num_weight_bits=8, @@ -189,15 +205,19 @@ def __init__(self, proc): weights_timestep_in = self._get_timestep_in_weights( num_spike_int=num_spike_integrators, ) - self.synapses_timestep_in = Sparse( weights=weights_timestep_in, #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=32, ) + + proc.in_ports.cost_in.connect(self.synapses_cost_in.s_in) + self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) + proc.in_ports.timestep_in.connect(self.synapses_timestep_in.s_in) + self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) - self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) + # Define and connect the SolutionReceiver self.solution_receiver = SolutionReceiver( shape=(1,), @@ -209,22 +229,6 @@ def __init__(self, proc): best_timestep_init = proc.best_timestep.get() ) - # Connect the parent InPort to the InPort of the child-Process. - proc.in_ports.states_in.connect(self.synapses_state_in_0.s_in) - proc.in_ports.states_in.connect(self.synapses_state_in_1.s_in) - proc.in_ports.states_in.connect(self.synapses_state_in_2.s_in) - proc.in_ports.states_in.connect(self.synapses_state_in_3.s_in) - proc.in_ports.cost_in.connect(self.synapses_cost_in.s_in) - proc.in_ports.timestep_in.connect(self.synapses_timestep_in.s_in) - - # Connect intermediate ports - self.synapses_state_in_0.a_out.connect(self.spike_integrators.a_in) - self.synapses_state_in_1.a_out.connect(self.spike_integrators.a_in) - self.synapses_state_in_2.a_out.connect(self.spike_integrators.a_in) - self.synapses_state_in_3.a_out.connect(self.spike_integrators.a_in) - self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) - self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) - self.spike_integrators.s_out.connect( self.solution_receiver.results_in, connection_config) From 0216983d9f93012b10cb5c0b8eaf773268b3022b Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Mon, 22 Jan 2024 13:21:09 +0100 Subject: [PATCH 20/34] refractor --- .../solvers/generic/cost_integrator/models.py | 2 +- .../generic/cost_integrator/process.py | 47 ++---- .../solvers/generic/nebm/models.py | 2 +- .../solvers/generic/nebm/process.py | 8 +- .../solvers/generic/solution_reader/models.py | 2 +- .../optimization/solvers/generic/solver.py | 10 +- .../solvers/qubo/cost_integrator/process.py | 105 ++++++++++++ .../qubo/simulated_annealing/process.py | 153 ++++++++++++++++++ .../solution_readout}/models.py | 12 +- .../solution_readout}/process.py | 0 .../solution_readout/test_solution_readout.py | 2 +- 11 files changed, 282 insertions(+), 61 deletions(-) create mode 100644 src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py create mode 100644 src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py rename src/lava/lib/optimization/solvers/{generic/solution_readout_ethernet => qubo/solution_readout}/models.py (96%) rename src/lava/lib/optimization/solvers/{generic/solution_readout_ethernet => qubo/solution_readout}/process.py (100%) diff --git 
a/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py b/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py index a19ecb4a..9fc7fcae 100644 --- a/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py +++ b/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py @@ -10,7 +10,7 @@ from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.lib.optimization.solvers.generic.cost_integrator.process import ( +from lava.lib.optimization.solvers.qubo.cost_integrator.process import ( CostIntegrator, ) diff --git a/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py b/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py index 04cc940b..594539e5 100644 --- a/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/generic/cost_integrator/process.py @@ -55,51 +55,28 @@ def __init__( self, *, shape: ty.Tuple[int, ...] = (1,), - target_cost: int = -2**31 + 1, - timeout: int = 2**24 - 1, + min_cost: int = 0, # trivial solution, where all variables are 0 name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: - - self._input_validation(target_cost=target_cost, - timeout=timeout) - - super().__init__(shape=shape, - target_cost = target_cost, - timeout = timeout, - name=name, - log_config=log_config) + super().__init__(shape=shape, name=name, log_config=log_config) self.cost_in = InPort(shape=shape) - self.control_states_out = OutPort(shape=shape) - self.best_cost_out = OutPort(shape=shape) - self.best_timestep_out = OutPort(shape=shape) - - # Counter for timesteps - self.timestep = Var(shape=shape, init=0) - # Storage for best current time step - self.best_timestep = Var(shape=shape, init=0) + self.cost_out_last_bytes = OutPort(shape=shape) + self.cost_out_first_byte = OutPort(shape=shape) - # Var to store current cost + # Current cost initiated to zero # Note: Total cost = cost_first_byte << 24 + cost_last_bytes - # last 24 bit of cost self.cost_last_bytes = Var(shape=shape, init=0) # first 8 bit of cost self.cost_first_byte = Var(shape=shape, init=0) - # Var to store best cost found to far # Note: Total min cost = cost_min_first_byte << 24 + cost_min_last_bytes + # Extract first 8 bit + cost_min_first_byte = np.right_shift(min_cost, 24) + cost_min_first_byte = max(-2 ** 7, min(cost_min_first_byte, 2 ** 7 - 1)) + # Extract last 24 bit + cost_min_last_bytes = min_cost & 2 ** 24 - 1 # last 24 bit of cost - self.cost_min_last_bytes = Var(shape=shape, init=0) + self.cost_min_last_bytes = Var(shape=shape, init=cost_min_last_bytes) # first 8 bit of cost - self.cost_min_first_byte = Var(shape=shape, init=0) - - @staticmethod - def _input_validation(target_cost, timeout) -> None: - - assert (target_cost is not None and timeout is not None), \ - f"Both the target_cost and the timeout must be defined" - assert 0 > target_cost >= -2**31 + 1, \ - f"The target cost must in the range [-2**32 + 1, 0), " \ - f"but is {target_cost}." - assert 0 < timeout <= 2**24 - 1, f"The timeout must be in the range (" \ - f"0, 2**24 - 1], but is {timeout}." 
\ No newline at end of file + self.cost_min_first_byte = Var(shape=shape, init=cost_min_first_byte) diff --git a/src/lava/lib/optimization/solvers/generic/nebm/models.py b/src/lava/lib/optimization/solvers/generic/nebm/models.py index c11b2ed0..587e32eb 100644 --- a/src/lava/lib/optimization/solvers/generic/nebm/models.py +++ b/src/lava/lib/optimization/solvers/generic/nebm/models.py @@ -1,6 +1,6 @@ import numpy as np -from lava.lib.optimization.solvers.generic.nebm.process import NEBM +from lava.lib.optimization.solvers.qubo.simulated_annealing.process import NEBM from lava.magma.core.decorator import implements, requires, tag from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.model.py.ports import PyInPort, PyOutPort diff --git a/src/lava/lib/optimization/solvers/generic/nebm/process.py b/src/lava/lib/optimization/solvers/generic/nebm/process.py index 14edd7cd..244e6541 100644 --- a/src/lava/lib/optimization/solvers/generic/nebm/process.py +++ b/src/lava/lib/optimization/solvers/generic/nebm/process.py @@ -115,10 +115,8 @@ def __init__( self.a_in = InPort(shape=shape) self.delta_temperature_in = InPort(shape=shape) - self.control_cost_integrator = InPort(shape=shape) self.s_sig_out = OutPort(shape=shape) self.s_wta_out = OutPort(shape=shape) - self.best_state_out = OutPort(shape=shape) self.spk_hist = Var( shape=shape, init=(np.zeros(shape=shape) + init_value).astype(int) @@ -133,11 +131,7 @@ def __init__( np.random.randint(0, 2**8, size=shape), (refract_scaling or 0) ), ) - # Storage for the best state. Will get updated whenever a better - # state was found - # Default is all zeros - self.best_state = Var(shape=shape, - init=np.zeros(shape=shape, dtype=int)) + # Initial state determined in DiscreteVariables self.state = Var( shape=shape, diff --git a/src/lava/lib/optimization/solvers/generic/solution_reader/models.py b/src/lava/lib/optimization/solvers/generic/solution_reader/models.py index bef081f4..4008eb80 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_reader/models.py +++ b/src/lava/lib/optimization/solvers/generic/solution_reader/models.py @@ -30,7 +30,7 @@ def __init__(self, proc): time_steps_per_algorithmic_step=time_steps_per_algorithmic_step, ) self.read_gate.cost_out.connect(self.solution_readout.cost_in) - self.read_gate.solution_out.connect(self.solution_readout.state_in) + self.read_gate.solution_out.connect(self.solution_readout.read_solution) self.read_gate.send_pause_request.connect( self.solution_readout.timestep_in ) diff --git a/src/lava/lib/optimization/solvers/generic/solver.py b/src/lava/lib/optimization/solvers/generic/solver.py index abd9a7d2..60a79b74 100644 --- a/src/lava/lib/optimization/solvers/generic/solver.py +++ b/src/lava/lib/optimization/solvers/generic/solver.py @@ -37,7 +37,7 @@ from lava.lib.optimization.problems.problems import QUBO from lava.lib.optimization.problems.problems import OptimizationProblem from lava.lib.optimization.solvers.generic.builder import SolverProcessBuilder -from lava.lib.optimization.solvers.generic.cost_integrator.process import ( +from lava.lib.optimization.solvers.qubo.cost_integrator.process import ( CostIntegrator, ) from lava.lib.optimization.solvers.generic.hierarchical_processes import ( @@ -50,9 +50,8 @@ SolutionReadoutPyModel, ) from lava.lib.optimization.solvers.generic.nebm.models import NEBMPyModel -from lava.lib.optimization.solvers.generic.nebm.process import ( +from lava.lib.optimization.solvers.qubo.simulated_annealing.process import ( NEBM, - 
SimulatedAnnealing, SimulatedAnnealingLocal, ) from lava.lib.optimization.solvers.generic.annealing.process import Annealing @@ -124,11 +123,6 @@ class NcL2ModelPG: class NcL2ModelPI: pass - -from lava.lib.optimization.solvers.generic.read_gate.models import ( - ReadGatePyModel, -) - BACKENDS = ty.Union[CPU, Loihi2NeuroCore, NeuroCore, str] HP_TYPE = ty.Union[ty.Dict, ty.List[ty.Dict]] CPUS = [CPU, "CPU"] diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py new file mode 100644 index 00000000..04cc940b --- /dev/null +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -0,0 +1,105 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ +import typing as ty + +import numpy as np +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.process import AbstractProcess, LogConfig +from lava.magma.core.process.variable import Var + + +class CostIntegrator(AbstractProcess): + """Node that integrates cost components and produces output when a better + cost is found. + + Parameters + ---------- + shape : tuple(int) + The expected number and topology of the input cost components. + name : str, optional + Name of the Process. Default is 'Process_ID', where ID is an + integer value that is determined automatically. + log_config: Configuration options for logging. + + InPorts + ------- + cost_in + input to be additively integrated. + + OutPorts + -------- + cost_out_last_bytes: OutPort + Notifies the next process about the detection of a better cost. + Messages the last 3 byte of the new best cost. + Total cost = cost_out_first_byte << 24 + cost_out_last_bytes. + cost_out_first_byte: OutPort + Notifies the next process about the detection of a better cost. + Messages the first byte of the new best cost. + + Vars + ---- + cost + Holds current cost as addition of input spikes' payloads + + cost_min_last_bytes + Current minimum cost, i.e., the lowest reported cost so far. + Saves the last 3 bytes. + cost_min = cost_min_first_byte << 24 + cost_min_last_bytes + cost_min_first_byte + Current minimum cost, i.e., the lowest reported cost so far. + Saves the first byte. + """ + + def __init__( + self, + *, + shape: ty.Tuple[int, ...] 
= (1,), + target_cost: int = -2**31 + 1, + timeout: int = 2**24 - 1, + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, + ) -> None: + + self._input_validation(target_cost=target_cost, + timeout=timeout) + + super().__init__(shape=shape, + target_cost = target_cost, + timeout = timeout, + name=name, + log_config=log_config) + self.cost_in = InPort(shape=shape) + self.control_states_out = OutPort(shape=shape) + self.best_cost_out = OutPort(shape=shape) + self.best_timestep_out = OutPort(shape=shape) + + # Counter for timesteps + self.timestep = Var(shape=shape, init=0) + # Storage for best current time step + self.best_timestep = Var(shape=shape, init=0) + + # Var to store current cost + # Note: Total cost = cost_first_byte << 24 + cost_last_bytes + # last 24 bit of cost + self.cost_last_bytes = Var(shape=shape, init=0) + # first 8 bit of cost + self.cost_first_byte = Var(shape=shape, init=0) + + # Var to store best cost found to far + # Note: Total min cost = cost_min_first_byte << 24 + cost_min_last_bytes + # last 24 bit of cost + self.cost_min_last_bytes = Var(shape=shape, init=0) + # first 8 bit of cost + self.cost_min_first_byte = Var(shape=shape, init=0) + + @staticmethod + def _input_validation(target_cost, timeout) -> None: + + assert (target_cost is not None and timeout is not None), \ + f"Both the target_cost and the timeout must be defined" + assert 0 > target_cost >= -2**31 + 1, \ + f"The target cost must in the range [-2**32 + 1, 0), " \ + f"but is {target_cost}." + assert 0 < timeout <= 2**24 - 1, f"The timeout must be in the range (" \ + f"0, 2**24 - 1], but is {timeout}." \ No newline at end of file diff --git a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py new file mode 100644 index 00000000..f2c980c1 --- /dev/null +++ b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py @@ -0,0 +1,153 @@ +import numpy as np +import typing as ty +from numpy import typing as npty + +from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.variable import Var + + +class SimulatedAnnealingLocal(AbstractProcess): + """ + Non-equilibrium Boltzmann (NEBM) neuron model to solve QUBO problems. + This model uses purely information available at the level of individual + neurons to decide whether to switch or not, in contrast to the inheriting + Process NEBMSimulatedAnnealing. + """ + + def __init__( + self, + *, + shape: ty.Tuple[int, ...], + cost_diagonal: npty.ArrayLike, + max_temperature: npty.ArrayLike, + refract_scaling: ty.Union[npty.ArrayLike, None], + refract_seed: int, + init_value: npty.ArrayLike, + init_state: npty.ArrayLike, + ): + """ + SA Process. + + Parameters + ---------- + shape: Tuple + Number of neurons. Default is (1,). + + refract_scaling : ArrayLike + After a neuron has switched its binary variable, it remains in a + refractory state that prevents any variable switching for a + number of time steps. This number of time steps is determined by + rand(0, 255) >> refract_scaling + Refract_scaling thus denotes the order of magnitude of timesteps a + neuron remains in a state after a transition. + refract_seed : int + Random seed to initialize the refractory periods. Allows + repeatability. 
+ init_value : ArrayLike + The spiking history with which the network is initialized + init_state : ArrayLike + The state of neurons with which the network is initialized + neuron_model : str + The neuron model to be used. The latest list of allowed values + can be found in NEBMSimulatedAnnealing.enabled_neuron_models. + """ + + super().__init__( + shape=shape, + cost_diagonal=cost_diagonal, + refract_scaling=refract_scaling, + ) + + self.a_in = InPort(shape=shape) + self.delta_temperature_in = InPort(shape=shape) + self.control_cost_integrator = InPort(shape=shape) + self.s_sig_out = OutPort(shape=shape) + self.s_wta_out = OutPort(shape=shape) + self.best_state_out = OutPort(shape=shape) + + self.spk_hist = Var( + shape=shape, init=(np.zeros(shape=shape) + init_value).astype(int) + ) + + self.temperature = Var(shape=shape, init=int(max_temperature)) + + np.random.seed(refract_seed) + self.refract_counter = Var( + shape=shape, + init=0 + np.right_shift( + np.random.randint(0, 2**8, size=shape), (refract_scaling or 0) + ), + ) + # Storage for the best state. Will get updated whenever a better + # state was found + # Default is all zeros + self.best_state = Var(shape=shape, + init=np.zeros(shape=shape, dtype=int)) + # Initial state determined in DiscreteVariables + self.state = Var( + shape=shape, + init=init_state.astype(int) + if init_state is not None + else np.zeros(shape=shape, dtype=int), + ) + + @property + def shape(self) -> ty.Tuple[int, ...]: + return self.proc_params["shape"] + + +class SimulatedAnnealing(SimulatedAnnealingLocal): + """ + Non-equilibrium Boltzmann (NEBM) neuron model to solve QUBO problems. + This model combines the switching intentions of all NEBM neurons to + decide whether to switch or not, to avoid conflicting variable switches. + """ + + def __init__( + self, + *, + shape: ty.Tuple[int, ...], + cost_diagonal: npty.ArrayLike, + max_temperature: npty.ArrayLike, + init_value: npty.ArrayLike, + init_state: npty.ArrayLike, + ): + """ + SA Process. + + Parameters + ---------- + shape: Tuple + Number of neurons. Default is (1,). + + refract_scaling : ArrayLike + After a neuron has switched its binary variable, it remains in a + refractory state that prevents any variable switching for a + number of time steps. This number of time steps is determined by + rand(0, 255) >> refract_scaling + Refract_scaling thus denotes the order of magnitude of timesteps a + neuron remains in a state after a transition. + init_value : ArrayLike + The spiking history with which the network is initialized + init_state : ArrayLike + The state of neurons with which the network is initialized + neuron_model : str + The neuron model to be used. The latest list of allowed values + can be found in NEBMSimulatedAnnealing.enabled_neuron_models. 
+ """ + + super().__init__( + shape=shape, + cost_diagonal=cost_diagonal, + max_temperature=max_temperature, + refract_scaling=None, + refract_seed=0, + init_value=init_value, + init_state=init_state, + ) + + # number of NEBM neurons that suggest switching in a time step + self.n_switches_in = InPort(shape=shape) + # port to notify other NEBM neurons of switching intentions + self.suggest_switch_out = OutPort(shape=shape) diff --git a/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py similarity index 96% rename from src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py rename to src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 4b5b78a9..4a4b74b1 100644 --- a/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -2,27 +2,25 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import numpy as np -from lava.lib.optimization.solvers.generic.solution_readout_ethernet.process import SolutionReadoutEthernet +from lava.lib.optimization.solvers.qubo.solution_readout.process import ( + SolutionReadoutEthernet +) from lava.magma.core.decorator import implements, requires from lava.magma.core.model.py.model import ( - PyLoihiProcessModel, PyAsyncProcessModel ) -from bitstring import Bits from lava.magma.core.model.py.ports import PyInPort from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.model.sub.model import AbstractSubProcessModel from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol -from lava.lib.optimization.solvers.generic.solution_readout_ethernet.process import \ +from lava.lib.optimization.solvers.qubo.solution_readout.process import \ ( SolutionReceiver, SpikeIntegrator ) from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.proc.sparse.process import Sparse, DelaySparse -from lava.utils.weightutils import SignMode -from lava.proc import embedded_io as eio +from lava.proc.sparse.process import Sparse from scipy.sparse import csr_matrix diff --git a/src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py similarity index 100% rename from src/lava/lib/optimization/solvers/generic/solution_readout_ethernet/process.py rename to src/lava/lib/optimization/solvers/qubo/solution_readout/process.py diff --git a/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py b/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py index 20cab0b8..5fe8d281 100644 --- a/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py +++ b/tests/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/test_solution_readout.py @@ -32,7 +32,7 @@ def setUp(self) -> None: integrator_last_bytes.s_out.connect(readgate.cost_in_last_bytes_0) integrator_first_byte.s_out.connect(readgate.cost_in_first_byte_0) readgate.solution_reader.connect_var(spiker.payload) - readgate.solution_out.connect(self.readout.state_in) + readgate.solution_out.connect(self.readout.read_solution) readgate.cost_out.connect(self.readout.cost_in) readgate.send_pause_request.connect(self.readout.timestep_in) From 
40617b87209926adb5545d7870ff98be9c6dd95a Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Mon, 22 Jan 2024 13:38:25 +0100 Subject: [PATCH 21/34] refractor --- .../solvers/generic/cost_integrator/models.py | 2 +- .../lib/optimization/solvers/generic/nebm/models.py | 6 +++++- src/lava/lib/optimization/solvers/generic/solver.py | 10 ++++++++-- .../solvers/qubo/simulated_annealing/process.py | 4 ++++ 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py b/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py index 9fc7fcae..a19ecb4a 100644 --- a/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py +++ b/src/lava/lib/optimization/solvers/generic/cost_integrator/models.py @@ -10,7 +10,7 @@ from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.lib.optimization.solvers.qubo.cost_integrator.process import ( +from lava.lib.optimization.solvers.generic.cost_integrator.process import ( CostIntegrator, ) diff --git a/src/lava/lib/optimization/solvers/generic/nebm/models.py b/src/lava/lib/optimization/solvers/generic/nebm/models.py index 587e32eb..219e8c41 100644 --- a/src/lava/lib/optimization/solvers/generic/nebm/models.py +++ b/src/lava/lib/optimization/solvers/generic/nebm/models.py @@ -1,6 +1,10 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + import numpy as np -from lava.lib.optimization.solvers.qubo.simulated_annealing.process import NEBM +from lava.lib.optimization.solvers.generic.nebm.process import NEBM from lava.magma.core.decorator import implements, requires, tag from lava.magma.core.model.py.model import PyLoihiProcessModel from lava.magma.core.model.py.ports import PyInPort, PyOutPort diff --git a/src/lava/lib/optimization/solvers/generic/solver.py b/src/lava/lib/optimization/solvers/generic/solver.py index 60a79b74..abd9a7d2 100644 --- a/src/lava/lib/optimization/solvers/generic/solver.py +++ b/src/lava/lib/optimization/solvers/generic/solver.py @@ -37,7 +37,7 @@ from lava.lib.optimization.problems.problems import QUBO from lava.lib.optimization.problems.problems import OptimizationProblem from lava.lib.optimization.solvers.generic.builder import SolverProcessBuilder -from lava.lib.optimization.solvers.qubo.cost_integrator.process import ( +from lava.lib.optimization.solvers.generic.cost_integrator.process import ( CostIntegrator, ) from lava.lib.optimization.solvers.generic.hierarchical_processes import ( @@ -50,8 +50,9 @@ SolutionReadoutPyModel, ) from lava.lib.optimization.solvers.generic.nebm.models import NEBMPyModel -from lava.lib.optimization.solvers.qubo.simulated_annealing.process import ( +from lava.lib.optimization.solvers.generic.nebm.process import ( NEBM, + SimulatedAnnealing, SimulatedAnnealingLocal, ) from lava.lib.optimization.solvers.generic.annealing.process import Annealing @@ -123,6 +124,11 @@ class NcL2ModelPG: class NcL2ModelPI: pass + +from lava.lib.optimization.solvers.generic.read_gate.models import ( + ReadGatePyModel, +) + BACKENDS = ty.Union[CPU, Loihi2NeuroCore, NeuroCore, str] HP_TYPE = ty.Union[ty.Dict, ty.List[ty.Dict]] CPUS = [CPU, "CPU"] diff --git a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py index f2c980c1..5edfb562 100644 --- a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py +++ 
b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py @@ -1,3 +1,7 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + import numpy as np import typing as ty from numpy import typing as npty From a34329f80997829e5549818cd67c3ef6521e348a Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Mon, 22 Jan 2024 16:14:24 +0100 Subject: [PATCH 22/34] lintin --- .../solvers/qubo/cost_integrator/process.py | 38 +++++----- .../solvers/qubo/solution_readout/models.py | 75 +++++++------------ .../solvers/qubo/solution_readout/process.py | 39 +++++----- 3 files changed, 68 insertions(+), 84 deletions(-) diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py index 04cc940b..28a134f1 100644 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -52,21 +52,20 @@ class CostIntegrator(AbstractProcess): """ def __init__( - self, - *, - shape: ty.Tuple[int, ...] = (1,), - target_cost: int = -2**31 + 1, - timeout: int = 2**24 - 1, - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, + self, + *, + shape: ty.Tuple[int, ...] = (1,), + target_cost: int = -2 ** 31 + 1, + timeout: int = 2 ** 24 - 1, + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, ) -> None: - self._input_validation(target_cost=target_cost, timeout=timeout) super().__init__(shape=shape, - target_cost = target_cost, - timeout = timeout, + target_cost=target_cost, + timeout=timeout, name=name, log_config=log_config) self.cost_in = InPort(shape=shape) @@ -95,11 +94,14 @@ def __init__( @staticmethod def _input_validation(target_cost, timeout) -> None: - - assert (target_cost is not None and timeout is not None), \ - f"Both the target_cost and the timeout must be defined" - assert 0 > target_cost >= -2**31 + 1, \ - f"The target cost must in the range [-2**32 + 1, 0), " \ - f"but is {target_cost}." - assert 0 < timeout <= 2**24 - 1, f"The timeout must be in the range (" \ - f"0, 2**24 - 1], but is {timeout}." 
\ No newline at end of file + if (target_cost is None and timeout is None): + raise ValueError( + f"Both the target_cost and the timeout must be defined") + if target_cost > 0 or target_cost < - 2 ** 31 + 1: + raise ValueError( + f"The target cost must in the range [-2**32 + 1, 0], " + f"but is {target_cost}.") + if timeout <= 0 or timeout > 2 ** 24 - 1: + raise ValueError( + f"The timeout must be in the range (0, 2**24 - 1], but is " + f"{timeout}.") diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 4a4b74b1..4ee73594 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -15,8 +15,7 @@ from lava.magma.core.resources import CPU from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol -from lava.lib.optimization.solvers.qubo.solution_readout.process import \ - ( +from lava.lib.optimization.solvers.qubo.solution_readout.process import ( SolutionReceiver, SpikeIntegrator ) from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol @@ -62,7 +61,7 @@ def run_async(self): _, _, states = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, - num_vars=num_vars) #[:self.best_state.shape[0]] + num_vars=num_vars) self.best_state = states self._req_pause = True @@ -76,8 +75,7 @@ def _decompress_state(compressed_states, num_message_bits, num_vars): cost = int(compressed_states[0]) timestep = int(compressed_states[1]) states = (compressed_states[2:, None] & ( - 1 << np.arange(0, num_message_bits))) != 0 - #1 << np.arange(num_message_bits - 1, -1, -1))) != 0 + 1 << np.arange(0, num_message_bits))) != 0 # reshape into a 1D array states.reshape(-1) # If n_vars is not a multiple of num_message_bits, then last entries @@ -86,22 +84,6 @@ def _decompress_state(compressed_states, num_message_bits, num_vars): return cost, timestep, states - - -""" -def test_code(): - - # Assuming you have a 32-bit integer numpy array - original_array = np.array([4294967295, 2147483647, 0, 8983218], - dtype=np.uint32) - - # Use bitwise AND operation to convert each integer to a boolean array - boolean_array = (original_array[:, None] & (1 << np.arange(31, -1, -1))) != 0 - - # Display the result - print(boolean_array) -""" - @implements(proc=SolutionReadoutEthernet, protocol=LoihiProtocol) @requires(CPU) class SolutionReadoutEthernetModel(AbstractSubProcessModel): @@ -128,7 +110,6 @@ def __init__(self, proc): ) self.synapses_state_in_0 = Sparse( weights=weights_state_in_0, - #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=0, @@ -146,7 +127,6 @@ def __init__(self, proc): ) self.synapses_state_in_1 = Sparse( weights=weights_state_in_1, - #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=8, @@ -154,7 +134,7 @@ def __init__(self, proc): proc.in_ports.states_in.connect(self.synapses_state_in_1.s_in) self.synapses_state_in_1.a_out.connect(self.spike_integrators.a_in) - + if num_bin_variables > 16: weights_state_in_2 = self._get_input_weights( num_vars=num_bin_variables, @@ -164,7 +144,6 @@ def __init__(self, proc): ) self.synapses_state_in_2 = Sparse( weights=weights_state_in_2, - #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=16, @@ -182,7 +161,6 @@ def __init__(self, proc): ) self.synapses_state_in_3 = Sparse( 
weights=weights_state_in_3, - #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=24, @@ -190,7 +168,7 @@ def __init__(self, proc): proc.in_ports.states_in.connect(self.synapses_state_in_3.s_in) self.synapses_state_in_3.a_out.connect(self.spike_integrators.a_in) - # Connect the CostIntegrator + # Connect the CostIntegrator weights_cost_in = self._get_cost_in_weights( num_spike_int=num_spike_integrators, ) @@ -205,26 +183,24 @@ def __init__(self, proc): ) self.synapses_timestep_in = Sparse( weights=weights_timestep_in, - #sign_mode=SignMode.EXCITATORY, num_weight_bits=8, num_message_bits=32, ) - + proc.in_ports.cost_in.connect(self.synapses_cost_in.s_in) self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) proc.in_ports.timestep_in.connect(self.synapses_timestep_in.s_in) self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) # Define and connect the SolutionReceiver - self.solution_receiver = SolutionReceiver( shape=(1,), - num_variables = num_bin_variables, - num_spike_integrators = num_spike_integrators, - num_message_bits = num_message_bits, - best_cost_init = proc.best_cost.get(), - best_state_init = proc.best_state.get(), - best_timestep_init = proc.best_timestep.get() + num_variables=num_bin_variables, + num_spike_integrators=num_spike_integrators, + num_message_bits=num_message_bits, + best_cost_init=proc.best_cost.get(), + best_state_init=proc.best_state.get(), + best_timestep_init=proc.best_timestep.get() ) self.spike_integrators.s_out.connect( @@ -236,22 +212,26 @@ def __init__(self, proc): proc.vars.best_cost.alias(self.solution_receiver.best_cost) @staticmethod - def _get_input_weights(num_vars, num_spike_int, num_vars_per_int, weight_exp): + def _get_input_weights(num_vars, + num_spike_int, + num_vars_per_int, + weight_exp) -> csr_matrix: """To be verified. 
Deprecated due to efficiency""" weights = np.zeros((num_spike_int, num_vars), dtype=np.uint8) # The first two SpikeIntegrators receive best_cost and best_timestep for spike_integrator in range(2, num_spike_int - 1): - variable_start = num_vars_per_int * (spike_integrator - 2) + weight_exp - weights[spike_integrator, variable_start:variable_start + - 8] = np.power(2, - np.arange(8)) + variable_start = num_vars_per_int * (spike_integrator - 2) + \ + weight_exp + weights[spike_integrator, variable_start:variable_start + 8] = \ + np.power(2, np.arange(8)) # The last spike integrator might be connected by less than # num_vars_per_int neurons # This happens when mod(num_variables, num_vars_per_int) != 0 variable_start = num_vars_per_int * (num_spike_int - 3) + weight_exp - weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1]-variable_start)) + weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1] + - variable_start)) return csr_matrix(weights) @@ -261,10 +241,11 @@ def _get_state_in_weights_index(num_vars, num_spike_int, num_vars_per_int): weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) # Compute the indices for setting the values to 1 - indices = np.arange(0, num_vars_per_int * (num_spike_int - 1), num_vars_per_int) + indices = np.arange(0, num_vars_per_int * (num_spike_int - 1), + num_vars_per_int) # Set the values to 1 using array indexing - weights[:num_spike_int-1, indices:indices + num_vars_per_int] = 1 + weights[:num_spike_int - 1, indices:indices + num_vars_per_int] = 1 # Set the values for the last spike integrator weights[-1, num_vars_per_int * (num_spike_int - 1):num_vars] = 1 @@ -274,11 +255,11 @@ def _get_state_in_weights_index(num_vars, num_spike_int, num_vars_per_int): @staticmethod def _get_cost_in_weights(num_spike_int: int) -> csr_matrix: weights = np.zeros((num_spike_int, 1), dtype=int) - weights[0,0] = 1 + weights[0, 0] = 1 return csr_matrix(weights) - + @staticmethod def _get_timestep_in_weights(num_spike_int: int) -> csr_matrix: weights = np.zeros((num_spike_int, 1), dtype=int) - weights[1,0] = 1 + weights[1, 0] = 1 return csr_matrix(weights) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py index f6f28586..4a7808e8 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py @@ -11,6 +11,7 @@ from lava.magma.core.process.ports.connection_config import ConnectionConfig + class SpikeIntegrator(AbstractProcess): """GradedVec Graded spike vector layer. Accumulates and forwards 32bit spikes. 
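# Reference sketch (illustrative only, not part of the patch): sizing and row
# layout of the readout. Two SpikeIntegrators are reserved for the best cost
# and the best timestep; each further integrator packs up to num_message_bits
# (32) binary variables, hence the "2 + ceil(N / 32)" sizing used in this
# file. The problem size below is made up for illustration.
import numpy as np

num_bin_variables, num_message_bits = 70, 32
num_spike_integrators = 2 + int(np.ceil(num_bin_variables / num_message_bits))
assert num_spike_integrators == 5            # cost + timestep + ceil(70 / 32)

# Row layout mirrored by _get_cost_in_weights and _get_timestep_in_weights:
cost_weights = np.zeros((num_spike_integrators, 1), dtype=int)
cost_weights[0, 0] = 1                       # integrator 0 receives the cost
timestep_weights = np.zeros((num_spike_integrators, 1), dtype=int)
timestep_weights[1, 0] = 1                   # integrator 1 receives the timestep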
@@ -24,7 +25,6 @@ class SpikeIntegrator(AbstractProcess): def __init__( self, shape: ty.Tuple[int, ...]) -> None: - super().__init__(shape=shape) self.a_in = InPort(shape=shape) @@ -54,13 +54,13 @@ class SolutionReadoutEthernet(AbstractProcess): """ def __init__( - self, - shape: ty.Tuple[int, ...], - connection_config: ConnectionConfig, - num_bin_variables: int, - num_message_bits = 32, - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, + self, + shape: ty.Tuple[int, ...], + connection_config: ConnectionConfig, + num_bin_variables: int, + num_message_bits=32, + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, ) -> None: """ Parameters @@ -80,7 +80,8 @@ def __init__( log_config: LogConfig, optional Configuration options for logging.z""" - num_spike_integrators = 2 + np.ceil(num_bin_variables / num_message_bits).astype(int) + num_spike_integrators = 2 + np.ceil( + num_bin_variables / num_message_bits).astype(int) super().__init__( shape=shape, @@ -130,16 +131,16 @@ class SolutionReceiver(AbstractProcess): """ def __init__( - self, - shape: ty.Tuple[int, ...], - num_variables: int, - best_cost_init: int, - best_state_init: ty.Union[npty.ArrayLike, int], - num_spike_integrators: int, - best_timestep_init: int, - num_message_bits: int = 24, - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, + self, + shape: ty.Tuple[int, ...], + num_variables: int, + best_cost_init: int, + best_state_init: ty.Union[npty.ArrayLike, int], + num_spike_integrators: int, + best_timestep_init: int, + num_message_bits: int = 24, + name: ty.Optional[str] = None, + log_config: ty.Optional[LogConfig] = None, ) -> None: super().__init__( shape=shape, From 53ab26894f38d509eac58d60ecc7d15e2cf35e03 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 23 Jan 2024 13:24:14 +0100 Subject: [PATCH 23/34] input validation and docstrings --- .../solvers/qubo/cost_integrator/process.py | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py index 28a134f1..02cb11c1 100644 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -10,13 +10,24 @@ class CostIntegrator(AbstractProcess): - """Node that integrates cost components and produces output when a better - cost is found. + """Node that monitors execution of the QUBOSolver. It integrates the cost + components from all variables. Whenever a new better solution is found, + it stores the new best cost and the associated timestep, while triggering + the variable neurons to store the new best state. Waits for stopping + criteria to be reached, either target_cost or timeout. Once reached, + it spikes out the best cost, timestep, and a trigger for the variable + neurons to spike out the best state. Parameters ---------- shape : tuple(int) The expected number and topology of the input cost components. + target_cost: int + Target cost of the QUBO solver. Once reached, the best_cost, + best_timestep, and best_state are spiked out. + timeout: int + Timeout of the QUBO solver. Once reached, the best_cost, + best_timestep, and best_state are spiked out. name : str, optional Name of the Process. Default is 'Process_ID', where ID is an integer value that is determined automatically. 
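# --- Editor's note (illustrative sketch, not part of the patch) -------------
# Quick sanity check of the readout sizing introduced in SolutionReadoutEthernet
# above: two SpikeIntegrators are reserved for best_cost and best_timestep, and
# every further integrator aggregates num_message_bits (here 32) binary
# variables.  The variable counts below are example values only.
import numpy as np

num_message_bits = 32
for num_bin_variables in (8, 32, 33, 1000):
    num_spike_integrators = 2 + np.ceil(
        num_bin_variables / num_message_bits).astype(int)
    print(num_bin_variables, "->", int(num_spike_integrators))
# 8 -> 3, 32 -> 3, 33 -> 4, 1000 -> 34
# -----------------------------------------------------------------------------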
@@ -25,23 +36,27 @@ class CostIntegrator(AbstractProcess): InPorts ------- cost_in - input to be additively integrated. + input from the variable neurons. Added, this input denotes + the total cost of the current variable assignment. OutPorts -------- - cost_out_last_bytes: OutPort - Notifies the next process about the detection of a better cost. - Messages the last 3 byte of the new best cost. - Total cost = cost_out_first_byte << 24 + cost_out_last_bytes. - cost_out_first_byte: OutPort - Notifies the next process about the detection of a better cost. - Messages the first byte of the new best cost. + control_states_out + Port to the variable neurons. + Can send either of the following three values: + 1 -> store the state, since it is the new best state + 2 -> store the state and spike it, since stopping criteria reached + 3 -> spike the best state + best_cost_out + Port to the SolutionReadout. Sends the best cost found. + best_timestep_out + Port to the SolutionReadout. Sends the timestep when the best cost + was found. Vars ---- - cost - Holds current cost as addition of input spikes' payloads - + timestep + Holds current timestep cost_min_last_bytes Current minimum cost, i.e., the lowest reported cost so far. Saves the last 3 bytes. @@ -49,14 +64,21 @@ class CostIntegrator(AbstractProcess): cost_min_first_byte Current minimum cost, i.e., the lowest reported cost so far. Saves the first byte. + cost_last_bytes + Current cost. + Saves the last 3 bytes. + cost_min = cost_min_first_byte << 24 + cost_min_last_bytes + cost_first_byte + Current cost. + Saves the first byte. """ def __init__( self, *, + target_cost: int, + timeout: int, shape: ty.Tuple[int, ...] = (1,), - target_cost: int = -2 ** 31 + 1, - timeout: int = 2 ** 24 - 1, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: From 87f14e1adc90035c22a36ac0bc7d4b00d67bcc54 Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 23 Jan 2024 14:58:03 +0100 Subject: [PATCH 24/34] added timeout in SolutionReceiver --- .../solvers/qubo/solution_readout/models.py | 16 ++++- .../solvers/qubo/solution_readout/process.py | 69 ++++++++++--------- 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 4ee73594..097cdf0d 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -2,6 +2,8 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import numpy as np +import itertools + from lava.lib.optimization.solvers.qubo.solution_readout.process import ( SolutionReadoutEthernet ) @@ -39,6 +41,7 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): best_timestep: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) best_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) num_message_bits: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) + timeout: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) results_in: PyInPort = LavaPyType( PyInPort.VEC_DENSE, np.int32, precision=32 @@ -47,10 +50,14 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): def run_async(self): num_message_bits = self.num_message_bits[0] num_vars = self.best_state.shape[0] + timeout = self.timeout[0] - results_buffer = np.zeros(self.results_in._shape) - while self._check_if_input(results_buffer): + # Iterating for timeout - 1 because an additional step is used to + # recv the 
state + for _ in itertools.repeat(None, timeout - 1): results_buffer = self.results_in.recv() + if self._check_if_input(results_buffer): + break self.best_cost, self.best_timestep, _ = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, @@ -67,7 +74,7 @@ def run_async(self): @staticmethod def _check_if_input(results_buffer): - return not results_buffer[1] > 0 + return results_buffer[1] > 0 @staticmethod def _decompress_state(compressed_states, num_message_bits, num_vars): @@ -94,6 +101,8 @@ class SolutionReadoutEthernetModel(AbstractSubProcessModel): def __init__(self, proc): num_message_bits = proc.proc_params.get("num_message_bits") + timeout = proc.proc_params.get("timeout") + # Define the dense input layer num_bin_variables = proc.proc_params.get("num_bin_variables") num_spike_integrators = proc.proc_params.get("num_spike_integrators") @@ -198,6 +207,7 @@ def __init__(self, proc): num_variables=num_bin_variables, num_spike_integrators=num_spike_integrators, num_message_bits=num_message_bits, + timeout=timeout, best_cost_init=proc.best_cost.get(), best_state_init=proc.best_state.get(), best_timestep_init=proc.best_timestep.get() diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py index 4a7808e8..3863b8dd 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py @@ -37,27 +37,34 @@ class SolutionReadoutEthernet(AbstractProcess): Attributes ---------- - a_in: InPort - The addition of all inputs (per dynamical system) at this timestep - will be received by this port. - s_out: OutPort - The payload to be exchanged between the underlying dynamical systems - when these fire. - local_cost: OutPort - The cost components per dynamical system underlying these - variables, i.e., c_i = sum_j{Q_{ij} \cdot x_i} will be sent through - this port. The cost integrator will then complete the cost computation - by adding all contributions, i.e., x^T \cdot Q \cdot x = sum_i{c_i}. - variable_assignment: Var - Holds the current value assigned to the variables by - the solver network. + best_state: Var + Best binary variables assignment. + best_cost: Var + Cost of best solution. + best_timestep: Var + Time step when best solution was found. + + InPorts: + ---------- + states_in: InPort + Receives the best binary (1bit) states. Shape is determined by the + number of + binary variables. + cost_in: InPort + Receives the best 32bit cost. + timestep_in: InPort + Receives the best 32bit timestep. + + OutPorts: + ---------- """ def __init__( self, shape: ty.Tuple[int, ...], - connection_config: ConnectionConfig, + timeout: int, num_bin_variables: int, + connection_config: ConnectionConfig, num_message_bits=32, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, @@ -67,13 +74,11 @@ def __init__( ---------- shape: tuple A tuple of the form (number of variables, domain size). - cost_diagonal: npty.ArrayLike - The diagonal of the coefficient of the quadratic term on the cost - function. - cost_off_diagonal: npty.ArrayLike - The off-diagonal of the coefficient of the quadratic term on the - cost function. - hyperparameters: dict, optional + num_bin_variables: int + The number of binary (1bit) variables. + num_message_bits: int + Defines the number of bits of a single message via spikeIO. + Currently only tested for 32bits. name: str, optional Name of the Process. 
Default is 'Process_ID', where ID is an integer value that is determined automatically. @@ -87,6 +92,7 @@ def __init__( shape=shape, num_spike_integrators=num_spike_integrators, num_bin_variables=num_bin_variables, + timeout=timeout, num_message_bits=num_message_bits, connection_config=connection_config, name=name, @@ -107,14 +113,14 @@ class SolutionReceiver(AbstractProcess): Parameters ---------- shape: The shape of the set of nodes, or process, which state will be read. - target_cost: cost value at which, once attained by the network, - this process will stop execution. - name: Name of the Process. Default is 'Process_ID', where ID is an - integer value that is determined automatically. - log_config: Configuration options for logging. - time_steps_per_algorithmic_step: the number of iteration steps that a - single algorithmic step requires. This value is required to decode the - variable values from the spk_hist of a process. + target_cost: int + cost value at which, once attained by the network, this process will + stop execution. + name: str + Name of the Process. Default is 'Process_ID', where ID is an integer + value that is determined automatically. + log_config: + Configuration options for logging. Attributes ---------- @@ -134,6 +140,7 @@ def __init__( self, shape: ty.Tuple[int, ...], num_variables: int, + timeout: int, best_cost_init: int, best_state_init: ty.Union[npty.ArrayLike, int], num_spike_integrators: int, @@ -144,7 +151,6 @@ def __init__( ) -> None: super().__init__( shape=shape, - num_variables=num_variables, name=name, log_config=log_config, ) @@ -153,4 +159,5 @@ def __init__( self.best_timestep = Var(shape=(1,), init=best_timestep_init) self.best_cost = Var(shape=(1,), init=best_cost_init) self.num_message_bits = Var(shape=(1,), init=num_message_bits) + self.timeout = Var(shape=(1,), init=timeout) self.results_in = InPort(shape=(num_spike_integrators,)) From b532d53267e0ec31535fd79897f0e5d2ddd33f6c Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Tue, 23 Jan 2024 15:15:04 +0100 Subject: [PATCH 25/34] linting --- .../lib/optimization/solvers/qubo/cost_integrator/process.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py index 02cb11c1..864bd9f3 100644 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -3,7 +3,6 @@ # See: https://spdx.org/licenses/ import typing as ty -import numpy as np from lava.magma.core.process.ports.ports import InPort, OutPort from lava.magma.core.process.process import AbstractProcess, LogConfig from lava.magma.core.process.variable import Var @@ -118,7 +117,7 @@ def __init__( def _input_validation(target_cost, timeout) -> None: if (target_cost is None and timeout is None): raise ValueError( - f"Both the target_cost and the timeout must be defined") + "Both the target_cost and the timeout must be defined") if target_cost > 0 or target_cost < - 2 ** 31 + 1: raise ValueError( f"The target cost must in the range [-2**32 + 1, 0], " From 9fa12456e5152b0e297be0872cc883a47fc9f89c Mon Sep 17 00:00:00 2001 From: "Stratmann, Philipp" Date: Wed, 24 Jan 2024 11:49:35 +0100 Subject: [PATCH 26/34] licenses --- .../generic/monitoring_processes/solution_readout/models.py | 2 +- src/lava/lib/optimization/solvers/generic/nebm/models.py | 2 +- .../lib/optimization/solvers/qubo/cost_integrator/process.py | 2 
+- .../optimization/solvers/qubo/simulated_annealing/process.py | 2 +- .../lib/optimization/solvers/qubo/solution_readout/process.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py b/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py index 0e973a29..6221b9b8 100644 --- a/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/generic/monitoring_processes/solution_readout/models.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import numpy as np diff --git a/src/lava/lib/optimization/solvers/generic/nebm/models.py b/src/lava/lib/optimization/solvers/generic/nebm/models.py index 219e8c41..683883c4 100644 --- a/src/lava/lib/optimization/solvers/generic/nebm/models.py +++ b/src/lava/lib/optimization/solvers/generic/nebm/models.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py index 864bd9f3..951500b9 100644 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import typing as ty diff --git a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py index 5edfb562..7e5046d1 100644 --- a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py +++ b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py index 3863b8dd..552003ac 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py @@ -1,4 +1,4 @@ -# Copyright (C) 2023 Intel Corporation +# Copyright (C) 2023-2024 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import numpy as np From c54bd4945f7ad697ce5b202113ce1d65b7a5d6f0 Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Tue, 6 Feb 2024 08:34:52 -0800 Subject: [PATCH 27/34] Bugfix in SolutionReceiver pymodel --- .../lib/optimization/solvers/qubo/solution_readout/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 097cdf0d..80fd2578 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -70,7 +70,6 @@ def run_async(self): num_message_bits=num_message_bits, num_vars=num_vars) self.best_state = states - 
self._req_pause = True @staticmethod def _check_if_input(results_buffer): From f18037c0d244c1d5e7481b762e826d7eb6a4bea6 Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Wed, 7 Feb 2024 02:13:47 -0800 Subject: [PATCH 28/34] Expose steps_per_temperature as var in Annealing --- src/lava/lib/optimization/solvers/generic/annealing/process.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/lava/lib/optimization/solvers/generic/annealing/process.py b/src/lava/lib/optimization/solvers/generic/annealing/process.py index f2b02610..c3ce0fc8 100644 --- a/src/lava/lib/optimization/solvers/generic/annealing/process.py +++ b/src/lava/lib/optimization/solvers/generic/annealing/process.py @@ -77,6 +77,9 @@ def __init__( self.delta_temperature_out = OutPort(shape=shape) self.temperature = Var(shape=shape, init=np.int_(max_temperature)) + self.steps_per_temperature = Var( + shape=shape, init=np.int_(steps_per_temperature) + ) @property def shape(self) -> ty.Tuple[int, ...]: From 4052cda0a5e6ab4779aaddc6863c757b056dec67 Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Thu, 8 Feb 2024 08:12:25 -0800 Subject: [PATCH 29/34] Draft re-initialization of QUBOSolver Co-authored-by: Philipp Stratmann --- .../lib/optimization/solvers/qubo/solution_readout/models.py | 1 + .../lib/optimization/solvers/qubo/solution_readout/process.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 80fd2578..952becb6 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -219,6 +219,7 @@ def __init__(self, proc): proc.vars.best_state.alias(self.solution_receiver.best_state) proc.vars.best_timestep.alias(self.solution_receiver.best_timestep) proc.vars.best_cost.alias(self.solution_receiver.best_cost) + proc.vars.timeout.alias(self.solution_receiver.timeout) @staticmethod def _get_input_weights(num_vars, diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py index 552003ac..a24dfafd 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py @@ -105,7 +105,7 @@ def __init__( self.best_state = Var(shape=(num_bin_variables,), init=0) self.best_timestep = Var(shape=(1,), init=1) self.best_cost = Var(shape=(1,), init=0) - + self.timeout = Var(shape=(1,), init=timeout) class SolutionReceiver(AbstractProcess): """Process to readout solution from SNN and make it available on host. 
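# --- Editor's note (illustrative sketch, not part of the patch) -------------
# The CostIntegrator patches above keep every 32-bit cost split into a first
# byte plus the last three bytes, i.e. cost = (cost_first_byte << 24)
# + cost_last_bytes.  The round-trip below checks that encoding for the
# negative costs a QUBO run produces; the helper names are made up for
# illustration and are not part of the library.
import numpy as np

def split_cost(cost: int):
    """Return (first byte, last three bytes) of a signed 32-bit cost."""
    as_u32 = int(np.array(cost, dtype=np.int32).view(np.uint32))
    return as_u32 >> 24, as_u32 & 0x00FFFFFF

def merge_cost(first_byte: int, last_bytes: int) -> int:
    merged = np.array((first_byte << 24) + last_bytes, dtype=np.uint32)
    return int(merged.view(np.int32))

for cost in (0, -1, -42, -2 ** 31 + 1):
    assert merge_cost(*split_cost(cost)) == cost
# -----------------------------------------------------------------------------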
From 08c5641c2749be6123c3f0fa906a0669284f263e Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Mon, 12 Feb 2024 10:08:41 -0800 Subject: [PATCH 30/34] Enables re-initialization of QUBOSolver Co-authored-by: Philipp Stratmann --- .../solvers/generic/annealing/process.py | 3 + .../solvers/qubo/cost_integrator/process.py | 5 +- .../solvers/qubo/solution_readout/models.py | 68 ++++++++++++------- 3 files changed, 48 insertions(+), 28 deletions(-) diff --git a/src/lava/lib/optimization/solvers/generic/annealing/process.py b/src/lava/lib/optimization/solvers/generic/annealing/process.py index c3ce0fc8..97474e86 100644 --- a/src/lava/lib/optimization/solvers/generic/annealing/process.py +++ b/src/lava/lib/optimization/solvers/generic/annealing/process.py @@ -80,6 +80,9 @@ def __init__( self.steps_per_temperature = Var( shape=shape, init=np.int_(steps_per_temperature) ) + self.temperature_counter = Var( + shape=shape, init=np.int_(steps_per_temperature) + ) @property def shape(self) -> ty.Tuple[int, ...]: diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py index 951500b9..003b10b8 100644 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -86,7 +86,6 @@ def __init__( super().__init__(shape=shape, target_cost=target_cost, - timeout=timeout, name=name, log_config=log_config) self.cost_in = InPort(shape=shape) @@ -95,9 +94,9 @@ def __init__( self.best_timestep_out = OutPort(shape=shape) # Counter for timesteps - self.timestep = Var(shape=shape, init=0) + self.timestep_inverse = Var(shape=shape, init=timeout) # Storage for best current time step - self.best_timestep = Var(shape=shape, init=0) + self.best_timestep_inverse = Var(shape=shape, init=timeout) # Var to store current cost # Note: Total cost = cost_first_byte << 24 + cost_last_bytes diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 952becb6..45a49718 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -3,6 +3,8 @@ # See: https://spdx.org/licenses/ import numpy as np import itertools +import typing as ty +import numpy.typing as npty from lava.lib.optimization.solvers.qubo.solution_readout.process import ( SolutionReadoutEthernet @@ -36,49 +38,57 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): user, once this cost is reached by the solver network, this process will request the runtime service to pause execution. 
""" - best_state: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) best_timestep: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) best_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) num_message_bits: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) timeout: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) - - results_in: PyInPort = LavaPyType( - PyInPort.VEC_DENSE, np.int32, precision=32 - ) + results_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, 32) def run_async(self): + self.best_timestep[:] = 1 + self.best_cost[:] = 0 + self.best_state[:] = 0 num_message_bits = self.num_message_bits[0] num_vars = self.best_state.shape[0] timeout = self.timeout[0] - # Iterating for timeout - 1 because an additional step is used to # recv the state - for _ in itertools.repeat(None, timeout - 1): + while True: results_buffer = self.results_in.recv() - if self._check_if_input(results_buffer): - break - self.best_cost, self.best_timestep, _ = self._decompress_state( + if self._check_if_input(results_buffer): break + self.best_cost[:], self.best_timestep[:], _ = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, - num_vars=num_vars) - + num_vars=num_vars, + timeout=timeout + ) # best states are returned with a delay of 1 timestep results_buffer = self.results_in.recv() - _, _, states = self._decompress_state( + _, _, self.best_state = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, - num_vars=num_vars) - self.best_state = states + num_vars=num_vars, + timeout=timeout + ) @staticmethod - def _check_if_input(results_buffer): + def _check_if_input(results_buffer: np.ndarray) -> bool: return results_buffer[1] > 0 @staticmethod - def _decompress_state(compressed_states, num_message_bits, num_vars): + def _decompress_state( + compressed_states: np.ndarray, + num_message_bits: int, + num_vars: int, + timeout: int + ) -> ty.Tuple[int, int, np.ndarray]: """Add info!""" cost = int(compressed_states[0]) + # Explanation for this: + # The CostIntegrator initialized its inverse_timestep with timeout-2 + # It notices that a best solution has been found 1 timestep after the variable neurons + # At this time, it has already subtracted 1 from inverse_timestep twice timestep = int(compressed_states[1]) states = (compressed_states[2:, None] & ( 1 << np.arange(0, num_message_bits))) != 0 @@ -89,13 +99,15 @@ def _decompress_state(compressed_states, num_message_bits, num_vars): states = states.astype(np.int8).flatten()[:num_vars] return cost, timestep, states + @staticmethod + def postprocess_best_timestep(time_step, timeout) -> int: + return timeout - time_step - 3 + @implements(proc=SolutionReadoutEthernet, protocol=LoihiProtocol) @requires(CPU) class SolutionReadoutEthernetModel(AbstractSubProcessModel): - """Model for the SolutionReadout process. - - """ + """Model for the SolutionReadout process.""" def __init__(self, proc): num_message_bits = proc.proc_params.get("num_message_bits") @@ -222,10 +234,12 @@ def __init__(self, proc): proc.vars.timeout.alias(self.solution_receiver.timeout) @staticmethod - def _get_input_weights(num_vars, - num_spike_int, - num_vars_per_int, - weight_exp) -> csr_matrix: + def _get_input_weights( + num_vars: int, + num_spike_int: int, + num_vars_per_int: int, + weight_exp: int + ) -> csr_matrix: """To be verified. 
Deprecated due to efficiency""" weights = np.zeros((num_spike_int, num_vars), dtype=np.uint8) @@ -246,7 +260,11 @@ def _get_input_weights(num_vars, return csr_matrix(weights) @staticmethod - def _get_state_in_weights_index(num_vars, num_spike_int, num_vars_per_int): + def _get_state_in_weights_index( + num_vars: int, + num_spike_int: int, + num_vars_per_int: int + ) -> np.ndarray: """To be verified""" weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) From 1327f4eecf470aacc98dc93a0ac23175a2236c92 Mon Sep 17 00:00:00 2001 From: "Pierro, Alessandro" Date: Tue, 13 Feb 2024 03:36:52 -0800 Subject: [PATCH 31/34] Add printing to SolutionReadout --- .../lib/optimization/solvers/qubo/solution_readout/models.py | 5 +++++ .../optimization/solvers/qubo/solution_readout/process.py | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 45a49718..20eec277 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -71,6 +71,11 @@ def run_async(self): num_vars=num_vars, timeout=timeout ) + print("==============================================================") + print("Solution found!") + print(f"Best cost: {int(self.best_cost[0])}") + print(f"Best state: {self.best_state.tolist()}") + print("==============================================================") @staticmethod def _check_if_input(results_buffer: np.ndarray) -> bool: diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py index a24dfafd..89713eef 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py @@ -98,7 +98,8 @@ def __init__( name=name, log_config=log_config, ) - + # Default values for best_cost, best_state, and best_timestep are also + # assigned in the proc models run_async method. self.states_in = InPort(shape=(num_bin_variables,)) self.cost_in = InPort((1,)) self.timestep_in = InPort((1,)) @@ -107,6 +108,7 @@ def __init__( self.best_cost = Var(shape=(1,), init=0) self.timeout = Var(shape=(1,), init=timeout) + class SolutionReceiver(AbstractProcess): """Process to readout solution from SNN and make it available on host. From 1fcf2421b1a46f05deae0350cd7662042a31f86a Mon Sep 17 00:00:00 2001 From: Philipp Stratmann Date: Thu, 15 Feb 2024 01:46:12 -0800 Subject: [PATCH 32/34] renamed best_state --- .../solvers/qubo/simulated_annealing/process.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py index 7e5046d1..cc015ed8 100644 --- a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py +++ b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py @@ -86,8 +86,10 @@ def __init__( # Storage for the best state. 
Will get updated whenever a better # state was found # Default is all zeros - self.best_state = Var(shape=shape, - init=np.zeros(shape=shape, dtype=int)) + self.best_variable_assignment = Var( + shape=shape, + init=np.zeros(shape=shape, dtype=int) + ) # Initial state determined in DiscreteVariables self.state = Var( shape=shape, From e2bcb812c8e475b76ae618a52a9e50959128a141 Mon Sep 17 00:00:00 2001 From: Philipp Stratmann <86950058+phstratmann@users.noreply.github.com> Date: Wed, 21 Feb 2024 14:39:28 +0100 Subject: [PATCH 33/34] Generalize SolutionReadout --- .../solvers/qubo/cost_integrator/process.py | 4 +- .../qubo/simulated_annealing/process.py | 2 +- .../solvers/qubo/solution_readout/models.py | 337 ++++++++++-------- .../solvers/qubo/solution_readout/process.py | 174 ++++++--- 4 files changed, 299 insertions(+), 218 deletions(-) diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py index 003b10b8..ce8cfbd1 100644 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py @@ -23,10 +23,10 @@ class CostIntegrator(AbstractProcess): The expected number and topology of the input cost components. target_cost: int Target cost of the QUBO solver. Once reached, the best_cost, - best_timestep, and best_state are spiked out. + best_timestep, and best_variable_assignment are spiked out. timeout: int Timeout of the QUBO solver. Once reached, the best_cost, - best_timestep, and best_state are spiked out. + best_timestep, and best_variable_assignment are spiked out. name : str, optional Name of the Process. Default is 'Process_ID', where ID is an integer value that is determined automatically. diff --git a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py index cc015ed8..2426b2e8 100644 --- a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py +++ b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py @@ -68,7 +68,7 @@ def __init__( self.control_cost_integrator = InPort(shape=shape) self.s_sig_out = OutPort(shape=shape) self.s_wta_out = OutPort(shape=shape) - self.best_state_out = OutPort(shape=shape) + self.best_variable_assignment_out = OutPort(shape=shape) self.spk_hist = Var( shape=shape, init=(np.zeros(shape=shape) + init_value).astype(int) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py index 20eec277..a068a8fa 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py @@ -25,12 +25,14 @@ from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol from lava.proc.sparse.process import Sparse from scipy.sparse import csr_matrix +from abc import ABC, abstractmethod @implements(SolutionReceiver, protocol=AsyncProtocol) @requires(CPU) -class SolutionReceiverPyModel(PyAsyncProcessModel): +class SolutionReceiverAbstractPyModel(PyAsyncProcessModel, ABC): """CPU model for the SolutionReadout process. + This is the abstract class. 
The process receives two types of messages, an updated cost and the state of the solver network representing the current candidate solution to an @@ -38,75 +40,109 @@ class SolutionReceiverPyModel(PyAsyncProcessModel): user, once this cost is reached by the solver network, this process will request the runtime service to pause execution. """ - best_state: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) - best_timestep: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) - best_cost: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) + + variables_1bit: np.ndarray = LavaPyType(np.ndarray, np.uint8, 1) + variables_32bit: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) num_message_bits: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) timeout: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) results_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, 32) + @abstractmethod def run_async(self): - self.best_timestep[:] = 1 - self.best_cost[:] = 0 - self.best_state[:] = 0 + pass + + @staticmethod + def _decompress_state(compressed_states, + num_message_bits, + variables_1bit_num, + variables_32bit_num): + """Receives the output of a recv from SolutionReadout, and extracts + 32bit and 1bit variables!""" + + variables_32bit = compressed_states[:variables_32bit_num].astype( + np.int32) + + variables_1bit = (compressed_states[variables_32bit_num:, None] & ( + 1 << np.arange(0, num_message_bits))) != 0 + + # reshape into a 1D array + variables_1bit.reshape(-1) + # If n_vars is not a multiple of num_message_bits, then last entries + # must be cut off + variables_1bit = variables_1bit.astype( + np.int8).flatten()[:variables_1bit_num] + + return variables_32bit, variables_1bit + + +@implements(SolutionReceiver, protocol=AsyncProtocol) +@requires(CPU) +class SolutionReceiverQUBOPyModel(SolutionReceiverAbstractPyModel): + """CPU model for the SolutionReadout process. + This model is specific for the QUBO Solver. 
+ + See docstring of parent class for more information + """ + + def run_async(self): + + # Get required user input num_message_bits = self.num_message_bits[0] - num_vars = self.best_state.shape[0] + variables_1bit_num = self.variables_1bit.shape[0] + variables_32bit_num = self.variables_32bit.shape[0] timeout = self.timeout[0] + + # Set default values, required only if the Process will be restarted + self.variables_32bit[1] = 1 + self.variables_32bit[0] = 0 + self.variables_1bit[:] = 0 + # Iterating for timeout - 1 because an additional step is used to # recv the state while True: results_buffer = self.results_in.recv() - if self._check_if_input(results_buffer): break - self.best_cost[:], self.best_timestep[:], _ = self._decompress_state( + + if self._check_if_input(results_buffer): + break + + results_buffer, _ = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, - num_vars=num_vars, - timeout=timeout - ) + variables_1bit_num=variables_1bit_num, + variables_32bit_num=variables_32bit_num) + self.variables_32bit = results_buffer + # best states are returned with a delay of 1 timestep results_buffer = self.results_in.recv() - _, _, self.best_state = self._decompress_state( + _, results_buffer = self._decompress_state( compressed_states=results_buffer, num_message_bits=num_message_bits, - num_vars=num_vars, - timeout=timeout - ) + variables_1bit_num=variables_1bit_num, + variables_32bit_num=variables_32bit_num) + self.variables_1bit = results_buffer + print("==============================================================") print("Solution found!") - print(f"Best cost: {int(self.best_cost[0])}") - print(f"Best state: {self.best_state.tolist()}") + print(f"{self.variables_32bit=}") + print(f"{self.variables_1bit=}") print("==============================================================") @staticmethod - def _check_if_input(results_buffer: np.ndarray) -> bool: - return results_buffer[1] > 0 + def _check_if_input(results_buffer) -> bool: + """For QUBO, we know that the readout starts as soon as the 2nd output + (best_timestep) is > 0.""" - @staticmethod - def _decompress_state( - compressed_states: np.ndarray, - num_message_bits: int, - num_vars: int, - timeout: int - ) -> ty.Tuple[int, int, np.ndarray]: - """Add info!""" - cost = int(compressed_states[0]) - # Explanation for this: - # The CostIntegrator initialized its inverse_timestep with timeout-2 - # It notices that a best solution has been found 1 timestep after the variable neurons - # At this time, it has already subtracted 1 from inverse_timestep twice - timestep = int(compressed_states[1]) - states = (compressed_states[2:, None] & ( - 1 << np.arange(0, num_message_bits))) != 0 - # reshape into a 1D array - states.reshape(-1) - # If n_vars is not a multiple of num_message_bits, then last entries - # must be cut off - states = states.astype(np.int8).flatten()[:num_vars] - return cost, timestep, states + return results_buffer[1] > 0 @staticmethod - def postprocess_best_timestep(time_step, timeout) -> int: - return timeout - time_step - 3 + def postprocess_variables_32bit( + variables_32bit, + timeout, + ) -> ty.Tuple[int, int]: + best_cost = variables_32bit[0] + best_timestep = variables_32bit[1] + best_timestep = timeout - best_timestep - 3 + return best_cost, best_timestep @implements(proc=SolutionReadoutEthernet, protocol=LoihiProtocol) @@ -119,180 +155,167 @@ def __init__(self, proc): timeout = proc.proc_params.get("timeout") - # Define the dense input layer - num_bin_variables = 
proc.proc_params.get("num_bin_variables") + variables_1bit_num = proc.variables_1bit.shape[0] + variables_32bit_num = proc.variables_32bit.shape[0] num_spike_integrators = proc.proc_params.get("num_spike_integrators") connection_config = proc.proc_params.get("connection_config") self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) - weights_state_in_0 = self._get_input_weights( - num_vars=num_bin_variables, + # Connect the 1bit binary neurons + + weights_variables_1bit_0_in = self._get_input_weights( + variables_1bit_num=variables_1bit_num, + variables_32bit_num=variables_32bit_num, num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, + num_1bit_vars_per_int=num_message_bits, weight_exp=0 ) - self.synapses_state_in_0 = Sparse( - weights=weights_state_in_0, + self.synapses_variables_1bit_0_in = Sparse( + weights=weights_variables_1bit_0_in, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=0, ) - proc.in_ports.states_in.connect(self.synapses_state_in_0.s_in) - self.synapses_state_in_0.a_out.connect(self.spike_integrators.a_in) + proc.in_ports.variables_1bit_in.connect( + self.synapses_variables_1bit_0_in.s_in) + self.synapses_variables_1bit_0_in.a_out.connect( + self.spike_integrators.a_in) - if num_bin_variables > 8: - weights_state_in_1 = self._get_input_weights( - num_vars=num_bin_variables, + if variables_1bit_num > 8: + weights_variables_1bit_1_in = self._get_input_weights( + variables_1bit_num=variables_1bit_num, + variables_32bit_num=variables_32bit_num, num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, + num_1bit_vars_per_int=num_message_bits, weight_exp=8 ) - self.synapses_state_in_1 = Sparse( - weights=weights_state_in_1, + self.synapses_variables_1bit_1_in = Sparse( + weights=weights_variables_1bit_1_in, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=8, ) - proc.in_ports.states_in.connect(self.synapses_state_in_1.s_in) - self.synapses_state_in_1.a_out.connect(self.spike_integrators.a_in) + proc.in_ports.variables_1bit_in.connect( + self.synapses_variables_1bit_1_in.s_in) + self.synapses_variables_1bit_1_in.a_out.connect( + self.spike_integrators.a_in) - if num_bin_variables > 16: - weights_state_in_2 = self._get_input_weights( - num_vars=num_bin_variables, + if variables_1bit_num > 16: + weights_variables_1bit_2_in = self._get_input_weights( + variables_1bit_num=variables_1bit_num, + variables_32bit_num=variables_32bit_num, num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, + num_1bit_vars_per_int=num_message_bits, weight_exp=16 ) - self.synapses_state_in_2 = Sparse( - weights=weights_state_in_2, + self.synapses_variables_1bit_2_in = Sparse( + weights=weights_variables_1bit_2_in, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=16, ) - proc.in_ports.states_in.connect(self.synapses_state_in_2.s_in) - self.synapses_state_in_2.a_out.connect(self.spike_integrators.a_in) + proc.in_ports.variables_1bit_in.connect( + self.synapses_variables_1bit_2_in.s_in) + self.synapses_variables_1bit_2_in.a_out.connect( + self.spike_integrators.a_in) - if num_bin_variables > 24: - weights_state_in_3 = self._get_input_weights( - num_vars=num_bin_variables, + if variables_1bit_num > 24: + weights_variables_1bit_3_in = self._get_input_weights( + variables_1bit_num=variables_1bit_num, + variables_32bit_num=variables_32bit_num, num_spike_int=num_spike_integrators, - num_vars_per_int=num_message_bits, + num_1bit_vars_per_int=num_message_bits, weight_exp=24 ) - 
self.synapses_state_in_3 = Sparse( - weights=weights_state_in_3, + self.synapses_variables_1bit_3_in = Sparse( + weights=weights_variables_1bit_3_in, num_weight_bits=8, num_message_bits=num_message_bits, weight_exp=24, ) - proc.in_ports.states_in.connect(self.synapses_state_in_3.s_in) - self.synapses_state_in_3.a_out.connect(self.spike_integrators.a_in) - - # Connect the CostIntegrator - weights_cost_in = self._get_cost_in_weights( - num_spike_int=num_spike_integrators, - ) - self.synapses_cost_in = Sparse( - weights=weights_cost_in, - num_weight_bits=8, - num_message_bits=32, - ) - - weights_timestep_in = self._get_timestep_in_weights( - num_spike_int=num_spike_integrators, - ) - self.synapses_timestep_in = Sparse( - weights=weights_timestep_in, - num_weight_bits=8, - num_message_bits=32, - ) + proc.in_ports.variables_1bit_in.connect( + self.synapses_variables_1bit_3_in.s_in) + self.synapses_variables_1bit_3_in.a_out.connect( + self.spike_integrators.a_in) + + # Connect the 32bit InPorts, one by one + for ii in range(variables_32bit_num): + # Create the synapses for InPort ii as self. + synapses_in = Sparse( + weights=self._get_32bit_in_weights( + num_spike_int=num_spike_integrators, + var_index=ii), + num_weight_bits=8, + num_message_bits=32,) + setattr(self, f"synapses_variables_32bit_{ii}_in", synapses_in) - proc.in_ports.cost_in.connect(self.synapses_cost_in.s_in) - self.synapses_cost_in.a_out.connect(self.spike_integrators.a_in) - proc.in_ports.timestep_in.connect(self.synapses_timestep_in.s_in) - self.synapses_timestep_in.a_out.connect(self.spike_integrators.a_in) + getattr(proc.in_ports, + f"variables_32bit_{ii}_in").connect(synapses_in.s_in) + synapses_in.a_out.connect(self.spike_integrators.a_in) # Define and connect the SolutionReceiver self.solution_receiver = SolutionReceiver( shape=(1,), - num_variables=num_bin_variables, + timeout=timeout, + variables_1bit_num=variables_1bit_num, + variables_1bit_init=proc.variables_1bit.get(), + variables_32bit_num=variables_32bit_num, + variables_32bit_init=proc.variables_32bit.get(), num_spike_integrators=num_spike_integrators, num_message_bits=num_message_bits, - timeout=timeout, - best_cost_init=proc.best_cost.get(), - best_state_init=proc.best_state.get(), - best_timestep_init=proc.best_timestep.get() ) self.spike_integrators.s_out.connect( self.solution_receiver.results_in, connection_config) # Create aliases for variables - proc.vars.best_state.alias(self.solution_receiver.best_state) - proc.vars.best_timestep.alias(self.solution_receiver.best_timestep) - proc.vars.best_cost.alias(self.solution_receiver.best_cost) + proc.vars.variables_1bit.alias(self.solution_receiver.variables_1bit) + proc.vars.variables_32bit.alias(self.solution_receiver.variables_32bit) proc.vars.timeout.alias(self.solution_receiver.timeout) @staticmethod - def _get_input_weights( - num_vars: int, - num_spike_int: int, - num_vars_per_int: int, - weight_exp: int - ) -> csr_matrix: - """To be verified. 
Deprecated due to efficiency""" - - weights = np.zeros((num_spike_int, num_vars), dtype=np.uint8) - - # The first two SpikeIntegrators receive best_cost and best_timestep - for spike_integrator in range(2, num_spike_int - 1): - variable_start = num_vars_per_int * (spike_integrator - 2) + \ - weight_exp - weights[spike_integrator, variable_start:variable_start + 8] = \ - np.power(2, np.arange(8)) + def _get_input_weights(variables_1bit_num, + variables_32bit_num, + num_spike_int, + num_1bit_vars_per_int, + weight_exp) -> csr_matrix: + """Builds weight matrices from 1bit variable neurons to + SpikeIntegrators. For this, num_spike_int binary neurons are bundled + and converge onto 1 SpikeIntegrator. For efficiency reasons, this + function may get vectorized in the future.""" + + weights = np.zeros((num_spike_int, variables_1bit_num), dtype=np.uint8) + + # The first SpikeIntegrators receive 32bit variables + for spike_integrator_id in range(variables_32bit_num, + num_spike_int - 1): + variable_start = num_1bit_vars_per_int * ( + spike_integrator_id - variables_32bit_num) + weight_exp + weights[spike_integrator_id, + variable_start:variable_start + 8] = np.power(2, + np.arange(8)) # The last spike integrator might be connected by less than - # num_vars_per_int neurons - # This happens when mod(num_variables, num_vars_per_int) != 0 - variable_start = num_vars_per_int * (num_spike_int - 3) + weight_exp + # num_1bit_vars_per_int neurons + # This happens when mod(num_variables, num_1bit_vars_per_int) != 0 + variable_start = num_1bit_vars_per_int * ( + num_spike_int - variables_32bit_num - 1) + weight_exp weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1] - variable_start)) return csr_matrix(weights) @staticmethod - def _get_state_in_weights_index( - num_vars: int, - num_spike_int: int, - num_vars_per_int: int - ) -> np.ndarray: - """To be verified""" - weights = np.zeros((num_spike_int, num_vars), dtype=np.int8) - - # Compute the indices for setting the values to 1 - indices = np.arange(0, num_vars_per_int * (num_spike_int - 1), - num_vars_per_int) + def _get_32bit_in_weights(num_spike_int: int, var_index: int) -> csr_matrix: - # Set the values to 1 using array indexing - weights[:num_spike_int - 1, indices:indices + num_vars_per_int] = 1 + data = [1] + row = [var_index] + col = [0] - # Set the values for the last spike integrator - weights[-1, num_vars_per_int * (num_spike_int - 1):num_vars] = 1 - - return weights - - @staticmethod - def _get_cost_in_weights(num_spike_int: int) -> csr_matrix: - weights = np.zeros((num_spike_int, 1), dtype=int) - weights[0, 0] = 1 - return csr_matrix(weights) - - @staticmethod - def _get_timestep_in_weights(num_spike_int: int) -> csr_matrix: - weights = np.zeros((num_spike_int, 1), dtype=int) - weights[1, 0] = 1 - return csr_matrix(weights) + return csr_matrix((data, (row, col)), + shape=(num_spike_int, 1), + dtype=np.int8) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py index 89713eef..09ee8a89 100644 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py @@ -37,23 +37,21 @@ class SolutionReadoutEthernet(AbstractProcess): Attributes ---------- - best_state: Var - Best binary variables assignment. - best_cost: Var - Cost of best solution. - best_timestep: Var - Time step when best solution was found. + variables_1bit: Var + Binary variables assignment. 
+ variables_1bit: Var + Values of 32 bit variables. Initiated by the parameter + variables_32bit_init. The shape is determined by variables_32bit_num. InPorts: ---------- - states_in: InPort + variables_1bit_in: InPort Receives the best binary (1bit) states. Shape is determined by the - number of - binary variables. - cost_in: InPort - Receives the best 32bit cost. - timestep_in: InPort - Receives the best 32bit timestep. + number of binary variables. + variables_32bit__in: InPort + Receives 32bit variable. The number of InPorts is defined by + variables_32bit_num. For each 32bit variable ii, there is a + corresponding InPort variables_32bit_ii_in dynamically created. OutPorts: ---------- @@ -63,7 +61,10 @@ def __init__( self, shape: ty.Tuple[int, ...], timeout: int, - num_bin_variables: int, + variables_1bit_num: int, + variables_1bit_init: ty.Union[int, ty.List[int]], + variables_32bit_num: int, + variables_32bit_init: ty.Union[int, ty.List[int]], connection_config: ConnectionConfig, num_message_bits=32, name: ty.Optional[str] = None, @@ -74,8 +75,14 @@ def __init__( ---------- shape: tuple A tuple of the form (number of variables, domain size). - num_bin_variables: int - The number of binary (1bit) variables. + timeout: int + After timeout time steps, the run will be stopped. + variables_1bit_num: int + The number of 1bit (binary) variables. + variables_32bit_num: int + The number of 32bit variables and ports. + variables_32bit_init: int, list[int] + The initial values for the 32bit variables. num_message_bits: int Defines the number of bits of a single message via spikeIO. Currently only tested for 32bits. @@ -83,83 +90,134 @@ def __init__( Name of the Process. Default is 'Process_ID', where ID is an integer value that is determined automatically. log_config: LogConfig, optional - Configuration options for logging.z""" + Configuration options for logging.z + """ + + self._validate_input(variables_32bit_num, variables_32bit_init) - num_spike_integrators = 2 + np.ceil( - num_bin_variables / num_message_bits).astype(int) + num_spike_integrators = variables_32bit_num + np.ceil( + variables_1bit_num / num_message_bits).astype(int) super().__init__( shape=shape, - num_spike_integrators=num_spike_integrators, - num_bin_variables=num_bin_variables, timeout=timeout, + num_spike_integrators=num_spike_integrators, num_message_bits=num_message_bits, connection_config=connection_config, name=name, log_config=log_config, ) - # Default values for best_cost, best_state, and best_timestep are also - # assigned in the proc models run_async method. 
- self.states_in = InPort(shape=(num_bin_variables,)) - self.cost_in = InPort((1,)) - self.timestep_in = InPort((1,)) - self.best_state = Var(shape=(num_bin_variables,), init=0) - self.best_timestep = Var(shape=(1,), init=1) - self.best_cost = Var(shape=(1,), init=0) + self.timeout = Var(shape=(1,), init=timeout) + # Generate Var and InPort for 1bit variables + # Default values for variables_1bit and variables_32bit are also + # assigned in the proc models run_async method + self.variables_1bit = Var(shape=(variables_1bit_num,), + init=variables_1bit_init) + self.variables_1bit_in = InPort(shape=(variables_1bit_num,)) + + # Generate Vars and Inports for 32bit variables + self.variables_32bit = Var(shape=(variables_32bit_num,), + init=variables_32bit_init) + # self.variables_32bit__in + for ii in range(variables_32bit_num): + setattr(self, f"variables_32bit_{ii}_in", InPort((1,))) + + def _validate_input(self, + variables_32bit_num, + variables_32bit_init) -> None: + + if isinstance(variables_32bit_init, int) and variables_32bit_num == 1: + return + elif (isinstance(variables_32bit_init, list) + and len(variables_32bit_init) == variables_32bit_num): + return + elif (isinstance(variables_32bit_init, np.ndarray) + and variables_32bit_init.shape[0] == variables_32bit_num): + return + else: + raise ValueError(f"The variables_32bit_num must match the number " + f"of {variables_32bit_init=} provided.") + class SolutionReceiver(AbstractProcess): - """Process to readout solution from SNN and make it available on host. + r"""Process which receives a solution via spikeIO on the superhost. Is + connected within a SolutionReadout process. + The way how information is processed is defined by the run_async of the + PyProcModel, which must be defined for each SNN separately. - Parameters + Attributes ---------- - shape: The shape of the set of nodes, or process, which state will be read. - target_cost: int - cost value at which, once attained by the network, this process will - stop execution. - name: str - Name of the Process. Default is 'Process_ID', where ID is an integer - value that is determined automatically. - log_config: - Configuration options for logging. + variables_1bit: Var + Binary variables assignment. + : Var + Values of 32 bit variables. Initiated by the parameter + variables_32bit_init. There will be one 32bit variable for each list + entry of variables_32bit_names. - Attributes + InPorts: ---------- - read_solution: InPort - A message received on this ports signifies the process - should call read on its RefPort. - ref_port: RefPort - A reference port to a variable in another process which state - will be remotely accessed upon read request. Here, it reads the - current variables assignment by a solver to an optimization problem. - target_cost: Var - Cost value at which, once attained by the network. + results_in: InPort + Receives all input from the SpikeIntegrators of a SolutionReadout + process. 
+ OutPorts: + ------- """ def __init__( self, shape: ty.Tuple[int, ...], - num_variables: int, timeout: int, - best_cost_init: int, - best_state_init: ty.Union[npty.ArrayLike, int], + variables_1bit_num: int, + variables_1bit_init: ty.Union[npty.ArrayLike, int], + variables_32bit_num: int, + variables_32bit_init: ty.Union[npty.ArrayLike, int], num_spike_integrators: int, - best_timestep_init: int, - num_message_bits: int = 24, + num_message_bits: int, name: ty.Optional[str] = None, log_config: ty.Optional[LogConfig] = None, ) -> None: + """ + Parameters + ---------- + shape: tuple + A tuple of the form (number of variables, domain size). + timeout: int + After timeout time steps, the run will be stopped. + variables_1bit_num: int + The number of 1bit (binary) variables. + variables_1bit_init: int + The initial values of 1bit (binary) variables. + variables_32bit_num: int + The number of 32bit variables and ports. + variables_32bit_init: int, list[int] + The initial values for the 32bit variables. + num_message_bits: int + Defines the number of bits of a single message via spikeIO. + Currently only tested for 32bits. + name: str, optional + Name of the Process. Default is 'Process_ID', where ID is an + integer value that is determined automatically. + log_config: LogConfig, optional + Configuration options for logging.z + """ + super().__init__( shape=shape, name=name, log_config=log_config, ) - self.best_state = Var(shape=(num_variables,), init=best_state_init) - self.best_timestep = Var(shape=(1,), init=best_timestep_init) - self.best_cost = Var(shape=(1,), init=best_cost_init) self.num_message_bits = Var(shape=(1,), init=num_message_bits) self.timeout = Var(shape=(1,), init=timeout) + + # Define Vars + self.variables_1bit = Var(shape=(variables_1bit_num,), + init=variables_1bit_init) + self.variables_32bit = Var(shape=(variables_32bit_num,), + init=variables_32bit_init) + + # Define InPorts self.results_in = InPort(shape=(num_spike_integrators,)) From 4ea4180c77ce4a3aea4f3fafc7bdc29541dd4aaf Mon Sep 17 00:00:00 2001 From: Philipp Stratmann Date: Thu, 29 Feb 2024 05:09:31 -0800 Subject: [PATCH 34/34] remove QUBO --- .../solvers/qubo/cost_integrator/process.py | 127 ------- .../qubo/simulated_annealing/process.py | 159 --------- .../solvers/qubo/solution_readout/models.py | 321 ------------------ .../solvers/qubo/solution_readout/process.py | 223 ------------ 4 files changed, 830 deletions(-) delete mode 100644 src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py delete mode 100644 src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py delete mode 100644 src/lava/lib/optimization/solvers/qubo/solution_readout/models.py delete mode 100644 src/lava/lib/optimization/solvers/qubo/solution_readout/process.py diff --git a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py b/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py deleted file mode 100644 index ce8cfbd1..00000000 --- a/src/lava/lib/optimization/solvers/qubo/cost_integrator/process.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2022-2024 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ -import typing as ty - -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.process import AbstractProcess, LogConfig -from lava.magma.core.process.variable import Var - - -class CostIntegrator(AbstractProcess): - """Node that monitors execution of the QUBOSolver. 
It integrates the cost - components from all variables. Whenever a new better solution is found, - it stores the new best cost and the associated timestep, while triggering - the variable neurons to store the new best state. Waits for stopping - criteria to be reached, either target_cost or timeout. Once reached, - it spikes out the best cost, timestep, and a trigger for the variable - neurons to spike out the best state. - - Parameters - ---------- - shape : tuple(int) - The expected number and topology of the input cost components. - target_cost: int - Target cost of the QUBO solver. Once reached, the best_cost, - best_timestep, and best_variable_assignment are spiked out. - timeout: int - Timeout of the QUBO solver. Once reached, the best_cost, - best_timestep, and best_variable_assignment are spiked out. - name : str, optional - Name of the Process. Default is 'Process_ID', where ID is an - integer value that is determined automatically. - log_config: Configuration options for logging. - - InPorts - ------- - cost_in - input from the variable neurons. Added, this input denotes - the total cost of the current variable assignment. - - OutPorts - -------- - control_states_out - Port to the variable neurons. - Can send either of the following three values: - 1 -> store the state, since it is the new best state - 2 -> store the state and spike it, since stopping criteria reached - 3 -> spike the best state - best_cost_out - Port to the SolutionReadout. Sends the best cost found. - best_timestep_out - Port to the SolutionReadout. Sends the timestep when the best cost - was found. - - Vars - ---- - timestep - Holds current timestep - cost_min_last_bytes - Current minimum cost, i.e., the lowest reported cost so far. - Saves the last 3 bytes. - cost_min = cost_min_first_byte << 24 + cost_min_last_bytes - cost_min_first_byte - Current minimum cost, i.e., the lowest reported cost so far. - Saves the first byte. - cost_last_bytes - Current cost. - Saves the last 3 bytes. - cost_min = cost_min_first_byte << 24 + cost_min_last_bytes - cost_first_byte - Current cost. - Saves the first byte. - """ - - def __init__( - self, - *, - target_cost: int, - timeout: int, - shape: ty.Tuple[int, ...] 
= (1,), - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, - ) -> None: - self._input_validation(target_cost=target_cost, - timeout=timeout) - - super().__init__(shape=shape, - target_cost=target_cost, - name=name, - log_config=log_config) - self.cost_in = InPort(shape=shape) - self.control_states_out = OutPort(shape=shape) - self.best_cost_out = OutPort(shape=shape) - self.best_timestep_out = OutPort(shape=shape) - - # Counter for timesteps - self.timestep_inverse = Var(shape=shape, init=timeout) - # Storage for best current time step - self.best_timestep_inverse = Var(shape=shape, init=timeout) - - # Var to store current cost - # Note: Total cost = cost_first_byte << 24 + cost_last_bytes - # last 24 bit of cost - self.cost_last_bytes = Var(shape=shape, init=0) - # first 8 bit of cost - self.cost_first_byte = Var(shape=shape, init=0) - - # Var to store best cost found to far - # Note: Total min cost = cost_min_first_byte << 24 + cost_min_last_bytes - # last 24 bit of cost - self.cost_min_last_bytes = Var(shape=shape, init=0) - # first 8 bit of cost - self.cost_min_first_byte = Var(shape=shape, init=0) - - @staticmethod - def _input_validation(target_cost, timeout) -> None: - if (target_cost is None and timeout is None): - raise ValueError( - "Both the target_cost and the timeout must be defined") - if target_cost > 0 or target_cost < - 2 ** 31 + 1: - raise ValueError( - f"The target cost must in the range [-2**32 + 1, 0], " - f"but is {target_cost}.") - if timeout <= 0 or timeout > 2 ** 24 - 1: - raise ValueError( - f"The timeout must be in the range (0, 2**24 - 1], but is " - f"{timeout}.") diff --git a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py b/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py deleted file mode 100644 index 2426b2e8..00000000 --- a/src/lava/lib/optimization/solvers/qubo/simulated_annealing/process.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (C) 2022-2024 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ - -import numpy as np -import typing as ty -from numpy import typing as npty - -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.process import AbstractProcess -from lava.magma.core.process.variable import Var - - -class SimulatedAnnealingLocal(AbstractProcess): - """ - Non-equilibrium Boltzmann (NEBM) neuron model to solve QUBO problems. - This model uses purely information available at the level of individual - neurons to decide whether to switch or not, in contrast to the inheriting - Process NEBMSimulatedAnnealing. - """ - - def __init__( - self, - *, - shape: ty.Tuple[int, ...], - cost_diagonal: npty.ArrayLike, - max_temperature: npty.ArrayLike, - refract_scaling: ty.Union[npty.ArrayLike, None], - refract_seed: int, - init_value: npty.ArrayLike, - init_state: npty.ArrayLike, - ): - """ - SA Process. - - Parameters - ---------- - shape: Tuple - Number of neurons. Default is (1,). - - refract_scaling : ArrayLike - After a neuron has switched its binary variable, it remains in a - refractory state that prevents any variable switching for a - number of time steps. This number of time steps is determined by - rand(0, 255) >> refract_scaling - Refract_scaling thus denotes the order of magnitude of timesteps a - neuron remains in a state after a transition. - refract_seed : int - Random seed to initialize the refractory periods. Allows - repeatability. 
- init_value : ArrayLike - The spiking history with which the network is initialized - init_state : ArrayLike - The state of neurons with which the network is initialized - neuron_model : str - The neuron model to be used. The latest list of allowed values - can be found in NEBMSimulatedAnnealing.enabled_neuron_models. - """ - - super().__init__( - shape=shape, - cost_diagonal=cost_diagonal, - refract_scaling=refract_scaling, - ) - - self.a_in = InPort(shape=shape) - self.delta_temperature_in = InPort(shape=shape) - self.control_cost_integrator = InPort(shape=shape) - self.s_sig_out = OutPort(shape=shape) - self.s_wta_out = OutPort(shape=shape) - self.best_variable_assignment_out = OutPort(shape=shape) - - self.spk_hist = Var( - shape=shape, init=(np.zeros(shape=shape) + init_value).astype(int) - ) - - self.temperature = Var(shape=shape, init=int(max_temperature)) - - np.random.seed(refract_seed) - self.refract_counter = Var( - shape=shape, - init=0 + np.right_shift( - np.random.randint(0, 2**8, size=shape), (refract_scaling or 0) - ), - ) - # Storage for the best state. Will get updated whenever a better - # state was found - # Default is all zeros - self.best_variable_assignment = Var( - shape=shape, - init=np.zeros(shape=shape, dtype=int) - ) - # Initial state determined in DiscreteVariables - self.state = Var( - shape=shape, - init=init_state.astype(int) - if init_state is not None - else np.zeros(shape=shape, dtype=int), - ) - - @property - def shape(self) -> ty.Tuple[int, ...]: - return self.proc_params["shape"] - - -class SimulatedAnnealing(SimulatedAnnealingLocal): - """ - Non-equilibrium Boltzmann (NEBM) neuron model to solve QUBO problems. - This model combines the switching intentions of all NEBM neurons to - decide whether to switch or not, to avoid conflicting variable switches. - """ - - def __init__( - self, - *, - shape: ty.Tuple[int, ...], - cost_diagonal: npty.ArrayLike, - max_temperature: npty.ArrayLike, - init_value: npty.ArrayLike, - init_state: npty.ArrayLike, - ): - """ - SA Process. - - Parameters - ---------- - shape: Tuple - Number of neurons. Default is (1,). - - refract_scaling : ArrayLike - After a neuron has switched its binary variable, it remains in a - refractory state that prevents any variable switching for a - number of time steps. This number of time steps is determined by - rand(0, 255) >> refract_scaling - Refract_scaling thus denotes the order of magnitude of timesteps a - neuron remains in a state after a transition. - init_value : ArrayLike - The spiking history with which the network is initialized - init_state : ArrayLike - The state of neurons with which the network is initialized - neuron_model : str - The neuron model to be used. The latest list of allowed values - can be found in NEBMSimulatedAnnealing.enabled_neuron_models. 
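
The refractory initialisation described above can be reproduced outside Lava with a few lines of NumPy: each neuron draws a random value in [0, 255] and shifts it right by refract_scaling, so a larger refract_scaling yields shorter refractory periods. The function name below is illustrative:

import numpy as np


def initial_refractory_periods(shape, refract_scaling, refract_seed=0):
    """Draw per-neuron refractory periods as rand(0, 255) >> refract_scaling,
    matching the refract_counter initialisation above."""
    np.random.seed(refract_seed)
    return np.right_shift(np.random.randint(0, 2 ** 8, size=shape),
                          refract_scaling)


periods = initial_refractory_periods(shape=(10,), refract_scaling=3)
print(periods)  # ten values in [0, 31], since 255 >> 3 == 31
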
- """ - - super().__init__( - shape=shape, - cost_diagonal=cost_diagonal, - max_temperature=max_temperature, - refract_scaling=None, - refract_seed=0, - init_value=init_value, - init_state=init_state, - ) - - # number of NEBM neurons that suggest switching in a time step - self.n_switches_in = InPort(shape=shape) - # port to notify other NEBM neurons of switching intentions - self.suggest_switch_out = OutPort(shape=shape) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py deleted file mode 100644 index a068a8fa..00000000 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/models.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ -import numpy as np -import itertools -import typing as ty -import numpy.typing as npty - -from lava.lib.optimization.solvers.qubo.solution_readout.process import ( - SolutionReadoutEthernet -) -from lava.magma.core.decorator import implements, requires -from lava.magma.core.model.py.model import ( - PyAsyncProcessModel -) -from lava.magma.core.model.py.ports import PyInPort -from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.model.sub.model import AbstractSubProcessModel -from lava.magma.core.resources import CPU -from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol - -from lava.lib.optimization.solvers.qubo.solution_readout.process import ( - SolutionReceiver, SpikeIntegrator -) -from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol -from lava.proc.sparse.process import Sparse -from scipy.sparse import csr_matrix -from abc import ABC, abstractmethod - - -@implements(SolutionReceiver, protocol=AsyncProtocol) -@requires(CPU) -class SolutionReceiverAbstractPyModel(PyAsyncProcessModel, ABC): - """CPU model for the SolutionReadout process. - This is the abstract class. - The process receives two types of messages, an updated cost and the - state of - the solver network representing the current candidate solution to an - OptimizationProblem. Additionally, a target cost can be defined by the - user, once this cost is reached by the solver network, this process - will request the runtime service to pause execution. 
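
Returning briefly to the CostIntegrator deleted earlier in this patch: it keeps each 32-bit cost split across two Vars, an 8-bit cost_first_byte and a 24-bit cost_last_bytes. A self-contained sketch of that split and its reconstruction follows (function names are illustrative); note that the docstring formula needs parentheses in Python, (first_byte << 24) + last_bytes, because + binds more tightly than <<:

def split_cost(cost: int):
    """Split a signed 32-bit cost into its top byte and lower 24 bits."""
    as_unsigned = cost & 0xFFFFFFFF
    return as_unsigned >> 24, as_unsigned & 0xFFFFFF


def merge_cost(first_byte: int, last_bytes: int) -> int:
    """Recombine the two registers into a signed 32-bit cost."""
    value = (first_byte << 24) + last_bytes
    # Reinterpret the 32-bit pattern as a signed integer.
    return value - (1 << 32) if value >= (1 << 31) else value


cost = -123_456
assert merge_cost(*split_cost(cost)) == cost
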
- """ - - variables_1bit: np.ndarray = LavaPyType(np.ndarray, np.uint8, 1) - variables_32bit: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) - num_message_bits: np.ndarray = LavaPyType(np.ndarray, np.int8, 32) - timeout: np.ndarray = LavaPyType(np.ndarray, np.int32, 32) - results_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.int32, 32) - - @abstractmethod - def run_async(self): - pass - - @staticmethod - def _decompress_state(compressed_states, - num_message_bits, - variables_1bit_num, - variables_32bit_num): - """Receives the output of a recv from SolutionReadout, and extracts - 32bit and 1bit variables!""" - - variables_32bit = compressed_states[:variables_32bit_num].astype( - np.int32) - - variables_1bit = (compressed_states[variables_32bit_num:, None] & ( - 1 << np.arange(0, num_message_bits))) != 0 - - # reshape into a 1D array - variables_1bit.reshape(-1) - # If n_vars is not a multiple of num_message_bits, then last entries - # must be cut off - variables_1bit = variables_1bit.astype( - np.int8).flatten()[:variables_1bit_num] - - return variables_32bit, variables_1bit - - -@implements(SolutionReceiver, protocol=AsyncProtocol) -@requires(CPU) -class SolutionReceiverQUBOPyModel(SolutionReceiverAbstractPyModel): - """CPU model for the SolutionReadout process. - This model is specific for the QUBO Solver. - - See docstring of parent class for more information - """ - - def run_async(self): - - # Get required user input - num_message_bits = self.num_message_bits[0] - variables_1bit_num = self.variables_1bit.shape[0] - variables_32bit_num = self.variables_32bit.shape[0] - timeout = self.timeout[0] - - # Set default values, required only if the Process will be restarted - self.variables_32bit[1] = 1 - self.variables_32bit[0] = 0 - self.variables_1bit[:] = 0 - - # Iterating for timeout - 1 because an additional step is used to - # recv the state - while True: - results_buffer = self.results_in.recv() - - if self._check_if_input(results_buffer): - break - - results_buffer, _ = self._decompress_state( - compressed_states=results_buffer, - num_message_bits=num_message_bits, - variables_1bit_num=variables_1bit_num, - variables_32bit_num=variables_32bit_num) - self.variables_32bit = results_buffer - - # best states are returned with a delay of 1 timestep - results_buffer = self.results_in.recv() - _, results_buffer = self._decompress_state( - compressed_states=results_buffer, - num_message_bits=num_message_bits, - variables_1bit_num=variables_1bit_num, - variables_32bit_num=variables_32bit_num) - self.variables_1bit = results_buffer - - print("==============================================================") - print("Solution found!") - print(f"{self.variables_32bit=}") - print(f"{self.variables_1bit=}") - print("==============================================================") - - @staticmethod - def _check_if_input(results_buffer) -> bool: - """For QUBO, we know that the readout starts as soon as the 2nd output - (best_timestep) is > 0.""" - - return results_buffer[1] > 0 - - @staticmethod - def postprocess_variables_32bit( - variables_32bit, - timeout, - ) -> ty.Tuple[int, int]: - best_cost = variables_32bit[0] - best_timestep = variables_32bit[1] - best_timestep = timeout - best_timestep - 3 - return best_cost, best_timestep - - -@implements(proc=SolutionReadoutEthernet, protocol=LoihiProtocol) -@requires(CPU) -class SolutionReadoutEthernetModel(AbstractSubProcessModel): - """Model for the SolutionReadout process.""" - - def __init__(self, proc): - num_message_bits = 
proc.proc_params.get("num_message_bits") - - timeout = proc.proc_params.get("timeout") - - variables_1bit_num = proc.variables_1bit.shape[0] - variables_32bit_num = proc.variables_32bit.shape[0] - num_spike_integrators = proc.proc_params.get("num_spike_integrators") - - connection_config = proc.proc_params.get("connection_config") - - self.spike_integrators = SpikeIntegrator(shape=(num_spike_integrators,)) - - # Connect the 1bit binary neurons - - weights_variables_1bit_0_in = self._get_input_weights( - variables_1bit_num=variables_1bit_num, - variables_32bit_num=variables_32bit_num, - num_spike_int=num_spike_integrators, - num_1bit_vars_per_int=num_message_bits, - weight_exp=0 - ) - self.synapses_variables_1bit_0_in = Sparse( - weights=weights_variables_1bit_0_in, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=0, - ) - - proc.in_ports.variables_1bit_in.connect( - self.synapses_variables_1bit_0_in.s_in) - self.synapses_variables_1bit_0_in.a_out.connect( - self.spike_integrators.a_in) - - if variables_1bit_num > 8: - weights_variables_1bit_1_in = self._get_input_weights( - variables_1bit_num=variables_1bit_num, - variables_32bit_num=variables_32bit_num, - num_spike_int=num_spike_integrators, - num_1bit_vars_per_int=num_message_bits, - weight_exp=8 - ) - self.synapses_variables_1bit_1_in = Sparse( - weights=weights_variables_1bit_1_in, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=8, - ) - - proc.in_ports.variables_1bit_in.connect( - self.synapses_variables_1bit_1_in.s_in) - self.synapses_variables_1bit_1_in.a_out.connect( - self.spike_integrators.a_in) - - if variables_1bit_num > 16: - weights_variables_1bit_2_in = self._get_input_weights( - variables_1bit_num=variables_1bit_num, - variables_32bit_num=variables_32bit_num, - num_spike_int=num_spike_integrators, - num_1bit_vars_per_int=num_message_bits, - weight_exp=16 - ) - self.synapses_variables_1bit_2_in = Sparse( - weights=weights_variables_1bit_2_in, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=16, - ) - - proc.in_ports.variables_1bit_in.connect( - self.synapses_variables_1bit_2_in.s_in) - self.synapses_variables_1bit_2_in.a_out.connect( - self.spike_integrators.a_in) - - if variables_1bit_num > 24: - weights_variables_1bit_3_in = self._get_input_weights( - variables_1bit_num=variables_1bit_num, - variables_32bit_num=variables_32bit_num, - num_spike_int=num_spike_integrators, - num_1bit_vars_per_int=num_message_bits, - weight_exp=24 - ) - self.synapses_variables_1bit_3_in = Sparse( - weights=weights_variables_1bit_3_in, - num_weight_bits=8, - num_message_bits=num_message_bits, - weight_exp=24, - ) - proc.in_ports.variables_1bit_in.connect( - self.synapses_variables_1bit_3_in.s_in) - self.synapses_variables_1bit_3_in.a_out.connect( - self.spike_integrators.a_in) - - # Connect the 32bit InPorts, one by one - for ii in range(variables_32bit_num): - # Create the synapses for InPort ii as self. 
- synapses_in = Sparse( - weights=self._get_32bit_in_weights( - num_spike_int=num_spike_integrators, - var_index=ii), - num_weight_bits=8, - num_message_bits=32,) - setattr(self, f"synapses_variables_32bit_{ii}_in", synapses_in) - - getattr(proc.in_ports, - f"variables_32bit_{ii}_in").connect(synapses_in.s_in) - synapses_in.a_out.connect(self.spike_integrators.a_in) - - # Define and connect the SolutionReceiver - self.solution_receiver = SolutionReceiver( - shape=(1,), - timeout=timeout, - variables_1bit_num=variables_1bit_num, - variables_1bit_init=proc.variables_1bit.get(), - variables_32bit_num=variables_32bit_num, - variables_32bit_init=proc.variables_32bit.get(), - num_spike_integrators=num_spike_integrators, - num_message_bits=num_message_bits, - ) - - self.spike_integrators.s_out.connect( - self.solution_receiver.results_in, connection_config) - - # Create aliases for variables - proc.vars.variables_1bit.alias(self.solution_receiver.variables_1bit) - proc.vars.variables_32bit.alias(self.solution_receiver.variables_32bit) - proc.vars.timeout.alias(self.solution_receiver.timeout) - - @staticmethod - def _get_input_weights(variables_1bit_num, - variables_32bit_num, - num_spike_int, - num_1bit_vars_per_int, - weight_exp) -> csr_matrix: - """Builds weight matrices from 1bit variable neurons to - SpikeIntegrators. For this, num_spike_int binary neurons are bundled - and converge onto 1 SpikeIntegrator. For efficiency reasons, this - function may get vectorized in the future.""" - - weights = np.zeros((num_spike_int, variables_1bit_num), dtype=np.uint8) - - # The first SpikeIntegrators receive 32bit variables - for spike_integrator_id in range(variables_32bit_num, - num_spike_int - 1): - variable_start = num_1bit_vars_per_int * ( - spike_integrator_id - variables_32bit_num) + weight_exp - weights[spike_integrator_id, - variable_start:variable_start + 8] = np.power(2, - np.arange(8)) - # The last spike integrator might be connected by less than - # num_1bit_vars_per_int neurons - # This happens when mod(num_variables, num_1bit_vars_per_int) != 0 - variable_start = num_1bit_vars_per_int * ( - num_spike_int - variables_32bit_num - 1) + weight_exp - weights[-1, variable_start:] = np.power(2, np.arange(weights.shape[1] - - variable_start)) - - return csr_matrix(weights) - - @staticmethod - def _get_32bit_in_weights(num_spike_int: int, var_index: int) -> csr_matrix: - - data = [1] - row = [var_index] - col = [0] - - return csr_matrix((data, (row, col)), - shape=(num_spike_int, 1), - dtype=np.int8) diff --git a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py b/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py deleted file mode 100644 index 09ee8a89..00000000 --- a/src/lava/lib/optimization/solvers/qubo/solution_readout/process.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (C) 2023-2024 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause -# See: https://spdx.org/licenses/ -import numpy as np -import typing as ty -import numpy.typing as npty - -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.process.process import AbstractProcess, LogConfig -from lava.magma.core.process.variable import Var - -from lava.magma.core.process.ports.connection_config import ConnectionConfig - - -class SpikeIntegrator(AbstractProcess): - """GradedVec - Graded spike vector layer. Accumulates and forwards 32bit spikes. 
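
The _decompress_state helper deleted above assumes that a received result vector starts with the whole 32-bit values, followed by words that each pack num_message_bits binary variables, least-significant bit first. A standalone round-trip sketch under that assumption (names and numbers are illustrative):

import numpy as np


def unpack_results(compressed, num_message_bits, n_1bit, n_32bit):
    """Mirror of _decompress_state: the first n_32bit entries carry whole
    32-bit values, the rest pack num_message_bits binary variables each."""
    variables_32bit = compressed[:n_32bit].astype(np.int32)
    bits = (compressed[n_32bit:, None]
            & (1 << np.arange(num_message_bits))) != 0
    variables_1bit = bits.astype(np.int8).flatten()[:n_1bit]
    return variables_32bit, variables_1bit


# Round trip with 2 graded values and 5 binary variables (LSB first).
binary = np.array([1, 0, 1, 1, 0], dtype=np.int8)
packed_word = int(np.sum(binary * (1 << np.arange(binary.size))))
compressed = np.array([-42, 7, packed_word])
vals32, vals1 = unpack_results(compressed, 32, binary.size, 2)
assert np.array_equal(vals32, [-42, 7])
assert np.array_equal(vals1, binary)
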
- - Parameters - ---------- - shape: tuple(int) - number and topology of neurons - """ - - def __init__( - self, - shape: ty.Tuple[int, ...]) -> None: - super().__init__(shape=shape) - - self.a_in = InPort(shape=shape) - self.s_out = OutPort(shape=shape) - - -class SolutionReadoutEthernet(AbstractProcess): - r"""Process which implements the solution readout layer - on the solver of an optimization problem. - - Attributes - ---------- - variables_1bit: Var - Binary variables assignment. - variables_1bit: Var - Values of 32 bit variables. Initiated by the parameter - variables_32bit_init. The shape is determined by variables_32bit_num. - - InPorts: - ---------- - variables_1bit_in: InPort - Receives the best binary (1bit) states. Shape is determined by the - number of binary variables. - variables_32bit__in: InPort - Receives 32bit variable. The number of InPorts is defined by - variables_32bit_num. For each 32bit variable ii, there is a - corresponding InPort variables_32bit_ii_in dynamically created. - - OutPorts: - ---------- - """ - - def __init__( - self, - shape: ty.Tuple[int, ...], - timeout: int, - variables_1bit_num: int, - variables_1bit_init: ty.Union[int, ty.List[int]], - variables_32bit_num: int, - variables_32bit_init: ty.Union[int, ty.List[int]], - connection_config: ConnectionConfig, - num_message_bits=32, - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, - ) -> None: - """ - Parameters - ---------- - shape: tuple - A tuple of the form (number of variables, domain size). - timeout: int - After timeout time steps, the run will be stopped. - variables_1bit_num: int - The number of 1bit (binary) variables. - variables_32bit_num: int - The number of 32bit variables and ports. - variables_32bit_init: int, list[int] - The initial values for the 32bit variables. - num_message_bits: int - Defines the number of bits of a single message via spikeIO. - Currently only tested for 32bits. - name: str, optional - Name of the Process. Default is 'Process_ID', where ID is an integer - value that is determined automatically. 
- log_config: LogConfig, optional - Configuration options for logging.z - """ - - self._validate_input(variables_32bit_num, variables_32bit_init) - - num_spike_integrators = variables_32bit_num + np.ceil( - variables_1bit_num / num_message_bits).astype(int) - - super().__init__( - shape=shape, - timeout=timeout, - num_spike_integrators=num_spike_integrators, - num_message_bits=num_message_bits, - connection_config=connection_config, - name=name, - log_config=log_config, - ) - - self.timeout = Var(shape=(1,), init=timeout) - - # Generate Var and InPort for 1bit variables - # Default values for variables_1bit and variables_32bit are also - # assigned in the proc models run_async method - self.variables_1bit = Var(shape=(variables_1bit_num,), - init=variables_1bit_init) - self.variables_1bit_in = InPort(shape=(variables_1bit_num,)) - - # Generate Vars and Inports for 32bit variables - self.variables_32bit = Var(shape=(variables_32bit_num,), - init=variables_32bit_init) - # self.variables_32bit__in - for ii in range(variables_32bit_num): - setattr(self, f"variables_32bit_{ii}_in", InPort((1,))) - - def _validate_input(self, - variables_32bit_num, - variables_32bit_init) -> None: - - if isinstance(variables_32bit_init, int) and variables_32bit_num == 1: - return - elif (isinstance(variables_32bit_init, list) - and len(variables_32bit_init) == variables_32bit_num): - return - elif (isinstance(variables_32bit_init, np.ndarray) - and variables_32bit_init.shape[0] == variables_32bit_num): - return - else: - raise ValueError(f"The variables_32bit_num must match the number " - f"of {variables_32bit_init=} provided.") - - -class SolutionReceiver(AbstractProcess): - r"""Process which receives a solution via spikeIO on the superhost. Is - connected within a SolutionReadout process. - The way how information is processed is defined by the run_async of the - PyProcModel, which must be defined for each SNN separately. - - Attributes - ---------- - variables_1bit: Var - Binary variables assignment. - : Var - Values of 32 bit variables. Initiated by the parameter - variables_32bit_init. There will be one 32bit variable for each list - entry of variables_32bit_names. - - InPorts: - ---------- - results_in: InPort - Receives all input from the SpikeIntegrators of a SolutionReadout - process. - - OutPorts: - ------- - """ - - def __init__( - self, - shape: ty.Tuple[int, ...], - timeout: int, - variables_1bit_num: int, - variables_1bit_init: ty.Union[npty.ArrayLike, int], - variables_32bit_num: int, - variables_32bit_init: ty.Union[npty.ArrayLike, int], - num_spike_integrators: int, - num_message_bits: int, - name: ty.Optional[str] = None, - log_config: ty.Optional[LogConfig] = None, - ) -> None: - """ - Parameters - ---------- - shape: tuple - A tuple of the form (number of variables, domain size). - timeout: int - After timeout time steps, the run will be stopped. - variables_1bit_num: int - The number of 1bit (binary) variables. - variables_1bit_init: int - The initial values of 1bit (binary) variables. - variables_32bit_num: int - The number of 32bit variables and ports. - variables_32bit_init: int, list[int] - The initial values for the 32bit variables. - num_message_bits: int - Defines the number of bits of a single message via spikeIO. - Currently only tested for 32bits. - name: str, optional - Name of the Process. Default is 'Process_ID', where ID is an - integer value that is determined automatically. 
- log_config: LogConfig, optional - Configuration options for logging.z - """ - - super().__init__( - shape=shape, - name=name, - log_config=log_config, - ) - - self.num_message_bits = Var(shape=(1,), init=num_message_bits) - self.timeout = Var(shape=(1,), init=timeout) - - # Define Vars - self.variables_1bit = Var(shape=(variables_1bit_num,), - init=variables_1bit_init) - self.variables_32bit = Var(shape=(variables_32bit_num,), - init=variables_32bit_init) - - # Define InPorts - self.results_in = InPort(shape=(num_spike_integrators,))
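
As a closing illustration of the readout wiring removed by this patch: SolutionReadoutEthernet sizes its SpikeIntegrator layer as one integrator per 32-bit variable plus one per num_message_bits binary variables, and _get_input_weights gives the 8 binary neurons of each byte band weights 2**0 .. 2**7, so the four bands (weight_exp 0, 8, 16, 24) together reconstruct one packed 32-bit word per integrator. The sketch below shows the sizing rule and a simplified single-band weight matrix; it is not the exact deleted implementation, which also special-cases the last integrator:

import numpy as np
from scipy.sparse import csr_matrix


def num_spike_integrators(n_1bit, n_32bit, num_message_bits=32):
    """Sizing rule used above: one integrator per 32-bit variable plus one
    per num_message_bits binary variables."""
    return n_32bit + int(np.ceil(n_1bit / num_message_bits))


def one_byte_weight_band(n_1bit, n_32bit, n_spike_int, bits_per_int=32):
    """Simplified single-band (weight_exp=0) analogue of _get_input_weights:
    the first 8 binary variables routed to each integrator get weights
    2**0 .. 2**7."""
    weights = np.zeros((n_spike_int, n_1bit), dtype=np.uint8)
    for spike_int in range(n_32bit, n_spike_int):
        start = (spike_int - n_32bit) * bits_per_int
        stop = min(start + 8, n_1bit)
        weights[spike_int, start:stop] = 2 ** np.arange(stop - start)
    return csr_matrix(weights)


print(num_spike_integrators(n_1bit=70, n_32bit=2))      # 2 + ceil(70/32) = 5
print(one_byte_weight_band(70, 2, 5).toarray().shape)   # (5, 70)
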