diff --git a/.pylint_dict.txt b/.pylint_dict.txt index 564be9322d..cc275a41d8 100644 --- a/.pylint_dict.txt +++ b/.pylint_dict.txt @@ -90,10 +90,25 @@ ProgressBar RandomDistribution SourceSegmentedSDRAMMachinePartition SpinnmanIOException +PopulationApplicationVertex +AbstractSynapseType SynapseInformation SynapseRegionReferences SynapseRegions VariableSDRAM +MultiRegionSDRAM +AbstractMulticastControllableDevice +CommonRegions +NeuronRegions +MDSlice +DataSpecificationGenerator +CoreSubset +CoreSubsets +AbstractCurrentSource +GeneratorData +EIEIOType +EIEIOPrefix +RandomDistribtuion # Others' Python types (including PYNN terms) AnalogSignal diff --git a/spynnaker/pyNN/connections/spynnaker_poisson_control_connection.py b/spynnaker/pyNN/connections/spynnaker_poisson_control_connection.py index f569e920f9..f4701a7fdf 100644 --- a/spynnaker/pyNN/connections/spynnaker_poisson_control_connection.py +++ b/spynnaker/pyNN/connections/spynnaker_poisson_control_connection.py @@ -18,7 +18,8 @@ from spinn_front_end_common.interface.ds import DataType from spinn_front_end_common.utilities.connections.live_event_connection \ import ( - LiveEventConnection, _Callback, _InitCallback) + LiveEventConnection, _Callback, _InitCallback, _RcvCallback, + _RcvTimeCallback) from spinn_front_end_common.utilities.exceptions import ConfigurationException from spinn_front_end_common.utilities.constants import NOTIFY_PORT @@ -124,13 +125,15 @@ def add_init_callback(self, label: str, init_callback: _InitCallback): @overrides(LiveEventConnection.add_receive_callback) def add_receive_callback( - self, label, live_event_callback, translate_key=False): + self, label: str, live_event_callback: _RcvTimeCallback, + translate_key: bool = True): raise ConfigurationException( "SpynnakerPoissonControlPopulation can't receive data") @overrides(LiveEventConnection.add_receive_no_time_callback) def add_receive_no_time_callback( - self, label, live_event_callback, translate_key=True): + self, label: str, live_event_callback: _RcvCallback, + translate_key: bool = True): raise ConfigurationException( "SpynnakerPoissonControlPopulation can't receive data") diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index 6009a2f6e8..3d870b2777 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -12,12 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Optional from spinn_utilities.overrides import overrides from spinn_front_end_common.utilities.exceptions import ConfigurationException -from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard +from spynnaker.pyNN.extra_algorithms.splitter_components import ( + SplitterAbstractPopulationVertex) +from spynnaker.pyNN.models.neuron import ( + AbstractPopulationVertex, AbstractPyNNNeuronModelStandard) from spynnaker.pyNN.models.defaults import ( default_initial_values, default_parameters) from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent +from spynnaker.pyNN.models.neuron.implementations import NeuronImplStandard from spynnaker.pyNN.models.neuron.neuron_models import ( NeuronModelLeakyIntegrateAndFire) from spynnaker.pyNN.models.neuron.synapse_types import SynapseTypeExponential @@ -96,18 +101,24 @@ def __init__( @overrides(AbstractPyNNNeuronModelStandard.create_vertex) def create_vertex( - self, n_neurons, label, *, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size, - n_steps_per_timestep, drop_late_spikes, splitter, seed, - n_colour_bits): + self, n_neurons: int, label: str, *, + spikes_per_second: Optional[float] = None, + ring_buffer_sigma: Optional[float] = None, + incoming_spike_buffer_size: Optional[int] = None, + drop_late_spikes: Optional[bool] = None, + splitter: Optional[SplitterAbstractPopulationVertex] = None, + seed: Optional[int] = None, n_colour_bits: Optional[int] = None, + n_steps_per_timestep: int = 1) -> AbstractPopulationVertex: if n_neurons != len(self._devices): raise ConfigurationException( "Number of neurons does not match number of " f"devices in {label}") - self._model.n_steps_per_timestep = n_steps_per_timestep + model = self._model + assert isinstance(model, NeuronImplStandard) + model.n_steps_per_timestep = n_steps_per_timestep max_atoms = self.get_model_max_atoms_per_dimension_per_core() return ExternalDeviceLifControlVertex( - self._devices, self._create_edges, max_atoms, self._model, self, + self._devices, self._create_edges, max_atoms, model, self, self._translator, spikes_per_second, label, ring_buffer_sigma, incoming_spike_buffer_size, drop_late_spikes, splitter, seed, n_colour_bits) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 1504a992cf..cc75481233 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -26,6 +26,8 @@ from .abstract_multicast_controllable_device import ( AbstractMulticastControllableDevice) if TYPE_CHECKING: + from pacman.model.graphs.machine.machine_vertex import MachineVertex + from pacman.model.routing_info.routing_info import RoutingInfo from spynnaker.pyNN.models.neuron.implementations import AbstractNeuronImpl from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModel from .abstract_ethernet_translator import AbstractEthernetTranslator @@ -53,7 +55,7 @@ class ExternalDeviceLifControlVertex( def __init__( self, devices: Sequence[AbstractMulticastControllableDevice], - create_edges: bool, max_atoms_per_core: int, + create_edges: bool, max_atoms_per_core: Tuple[int, ...], neuron_impl: AbstractNeuronImpl, pynn_model: AbstractPyNNNeuronModel, translator: Optional[AbstractEthernetTranslator] = None, @@ -61,7 +63,7 @@ def __init__( label: Optional[str] = None, ring_buffer_sigma: Optional[float] = None, 
incoming_spike_buffer_size: Optional[int] = None, - drop_late_spikes: bool = False, + drop_late_spikes: Optional[bool] = None, splitter: Optional[SplitterAbstractPopulationVertex] = None, seed: Optional[int] = None, n_colour_bits: Optional[int] = None): """ @@ -71,7 +73,7 @@ def __init__( :param bool create_edges: True if edges to the devices should be added by this dev (set to False if using the dev over Ethernet using a translator) - :param int max_atoms_per_core: + :param tuple(int, ...) max_atoms_per_core: :param AbstractNeuronImpl neuron_impl: :param AbstractPyNNNeuronModel pynn_model: :param translator: @@ -87,6 +89,8 @@ def __init__( :param int n_colour_bits: The number of colour bits to use """ # pylint: disable=too-many-arguments + if drop_late_spikes is None: + drop_late_spikes = False super().__init__( len(devices), f"ext_dev{devices}" if label is None else label, max_atoms_per_core, @@ -146,14 +150,15 @@ def get_outgoing_partition_ids(self) -> List[str]: @overrides(HasCustomAtomKeyMap.get_atom_key_map) def get_atom_key_map( - self, pre_vertex, partition_id: str, routing_info) -> List[ - Tuple[int, int]]: + self, pre_vertex: MachineVertex, partition_id: str, + routing_info: RoutingInfo) -> Iterable[Tuple[int, int]]: index = self.__indices[partition_id] device = self.__devices[partition_id] return [(index, device.device_control_key)] @overrides(AbstractPopulationVertex.get_fixed_key_and_mask) - def get_fixed_key_and_mask(self, partition_id: str): + def get_fixed_key_and_mask( + self, partition_id: str) -> Optional[BaseKeyAndMask]: return BaseKeyAndMask( self.__devices[partition_id].device_control_key, self._DEFAULT_COMMAND_MASK) diff --git a/spynnaker/pyNN/external_devices_models/external_spinnaker_link_fpga_retina_device.py b/spynnaker/pyNN/external_devices_models/external_spinnaker_link_fpga_retina_device.py index cbeafaf044..a57747e9f5 100644 --- a/spynnaker/pyNN/external_devices_models/external_spinnaker_link_fpga_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/external_spinnaker_link_fpga_retina_device.py @@ -101,7 +101,7 @@ def __init__( self.__fixed_mask = self._get_mask(mode) @overrides(ApplicationSpiNNakerLinkVertex.get_fixed_key_and_mask) - def get_fixed_key_and_mask(self, partition_id) -> BaseKeyAndMask: + def get_fixed_key_and_mask(self, partition_id: str) -> BaseKeyAndMask: return BaseKeyAndMask(self.__fixed_key, self.__fixed_mask) def _get_mask(self, mode: str) -> int: diff --git a/spynnaker/pyNN/external_devices_models/icub_retina_device.py b/spynnaker/pyNN/external_devices_models/icub_retina_device.py index de840a31cc..14738738b0 100644 --- a/spynnaker/pyNN/external_devices_models/icub_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/icub_retina_device.py @@ -18,6 +18,7 @@ from spinn_utilities.log import FormatAdapter from pacman.model.graphs.application import Application2DSpiNNakerLinkVertex from pacman.model.graphs.common import Slice +from pacman.model.graphs.machine import MachineVertex from pacman.model.routing_info.base_key_and_mask import BaseKeyAndMask from pacman.utilities.constants import BITS_IN_KEY from pacman.utilities.utility_calls import is_power_of_2 @@ -102,13 +103,15 @@ def get_incoming_slice(self, index: int) -> Slice: return vertex_slice @overrides(Application2DSpiNNakerLinkVertex.get_machine_fixed_key_and_mask) - def get_machine_fixed_key_and_mask(self, machine_vertex, partition_id): + def get_machine_fixed_key_and_mask( + self, machine_vertex: MachineVertex, + partition_id: str) -> BaseKeyAndMask: vertex_slice = 
machine_vertex.vertex_slice index = self.__index_by_slice[vertex_slice] return self._get_key_and_mask(self.__base_key, index) @overrides(Application2DSpiNNakerLinkVertex.get_fixed_key_and_mask) - def get_fixed_key_and_mask(self, partition_id) -> BaseKeyAndMask: + def get_fixed_key_and_mask(self, partition_id: str) -> BaseKeyAndMask: n_key_bits = BITS_IN_KEY - self._key_shift key_mask = ((1 << n_key_bits) - 1) << self._key_shift return BaseKeyAndMask(self.__base_key << self._key_shift, key_mask) diff --git a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py index d5210c68ac..9c24b99d7f 100644 --- a/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py +++ b/spynnaker/pyNN/external_devices_models/machine_munich_motor_device.py @@ -12,15 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Sequence from spinn_utilities.overrides import overrides from spinnman.model.enums import ExecutableType from pacman.model.graphs.common import Slice from pacman.model.graphs.machine import MachineVertex +from pacman.model.placements import Placement from pacman.model.resources import ConstantSDRAM from spinn_front_end_common.abstract_models import ( AbstractHasAssociatedBinary) from spinn_front_end_common.abstract_models import ( AbstractGeneratesDataSpecification) +from spinn_front_end_common.interface.ds import DataSpecificationGenerator from spinn_front_end_common.interface.provenance import ( ProvidesProvenanceDataFromMachineImpl, ProvenanceWriter) from spinn_front_end_common.interface.simulation import simulation_utilities @@ -91,33 +94,34 @@ def __init__( @property @overrides(MachineVertex.sdram_required) - def sdram_required(self): + def sdram_required(self) -> ConstantSDRAM: return ConstantSDRAM( SYSTEM_BYTES_REQUIREMENT + self._PARAMS_SIZE + self.get_provenance_data_size(self._PROVENANCE_ELEMENTS)) @overrides(AbstractHasAssociatedBinary.get_binary_file_name) - def get_binary_file_name(self): + def get_binary_file_name(self) -> str: return "robot_motor_control.aplx" @overrides(AbstractHasAssociatedBinary.get_binary_start_type) - def get_binary_start_type(self): + def get_binary_start_type(self) -> ExecutableType: return ExecutableType.USES_SIMULATION_INTERFACE @property @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id) - def _provenance_region_id(self): + def _provenance_region_id(self) -> int: return self._PROVENANCE_REGION @property @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items) - def _n_additional_data_items(self): + def _n_additional_data_items(self) -> int: return self._PROVENANCE_ELEMENTS @overrides( ProvidesProvenanceDataFromMachineImpl.parse_extra_provenance_items) def parse_extra_provenance_items( - self, label, x, y, p, provenance_data): + self, label: str, x: int, y: int, p: int, + provenance_data: Sequence[int]): n_buffer_overflows, = provenance_data with ProvenanceWriter() as db: @@ -133,7 +137,8 @@ def parse_extra_provenance_items( "or decrease the number of neurons per core.") @overrides(AbstractGeneratesDataSpecification.generate_data_specification) - def generate_data_specification(self, spec, placement): + def generate_data_specification( + self, spec: DataSpecificationGenerator, placement: Placement): # reserve regions self.reserve_memory_regions(spec) @@ -142,8 +147,10 @@ def generate_data_specification(self, spec, placement): # handle 
simulation data spec.switch_write_focus(self._SYSTEM_REGION) + vertex = placement.vertex + assert isinstance(vertex, AbstractHasAssociatedBinary) spec.write_array(simulation_utilities.get_simulation_header_array( - placement.vertex.get_binary_file_name())) + vertex.get_binary_file_name())) # Get the key routing_info = SpynnakerDataView.get_routing_infos() diff --git a/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py b/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py index 77b42c5cd3..261d949b96 100644 --- a/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py +++ b/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_motor_device.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Iterable from spinn_utilities.overrides import overrides from pacman.model.graphs.application import ( ApplicationSpiNNakerLinkVertex) +from pacman.model.graphs.application import ApplicationVertex from pacman.model.graphs.application.abstract import ( AbstractOneAppOneMachineVertex) from spinn_front_end_common.abstract_models import ( @@ -73,10 +75,11 @@ def __init__( _MunichMotorDevice(spinnaker_link_id, board_address)] @overrides(AbstractVertexWithEdgeToDependentVertices.dependent_vertices) - def dependent_vertices(self): + def dependent_vertices(self) -> Iterable[ApplicationVertex]: return self.__dependent_vertices @overrides(AbstractVertexWithEdgeToDependentVertices. edge_partition_identifiers_for_dependent_vertex) - def edge_partition_identifiers_for_dependent_vertex(self, vertex): + def edge_partition_identifiers_for_dependent_vertex( + self, vertex: ApplicationVertex) -> Iterable[str]: yield self.machine_vertex.MOTOR_PARTITION_ID diff --git a/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_retina_device.py b/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_retina_device.py index 2731d2e389..8c53556960 100644 --- a/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/munich_spinnaker_link_retina_device.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Iterable, List from spinn_utilities.overrides import overrides from pacman.model.routing_info import BaseKeyAndMask from pacman.model.graphs.application import ApplicationSpiNNakerLinkVertex @@ -114,12 +115,12 @@ def __init__( label=label, board_address=board_address) @overrides(ApplicationSpiNNakerLinkVertex.get_fixed_key_and_mask) - def get_fixed_key_and_mask(self, partition_id): + def get_fixed_key_and_mask(self, partition_id: str) -> BaseKeyAndMask: return BaseKeyAndMask(self.__fixed_key, self.__fixed_mask) @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # change the retina key it transmits with # (based off if its right or left) key_set_command = self._MANAGEMENT_BIT | ( @@ -144,7 +145,7 @@ def start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: # disable retina disable_command = self._MANAGEMENT_BIT | ( self._RIGHT_RETINA_DISABLE if self.__is_right @@ -155,5 +156,5 @@ def pause_stop_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] diff --git a/spynnaker/pyNN/external_devices_models/push_bot/abstract_push_bot_retina_device.py b/spynnaker/pyNN/external_devices_models/push_bot/abstract_push_bot_retina_device.py index 3364abf938..6f9bd2ec15 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/abstract_push_bot_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/abstract_push_bot_retina_device.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Iterable, List from spinn_utilities.overrides import overrides from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) +from spinn_front_end_common.utility_models import MultiCastCommand class AbstractPushBotRetinaDevice( @@ -35,7 +37,7 @@ def __init__(self, protocol, resolution): @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # add mode command if not done already if not self._protocol.sent_mode_command(): yield self._protocol.set_mode() @@ -50,10 +52,10 @@ def start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: yield self._protocol.disable_retina() @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] diff --git a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_device.py b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_device.py index 9d2f63ef9f..45878d619f 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_device.py @@ -15,7 +15,9 @@ from spinn_utilities.overrides import overrides from spinn_utilities.abstract_base import AbstractBase, abstractmethod from spynnaker.pyNN.external_devices_models import ( - AbstractMulticastControllableDevice) + AbstractMulticastControllableDevice, SendType) +from spynnaker.pyNN.protocols import MunichIoSpiNNakerLinkProtocol + # The default timestep to use for first send. Avoids clashes with other # control commands. 
@@ -29,10 +31,11 @@ class PushBotEthernetDevice( """ def __init__( - self, protocol, device, uses_payload, time_between_send, + self, protocol: MunichIoSpiNNakerLinkProtocol, device, + uses_payload, time_between_send, first_send_timestep=_DEFAULT_FIRST_SEND_TIMESTEP): """ - :param MunichIoEthernetProtocol protocol: + :param MunichIoSpiNNakerLinkProtocol protocol: The protocol instance to get commands from :param AbstractPushBotOutputDevice device: The Enum instance of the device to control @@ -51,58 +54,59 @@ def __init__( @property @overrides(AbstractMulticastControllableDevice.device_control_key) - def device_control_key(self): + def device_control_key(self) -> int: return self.__device.protocol_property.fget(self.__protocol) @property @overrides(AbstractMulticastControllableDevice.device_control_partition_id) - def device_control_partition_id(self): + def device_control_partition_id(self) -> str: return f"{self.__device.name}_PARTITION_ID" @property @overrides(AbstractMulticastControllableDevice.device_control_uses_payload) - def device_control_uses_payload(self): + def device_control_uses_payload(self) -> bool: return self.__uses_payload @property @overrides(AbstractMulticastControllableDevice.device_control_min_value) - def device_control_min_value(self): + def device_control_min_value(self) -> float: return self.__device.min_value @property @overrides(AbstractMulticastControllableDevice.device_control_max_value) - def device_control_max_value(self): + def device_control_max_value(self) -> float: return self.__device.max_value @property @overrides(AbstractMulticastControllableDevice .device_control_timesteps_between_sending) - def device_control_timesteps_between_sending(self): + def device_control_timesteps_between_sending(self) -> int: return self.__time_between_send @property @overrides(AbstractMulticastControllableDevice .device_control_send_type) - def device_control_send_type(self): + def device_control_send_type(self) -> SendType: return self.__device.send_type @property @overrides(AbstractMulticastControllableDevice .device_control_first_send_timestep) - def device_control_first_send_timestep(self): + def device_control_first_send_timestep(self) -> int: return self.__first_send_timestep @property - def protocol(self): + def protocol(self) -> MunichIoSpiNNakerLinkProtocol: """ The protocol instance, for use in the subclass. - :rtype: MunichIoEthernetProtocol + :rtype: MunichIoSpiNNakerLinkProtocol """ return self.__protocol @abstractmethod - def set_command_protocol(self, command_protocol): + def set_command_protocol( + self, command_protocol: MunichIoSpiNNakerLinkProtocol): """ Set the protocol use to send setup and shutdown commands, separately from the protocol used to control the device. diff --git a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_laser_device.py b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_laser_device.py index 51f788bf35..6df2bb72a2 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_laser_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_laser_device.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Iterable, List from spinn_utilities.overrides import overrides from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) from spinn_front_end_common.utilities.exceptions import ConfigurationException +from spinn_front_end_common.utility_models import MultiCastCommand from .push_bot_device import PushBotEthernetDevice from spynnaker.pyNN.external_devices_models.push_bot.parameters import ( PushBotLaser) +from spynnaker.pyNN.protocols import MunichIoSpiNNakerLinkProtocol class PushBotEthernetLaserDevice( @@ -28,7 +31,7 @@ class PushBotEthernetLaserDevice( """ def __init__( - self, laser, protocol, + self, laser, protocol: MunichIoSpiNNakerLinkProtocol, start_active_time=None, start_total_period=None, start_frequency=None, timesteps_between_send=None): """ @@ -58,12 +61,13 @@ def __init__( self.__start_frequency = start_frequency @overrides(PushBotEthernetDevice.set_command_protocol) - def set_command_protocol(self, command_protocol): + def set_command_protocol( + self, command_protocol: MunichIoSpiNNakerLinkProtocol): self.__command_protocol = command_protocol @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # add mode command if not done already if not self.protocol.sent_mode_command(): yield self.protocol.set_mode() @@ -81,12 +85,12 @@ def start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: yield self.__command_protocol.push_bot_laser_config_total_period(0) yield self.__command_protocol.push_bot_laser_config_active_time(0) yield self.__command_protocol.push_bot_laser_set_frequency(0) @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] diff --git a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_led_device.py b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_led_device.py index 0e1e29f7c8..9ec22fd1d7 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_led_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_led_device.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Iterable, List from spinn_utilities.overrides import overrides from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) from spinn_front_end_common.utilities.exceptions import ConfigurationException +from spinn_front_end_common.utility_models import MultiCastCommand from .push_bot_device import PushBotEthernetDevice from spynnaker.pyNN.external_devices_models.push_bot.parameters import ( PushBotLED) +from spynnaker.pyNN.protocols import MunichIoSpiNNakerLinkProtocol class PushBotEthernetLEDDevice( @@ -28,13 +31,13 @@ class PushBotEthernetLEDDevice( """ def __init__( - self, led, protocol, + self, led, protocol: MunichIoSpiNNakerLinkProtocol, start_active_time_front=None, start_active_time_back=None, start_total_period=None, start_frequency=None, timesteps_between_send=None): """ :param PushBotLED led: The LED to control - :param MunichIoEthernetProtocol protocol: + :param MunichIoSpiNNakerLinkProtocol protocol: The protocol instance to get commands from :param int start_active_time_front: The "active time" to set for the front LED at the start @@ -61,12 +64,13 @@ def __init__( self.__start_frequency = start_frequency @overrides(PushBotEthernetDevice.set_command_protocol) - def set_command_protocol(self, command_protocol): + def set_command_protocol( + self, command_protocol: MunichIoSpiNNakerLinkProtocol): self.__command_protocol = command_protocol @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # add mode command if not done already if not self.protocol.sent_mode_command(): yield self.protocol.set_mode() @@ -87,7 +91,7 @@ def start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: yield self.__command_protocol.push_bot_led_front_active_time(0) yield self.__command_protocol.push_bot_led_back_active_time(0) yield self.__command_protocol.push_bot_led_total_period(0) @@ -95,5 +99,5 @@ def pause_stop_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] diff --git a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_motor_device.py b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_motor_device.py index 25b4b6e17e..dcb740ae69 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_motor_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_motor_device.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Iterable, List from spinn_utilities.overrides import overrides from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) from spinn_front_end_common.utilities.exceptions import ConfigurationException +from spinn_front_end_common.utility_models import MultiCastCommand from .push_bot_device import PushBotEthernetDevice from spynnaker.pyNN.external_devices_models.push_bot.parameters import ( PushBotMotor) +from spynnaker.pyNN.protocols import MunichIoSpiNNakerLinkProtocol class PushBotEthernetMotorDevice( @@ -27,10 +30,11 @@ class PushBotEthernetMotorDevice( The motor of a PushBot. 
""" - def __init__(self, motor, protocol, timesteps_between_send=None): + def __init__(self, motor, protocol: MunichIoSpiNNakerLinkProtocol, + timesteps_between_send=None): """ :param PushBotMotor motor: indicates which motor to control - :param MunichIoEthernetProtocol protocol: + :param MunichIoSpiNNakerLinkProtocol protocol: The protocol used to control the device :param int timesteps_between_send: The number of timesteps between sending commands to the device, @@ -44,12 +48,13 @@ def __init__(self, motor, protocol, timesteps_between_send=None): self.__command_protocol = protocol @overrides(PushBotEthernetDevice.set_command_protocol) - def set_command_protocol(self, command_protocol): + def set_command_protocol( + self, command_protocol: MunichIoSpiNNakerLinkProtocol): self.__command_protocol = command_protocol @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # add mode command if not done already if not self.protocol.sent_mode_command(): yield self.protocol.set_mode() @@ -59,10 +64,10 @@ def start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: yield self.__command_protocol.generic_motor_disable() @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] diff --git a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_retina_device.py b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_retina_device.py index 3e4bf0bf89..b9786651ee 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_retina_device.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any, Dict from spinn_utilities.overrides import overrides from spynnaker.pyNN.external_devices_models import AbstractEthernetSensor from .push_bot_translator import PushBotTranslator @@ -52,23 +53,23 @@ def __init__( self.__n_neurons = resolution.value.n_neurons @overrides(AbstractEthernetSensor.get_n_neurons) - def get_n_neurons(self): + def get_n_neurons(self) -> int: return self.__n_neurons @overrides(AbstractEthernetSensor.get_injector_parameters) - def get_injector_parameters(self): + def get_injector_parameters(self) -> Dict[str, Any]: return {"port": self.__injector_port} @overrides(AbstractEthernetSensor.get_injector_label) - def get_injector_label(self): + def get_injector_label(self) -> str: return self.__retina_injector_label @overrides(AbstractEthernetSensor.get_translator) - def get_translator(self): + def get_translator(self) -> PushBotTranslator: return self.__translator @overrides(AbstractEthernetSensor.get_database_connection) - def get_database_connection(self): + def get_database_connection(self) -> PushBotRetinaConnection: """ :rtype: PushBotRetinaConnection """ diff --git a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_speaker_device.py b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_speaker_device.py index 8a1fac2dfa..50b4e20ab1 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_speaker_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/ethernet/push_bot_speaker_device.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Iterable, List from spinn_utilities.overrides import overrides from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) from spinn_front_end_common.utilities.exceptions import ConfigurationException +from spinn_front_end_common.utility_models import MultiCastCommand from .push_bot_device import PushBotEthernetDevice from spynnaker.pyNN.external_devices_models.push_bot.parameters import ( PushBotSpeaker) +from spynnaker.pyNN.protocols import MunichIoSpiNNakerLinkProtocol class PushBotEthernetSpeakerDevice( @@ -28,12 +31,12 @@ class PushBotEthernetSpeakerDevice( """ def __init__( - self, speaker, protocol, start_active_time=0, - start_total_period=0, start_frequency=0, start_melody=None, - timesteps_between_send=None): + self, speaker, protocol: MunichIoSpiNNakerLinkProtocol, + start_active_time=0, start_total_period=0, start_frequency=0, + start_melody=None, timesteps_between_send=None): """ :param PushBotSpeaker speaker: The speaker to control - :param MunichIoEthernetProtocol protocol: + :param MunichIoSpiNNakerLinkProtocol protocol: The protocol instance to get commands from :param int start_active_time: The "active time" to set at the start :param int start_total_period: The "total period" to set at the start @@ -58,12 +61,13 @@ def __init__( self.__start_melody = start_melody @overrides(PushBotEthernetDevice.set_command_protocol) - def set_command_protocol(self, command_protocol): + def set_command_protocol( + self, command_protocol: MunichIoSpiNNakerLinkProtocol): self.__command_protocol = command_protocol @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # add mode command if not done already if not self.protocol.sent_mode_command(): yield self.protocol.set_mode() @@ -82,12 +86,12 @@ def 
start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: yield self.__command_protocol.push_bot_speaker_config_total_period(0) yield self.__command_protocol.push_bot_speaker_config_active_time(0) yield self.__command_protocol.push_bot_speaker_set_tone(0) @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] diff --git a/spynnaker/pyNN/external_devices_models/push_bot/spinnaker_link/push_bot_retina_device.py b/spynnaker/pyNN/external_devices_models/push_bot/spinnaker_link/push_bot_retina_device.py index 86752b8d3c..44814256ed 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/spinnaker_link/push_bot_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/spinnaker_link/push_bot_retina_device.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import List from spinn_utilities.overrides import overrides from pacman.model.graphs.application import ApplicationSpiNNakerLinkVertex from spinn_front_end_common.utility_models import MultiCastCommand @@ -88,9 +89,10 @@ def new_key_command_payload(self): @property @overrides(AbstractPushBotRetinaDevice.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands( + self) -> List[MultiCastCommand]: # Update the commands with the additional one to set the key - new_commands = list() + new_commands: List[MultiCastCommand] = list() for command in super().start_resume_commands: if command.key == self._protocol.set_retina_transmission_key: # This has to be stored so that the payload can be updated diff --git a/spynnaker/pyNN/external_devices_models/spif_devices.py b/spynnaker/pyNN/external_devices_models/spif_devices.py index 8989a93e78..dad11dfb59 100644 --- a/spynnaker/pyNN/external_devices_models/spif_devices.py +++ b/spynnaker/pyNN/external_devices_models/spif_devices.py @@ -354,12 +354,12 @@ def __init__(self, key, get_payload, repeat, delay_between_repeats, index): @property @overrides(MultiCastCommand.payload) - def payload(self): + def payload(self) -> int: return self.__get_payload(self.__index) @property @overrides(MultiCastCommand.is_payload) - def is_payload(self): + def is_payload(self) -> bool: return True diff --git a/spynnaker/pyNN/external_devices_models/spif_input_device.py b/spynnaker/pyNN/external_devices_models/spif_input_device.py index 4d3c349729..143291291a 100644 --- a/spynnaker/pyNN/external_devices_models/spif_input_device.py +++ b/spynnaker/pyNN/external_devices_models/spif_input_device.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import math +from typing import Iterable, List, Tuple from spinn_utilities.overrides import overrides from pacman.model.graphs.application import ( ApplicationFPGAVertex, FPGAConnection) -from pacman.model.routing_info import BaseKeyAndMask +from pacman.model.graphs.common.slice import Slice +from pacman.model.graphs.machine import MachineFPGAVertex, MachineVertex +from pacman.model.routing_info import BaseKeyAndMask, RoutingInfo from pacman.utilities.constants import BITS_IN_KEY from pacman.utilities.utility_calls import get_n_bits from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) from spinn_front_end_common.utilities.exceptions import ConfigurationException +from spinn_front_end_common.utility_models import MultiCastCommand from spynnaker.pyNN.models.common import PopulationApplicationVertex from .spif_devices import ( SPIF_FPGA_ID, SPIF_OUTPUT_FPGA_LINK, SPIF_INPUT_FPGA_LINKS, @@ -180,13 +184,16 @@ def __fpga_index(self, fpga_link_id): return (fpga_link_id - 1) // 2 @overrides(ApplicationFPGAVertex.get_incoming_slice_for_link) - def get_incoming_slice_for_link(self, link, index): + def get_incoming_slice_for_link( + self, link: FPGAConnection, index: int) -> Slice: vertex_slice = super().get_incoming_slice_for_link(link, index) self.__index_by_slice[link.fpga_link_id, vertex_slice] = index return vertex_slice @overrides(ApplicationFPGAVertex.get_machine_fixed_key_and_mask) - def get_machine_fixed_key_and_mask(self, machine_vertex, partition_id): + def get_machine_fixed_key_and_mask(self, machine_vertex: MachineVertex, + partition_id: str) -> BaseKeyAndMask: + assert isinstance(machine_vertex, MachineFPGAVertex) fpga_link_id = machine_vertex.fpga_link_id vertex_slice = machine_vertex.vertex_slice index = self.__index_by_slice[fpga_link_id, vertex_slice] @@ -200,12 +207,12 @@ def get_machine_fixed_key_and_mask(self, machine_vertex, partition_id): return BaseKeyAndMask(fpga_key, fpga_mask) @overrides(ApplicationFPGAVertex.get_fixed_key_and_mask) - def get_fixed_key_and_mask(self, partition_id): + def get_fixed_key_and_mask(self, partition_id: str) -> BaseKeyAndMask: return BaseKeyAndMask(self.__base_key, self.__key_mask) @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # Make sure everything has stopped commands = [SpiNNFPGARegister.STOP.cmd()] @@ -266,17 +273,19 @@ def __spif_key(self, fpga_link_id): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: # Send the stop signal return [SpiNNFPGARegister.STOP.cmd()] @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] @overrides(PopulationApplicationVertex.get_atom_key_map) - def get_atom_key_map(self, pre_vertex, partition_id, routing_info): + def get_atom_key_map( + self, pre_vertex: MachineVertex, partition_id: str, + routing_info: RoutingInfo) -> Iterable[Tuple[int, int]]: # Work out which machine vertex start = pre_vertex.vertex_slice.lo_atom key_and_mask = self.get_machine_fixed_key_and_mask( diff --git a/spynnaker/pyNN/external_devices_models/spif_output_device.py b/spynnaker/pyNN/external_devices_models/spif_output_device.py index 846ec236dd..b953f4f26b 100644 --- a/spynnaker/pyNN/external_devices_models/spif_output_device.py +++ 
b/spynnaker/pyNN/external_devices_models/spif_output_device.py @@ -16,11 +16,13 @@ from spinn_utilities.overrides import overrides from spinn_utilities.config_holder import set_config from pacman.model.graphs.application import ( - ApplicationFPGAVertex, FPGAConnection) + ApplicationEdge, ApplicationEdgePartition, ApplicationFPGAVertex, + FPGAConnection) from pacman.model.graphs.machine import MachineVertex from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex, LiveOutputDevice, HasCustomAtomKeyMap) +from spinn_front_end_common.utility_models import MultiCastCommand from spynnaker.pyNN.models.common import PopulationApplicationVertex from spynnaker.pyNN.data.spynnaker_data_view import SpynnakerDataView from spynnaker.pyNN.spynnaker_external_device_plugin_manager import ( @@ -125,7 +127,8 @@ def __is_power_of_2(self, v): return (v & (v - 1) == 0) and (v != 0) @overrides(ApplicationFPGAVertex.add_incoming_edge) - def add_incoming_edge(self, edge, partition): + def add_incoming_edge( + self, edge: ApplicationEdge, partition: ApplicationEdgePartition): # Only add edges from PopulationApplicationVertices if not isinstance(edge.pre_vertex, PopulationApplicationVertex): if not isinstance(edge.pre_vertex, CommandSender): @@ -188,7 +191,7 @@ def _get_set_dist_mask_payload(self, index): @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # The commands here are delayed, as at the time of providing them, # we don't know the key or mask of the incoming link... commands = list() @@ -210,12 +213,12 @@ def start_resume_commands(self): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: return [] @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] @overrides(LiveOutputDevice.get_device_output_keys) diff --git a/spynnaker/pyNN/external_devices_models/spif_retina_device.py b/spynnaker/pyNN/external_devices_models/spif_retina_device.py index b184f179a9..ff6accf2fa 100644 --- a/spynnaker/pyNN/external_devices_models/spif_retina_device.py +++ b/spynnaker/pyNN/external_devices_models/spif_retina_device.py @@ -11,15 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Iterable, List, Tuple from spinn_utilities.overrides import overrides +from pacman.model.graphs.machine import MachineFPGAVertex from pacman.model.graphs.application import ( Application2DFPGAVertex, FPGAConnection) -from pacman.model.routing_info import BaseKeyAndMask +from pacman.model.graphs.common import Slice +from pacman.model.graphs.machine import MachineVertex +from pacman.model.routing_info import BaseKeyAndMask, RoutingInfo from pacman.utilities.constants import BITS_IN_KEY from pacman.utilities.utility_calls import is_power_of_2 from spinn_front_end_common.abstract_models import ( AbstractSendMeMulticastCommandsVertex) from spinn_front_end_common.utilities.exceptions import ConfigurationException +from spinn_front_end_common.utility_models import MultiCastCommand from spynnaker.pyNN.models.common import PopulationApplicationVertex from .spif_devices import ( SPIF_FPGA_ID, SPIF_OUTPUT_FPGA_LINK, SPIF_INPUT_FPGA_LINKS, @@ -201,13 +206,17 @@ def __fpga_indices(self, fpga_link_id): return fpga_x_index, fpga_y_index @overrides(Application2DFPGAVertex.get_incoming_slice_for_link) - def get_incoming_slice_for_link(self, link, index): + def get_incoming_slice_for_link( + self, link: FPGAConnection, index: int) -> Slice: vertex_slice = super().get_incoming_slice_for_link(link, index) self.__index_by_slice[link.fpga_link_id, vertex_slice] = index return vertex_slice @overrides(Application2DFPGAVertex.get_machine_fixed_key_and_mask) - def get_machine_fixed_key_and_mask(self, machine_vertex, partition_id): + def get_machine_fixed_key_and_mask( + self, machine_vertex: MachineVertex, + partition_id: str) -> BaseKeyAndMask: + assert isinstance(machine_vertex, MachineFPGAVertex) fpga_link_id = machine_vertex.fpga_link_id vertex_slice = machine_vertex.vertex_slice index = self.__index_by_slice[fpga_link_id, vertex_slice] @@ -223,14 +232,14 @@ def get_machine_fixed_key_and_mask(self, machine_vertex, partition_id): return BaseKeyAndMask(fpga_key, fpga_mask) @overrides(Application2DFPGAVertex.get_fixed_key_and_mask) - def get_fixed_key_and_mask(self, partition_id): + def get_fixed_key_and_mask(self, partition_id: str) -> BaseKeyAndMask: n_key_bits = BITS_IN_KEY - self._key_shift key_mask = ((1 << n_key_bits) - 1) << self._key_shift return BaseKeyAndMask(self.__base_key << self._key_shift, key_mask) @property @overrides(AbstractSendMeMulticastCommandsVertex.start_resume_commands) - def start_resume_commands(self): + def start_resume_commands(self) -> Iterable[MultiCastCommand]: # Make sure everything has stopped commands = [SpiNNFPGARegister.STOP.cmd()] @@ -297,17 +306,19 @@ def __spif_key(self, fpga_link_id): @property @overrides(AbstractSendMeMulticastCommandsVertex.pause_stop_commands) - def pause_stop_commands(self): + def pause_stop_commands(self) -> Iterable[MultiCastCommand]: # Send the stop signal yield SpiNNFPGARegister.STOP.cmd() @property @overrides(AbstractSendMeMulticastCommandsVertex.timed_commands) - def timed_commands(self): + def timed_commands(self) -> List[MultiCastCommand]: return [] @overrides(PopulationApplicationVertex.get_atom_key_map) - def get_atom_key_map(self, pre_vertex, partition_id, routing_info): + def get_atom_key_map( + self, pre_vertex: MachineVertex, partition_id: str, + routing_info: RoutingInfo) -> Iterable[Tuple[int, int]]: # Work out which machine vertex x_start, y_start = pre_vertex.vertex_slice.start key_and_mask = self.get_machine_fixed_key_and_mask( diff --git 
a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index 543374dadd..58cf3b5d63 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -104,16 +104,19 @@ def get_out_going_slices(self) -> List[Slice]: return self._get_fixed_slices() @overrides(AbstractSplitterCommon.get_out_going_vertices) - def get_out_going_vertices(self, partition_id) -> List[MachineVertex]: + def get_out_going_vertices( + self, partition_id: str) -> Sequence[MachineVertex]: return list(self.governed_app_vertex.machine_vertices) @overrides(AbstractSplitterCommon.get_in_coming_vertices) - def get_in_coming_vertices(self, partition_id) -> List[MachineVertex]: + def get_in_coming_vertices( + self, partition_id: str) -> Sequence[MachineVertex]: return list(self.governed_app_vertex.machine_vertices) @overrides(AbstractSplitterCommon.get_source_specific_in_coming_vertices) def get_source_specific_in_coming_vertices( - self, source_vertex: ApplicationVertex, partition_id) -> List[ + self, source_vertex: ApplicationVertex, + partition_id: str) -> List[ Tuple[MachineVertex, Sequence[MachineVertex]]]: # Determine the real pre-vertex pre_vertex = source_vertex @@ -138,7 +141,7 @@ def get_source_specific_in_coming_vertices( @overrides(AbstractSplitterCommon.machine_vertices_for_recording) def machine_vertices_for_recording( - self, variable_to_record) -> Iterable[MachineVertex]: + self, variable_to_record: str) -> Iterable[MachineVertex]: return self.governed_app_vertex.machine_vertices def create_machine_vertex( diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py index 27570bf56c..8aa987a13d 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py @@ -564,12 +564,12 @@ def get_out_going_slices(self) -> Sequence[Slice]: return self._get_fixed_slices() @overrides(AbstractSplitterCommon.get_out_going_vertices) - def get_out_going_vertices(self, partition_id) -> Sequence[ + def get_out_going_vertices(self, partition_id: str) -> Sequence[ PopulationNeuronsMachineVertex]: return self.__neuron_vertices @overrides(AbstractSplitterCommon.get_in_coming_vertices) - def get_in_coming_vertices(self, partition_id) -> Sequence[ + def get_in_coming_vertices(self, partition_id: str) -> Sequence[ PopulationSynapsesMachineVertexCommon]: return self.__synapse_vertices diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py index 4690805737..0e573edd19 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_delay_vertex_slice.py @@ -11,7 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, Sequence, Tuple + +from typing import Dict, Iterable, Sequence, Tuple from spinn_utilities.overrides import overrides from pacman.exceptions import ( PacmanConfigurationException, PacmanInvalidParameterException) @@ -136,7 +137,8 @@ def get_sdram_used_by_atoms(self) -> AbstractSDRAM: DelayExtensionMachineVertex.N_EXTRA_PROVENANCE_DATA_ENTRIES)) @overrides(AbstractSplitterCommon.machine_vertices_for_recording) - def machine_vertices_for_recording(self, variable_to_record): + def machine_vertices_for_recording( + self, variable_to_record: str) -> Iterable[MachineVertex]: raise PacmanInvalidParameterException( variable_to_record, variable_to_record, self.DELAY_RECORDING_ERROR) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py index 2e54f3259d..dfbb91d93e 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py @@ -20,7 +20,6 @@ from typing import TYPE_CHECKING from spinn_utilities.abstract_base import AbstractBase from spinn_utilities.overrides import overrides -from pacman.model.graphs.application import ApplicationEdge from spynnaker.pyNN.models.neural_projections.connectors import ( AbstractConnector) from spynnaker.pyNN.exceptions import SynapticConfigurationException @@ -33,7 +32,8 @@ from pyNN.random import RandomDistribution from spynnaker.pyNN.utilities.utility_calls import check_rng if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) # Hashes of the connection generators supported by the synapse expander @@ -57,7 +57,7 @@ class AbstractGenerateConnectorOnMachine( @overrides(AbstractConnector.validate_connection) def validate_connection( - self, application_edge: ApplicationEdge, + self, application_edge: ProjectionApplicationEdge, synapse_info: SynapseInformation): # If we can't generate on machine, we must be able to generate on host if not self.generate_on_machine(synapse_info): diff --git a/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py index 1f44605a31..c9fa80f00c 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py @@ -73,12 +73,12 @@ def __init__(self, array: NDArray[uint8], self.__array_dims = dims @overrides(AbstractConnector.get_delay_maximum) - def get_delay_maximum(self, synapse_info: SynapseInformation): + def get_delay_maximum(self, synapse_info: SynapseInformation) -> float: return self._get_delay_maximum( synapse_info.delays, len(self.__array), synapse_info) @overrides(AbstractConnector.get_delay_minimum) - def get_delay_minimum(self, synapse_info: SynapseInformation): + def get_delay_minimum(self, synapse_info: SynapseInformation) -> float: return self._get_delay_minimum( synapse_info.delays, len(self.__array), synapse_info) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py index e770f6c1ce..46eaea823b 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py +++ 
b/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py @@ -14,7 +14,7 @@ from __future__ import annotations import numpy from numpy.typing import NDArray -from typing import List, Optional, Tuple, TYPE_CHECKING +from typing import List, Optional, Tuple, TYPE_CHECKING, Sequence from spinn_utilities.overrides import overrides from pacman.model.graphs.common import Slice from .abstract_connector import AbstractConnector @@ -149,8 +149,8 @@ def get_weight_maximum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: n_connections, pair_list = self._get_n_connections( post_vertex_slice, synapse_info) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py index f27c9e127c..749b7e0bf8 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py @@ -21,7 +21,7 @@ minimum, e, pi, floating) from numpy.typing import NDArray from pyNN.random import NumpyRNG -from typing import Optional, TYPE_CHECKING +from typing import Optional, Sequence, TYPE_CHECKING from spinn_utilities.overrides import overrides from spinn_utilities.safe_eval import SafeEval from pacman.model.graphs.common import Slice @@ -159,7 +159,7 @@ def get_delay_minimum(self, synapse_info: SynapseInformation) -> float: def get_n_connections_from_pre_vertex_maximum( self, n_post_atoms: int, synapse_info: SynapseInformation, min_delay: Optional[float] = None, - max_delay: Optional[float] = None): + max_delay: Optional[float] = None) -> int: max_prob = numpy.amax(self._probs) n_connections = get_probable_maximum_selected( synapse_info.n_pre_neurons * synapse_info.n_post_neurons, @@ -193,8 +193,8 @@ def get_weight_maximum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: probs = self._probs[:, post_vertex_slice.get_raster_ids()].reshape(-1) n_items = synapse_info.n_pre_neurons * post_vertex_slice.n_atoms items = self.__rng.next(n_items) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py index 7cefc03ad2..468413211d 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py @@ -17,7 +17,7 @@ from numpy import integer, uint32 from numpy.typing import NDArray from pyNN.random import NumpyRNG -from typing import List, Optional, TYPE_CHECKING +from typing import List, Optional, Sequence, TYPE_CHECKING from spinn_utilities.overrides import overrides from pacman.model.graphs.common import Slice from 
spinn_front_end_common.utilities.constants import BYTES_PER_WORD @@ -29,7 +29,8 @@ from .abstract_generate_connector_on_host import ( AbstractGenerateConnectorOnHost) if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) N_GEN_PARAMS = 8 @@ -256,8 +257,8 @@ def get_weight_maximum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: # Get lo and hi for the pre vertex lo = 0 hi = synapse_info.n_pre_neurons - 1 @@ -327,6 +328,7 @@ def gen_connector_params_size_in_bytes(self) -> int: @overrides(AbstractConnector.validate_connection) def validate_connection( - self, application_edge, synapse_info: SynapseInformation): + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): if self.generate_on_machine(synapse_info): utility_calls.check_rng(self.__rng, "FixedNumberPostConnector") diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py index 4f4eb74cac..aea70023ce 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py @@ -17,7 +17,7 @@ from numpy import integer, uint32 from numpy.typing import NDArray from pyNN.random import NumpyRNG -from typing import List, Optional, TYPE_CHECKING +from typing import List, Optional, Sequence, TYPE_CHECKING from spinn_utilities.overrides import overrides from pacman.model.graphs.common import Slice from spinn_front_end_common.utilities.constants import BYTES_PER_WORD @@ -29,7 +29,8 @@ from .abstract_generate_connector_on_host import ( AbstractGenerateConnectorOnHost) if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) class FixedNumberPreConnector(AbstractGenerateConnectorOnMachine, @@ -216,8 +217,8 @@ def get_weight_maximum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: # Get lo and hi for the post vertex lo = post_vertex_slice.lo_atom hi = post_vertex_slice.hi_atom @@ -287,6 +288,7 @@ def gen_connector_params_size_in_bytes(self) -> int: @overrides(AbstractConnector.validate_connection) def validate_connection( - self, application_edge, synapse_info: SynapseInformation): + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): if self.generate_on_machine(synapse_info): utility_calls.check_rng(self.__rng, "FixedNumberPreConnector") diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py 
b/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py index 7f11599956..c0310dc948 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py @@ -32,7 +32,8 @@ from .abstract_generate_connector_on_host import ( AbstractGenerateConnectorOnHost) if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) logger = FormatAdapter(logging.getLogger(__name__)) @@ -114,7 +115,8 @@ def get_delay_minimum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractConnector.get_n_connections_from_pre_vertex_maximum) def get_n_connections_from_pre_vertex_maximum( self, n_post_atoms: int, synapse_info: SynapseInformation, - min_delay=None, max_delay=None) -> int: + min_delay: Optional[float] = None, + max_delay: Optional[float] = None) -> int: n_connections = get_probable_maximum_selected( synapse_info.n_pre_neurons * synapse_info.n_post_neurons, n_post_atoms, self._p_connect, chance=1.0/10000.0) @@ -215,6 +217,7 @@ def p_connect(self, new_value: float): @overrides(AbstractConnector.validate_connection) def validate_connection( - self, application_edge, synapse_info: SynapseInformation): + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): if self.generate_on_machine(synapse_info): check_rng(self.__rng, "FixedProbabilityConnector") diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index efd8c015b5..2821e7271e 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -29,13 +29,14 @@ from pacman.model.graphs.common import Slice from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.exceptions import InvalidParameterType -from spynnaker.pyNN.types import Weight_Types +from spynnaker.pyNN.types import Delay_Types, Weight_Delay_Types, Weight_Types from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID from .abstract_connector import AbstractConnector from .abstract_generate_connector_on_host import ( AbstractGenerateConnectorOnHost) if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) from spynnaker.pyNN.models.neuron.synapse_dynamics import ( AbstractSynapseDynamics) @@ -47,8 +48,9 @@ _FIRST_PARAM = 2 -def _is_sequential(value) -> TypeGuard[Union[List, NDArray]]: - return isinstance(value, (list, numpy.ndarray)) +def _is_sequential(value: Weight_Delay_Types + ) -> TypeGuard[NDArray[numpy.float64]]: + return isinstance(value, numpy.ndarray) @dataclass(frozen=True) @@ -146,8 +148,8 @@ def get_delay_minimum(self, synapse_info: SynapseInformation) -> float: return numpy.min(self.__delays) @overrides(AbstractConnector.get_delay_variance) - def get_delay_variance( - self, delays, synapse_info: SynapseInformation) -> float: + def get_delay_variance(self, delays: Delay_Types, + synapse_info: SynapseInformation) -> float: if self.__delays is None: if _is_sequential(synapse_info.delays): return float(numpy.var(synapse_info.delays)) @@ -165,8 +167,9 @@ def __id_to_m_vertex_index(self, 
n_atoms: int, slices: Sequence[Slice]): mapping[s.get_raster_ids()] = i return mapping - def _split_connections(self, n_pre_atoms: int, n_post_atoms: int, - post_slices: Sequence[Slice]): + def _split_connections( + self, n_pre_atoms: int, n_post_atoms: int, + post_slices: Sequence[Slice]): """ :param list(~pacman.model.graphs.common.Slice) post_slices: """ @@ -217,16 +220,19 @@ def _split_connections(self, n_pre_atoms: int, n_post_atoms: int, @overrides(AbstractConnector.get_n_connections_from_pre_vertex_maximum) def get_n_connections_from_pre_vertex_maximum( self, n_post_atoms: int, synapse_info: SynapseInformation, - min_delay=None, max_delay=None) -> int: + min_delay: Optional[float] = None, + max_delay: Optional[float] = None) -> int: mask = None delays_handled = False - if (min_delay is not None and max_delay is not None and - (self.__delays is not None or - _is_sequential(synapse_info.delays))): - delays = synapse_info.delays if self.__delays is None \ - else self.__delays - mask = ((delays >= min_delay) & (delays <= max_delay)) - delays_handled = True + if (min_delay is not None and max_delay is not None): + if self.__delays is not None: + mask = ((self.__delays >= min_delay) & + (self.__delays <= max_delay)) + delays_handled = True + elif _is_sequential(synapse_info.delays): + delays = synapse_info.delays + mask = ((delays >= min_delay) & (delays <= max_delay)) + delays_handled = True if mask is None: conns = self.__conn_list.copy() else: @@ -273,15 +279,16 @@ def __numpy_group(conns: NDArray, column: int) -> List[NDArray]: return numpy.array_split(conns, split_points) @overrides(AbstractConnector.get_n_connections_to_post_vertex_maximum) - def get_n_connections_to_post_vertex_maximum(self, synapse_info) -> int: + def get_n_connections_to_post_vertex_maximum( + self, synapse_info: SynapseInformation) -> int: if not len(self.__targets): return 0 return int(numpy.max(numpy.bincount( self.__targets.astype(int64, copy=False)))) @overrides(AbstractConnector.get_weight_mean) - def get_weight_mean( - self, weights, synapse_info: SynapseInformation) -> float: + def get_weight_mean(self, weights: Weight_Types, + synapse_info: SynapseInformation) -> float: if self.__weights is None: if _is_sequential(synapse_info.weights): return float(numpy.mean(synapse_info.weights)) @@ -548,8 +555,9 @@ def _apply_parameters_to_synapse_type( synapse_type.set_value(name, self.__extra_params.data[:, i]) @overrides(AbstractConnector.validate_connection) - def validate_connection(self, application_edge, - synapse_info: SynapseInformation): + def validate_connection( + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): out_of_range_targets = self.__targets >= synapse_info.n_post_neurons if any(out_of_range_targets): logger.warning( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py index e63777b10e..883d113d95 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py @@ -21,7 +21,7 @@ minimum, e, pi) from numpy.typing import NDArray from pyNN.random import NumpyRNG -from typing import Optional, TYPE_CHECKING +from typing import Optional, Sequence, TYPE_CHECKING from spinn_utilities.overrides import overrides from spinn_utilities.safe_eval import SafeEval from pacman.model.graphs.common import Slice 
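A minimal numpy sketch (not part of the patch; the delay values are made up) of the delay-mask step that the FromListConnector hunk above splits into two branches, so the bounds check is only ever applied to an actual array of delays:

import numpy

delays = numpy.array([1.0, 2.5, 4.0, 7.0])   # hypothetical per-connection delays
min_delay, max_delay = 2.0, 5.0
mask = (delays >= min_delay) & (delays <= max_delay)
print(delays[mask])      # [2.5 4. ] -- connections inside the delay window
print(int(mask.sum()))   # 2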
@@ -164,8 +164,8 @@ def get_weight_maximum( @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: # setup probs here probs = self._update_probs_from_index_expression(synapse_info) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 2a27685eda..c6a3dc42dc 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -17,9 +17,11 @@ from numpy.typing import NDArray from pyNN.random import RandomDistribution from pyNN.space import Space -from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING from typing_extensions import TypeAlias from spinn_utilities.overrides import overrides +from pacman.model.graphs import AbstractVertex +from pacman.model.graphs.application import ApplicationVertex from pacman.model.graphs.machine import MachineVertex from pacman.model.graphs.common import Slice from spinn_front_end_common.interface.ds import DataType @@ -35,8 +37,8 @@ AbstractGenerateConnectorOnHost) from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections.synapse_information import ( - SynapseInformation) + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) _TwoD: TypeAlias = Union[List[int], Tuple[int, int]] _Kernel: TypeAlias = Union[ @@ -401,12 +403,14 @@ def get_delay_variance(self, delays: Delay_Types, @overrides(AbstractConnector.get_n_connections_from_pre_vertex_maximum) def get_n_connections_from_pre_vertex_maximum( - self, n_post_atoms: int, synapse_info, - min_delay=None, max_delay=None) -> int: + self, n_post_atoms: int, synapse_info: SynapseInformation, + min_delay: Optional[float] = None, + max_delay: Optional[float] = None) -> int: return numpy.clip(self._kernel_h * self._kernel_w, 0, n_post_atoms) @overrides(AbstractConnector.get_n_connections_to_post_vertex_maximum) - def get_n_connections_to_post_vertex_maximum(self, synapse_info) -> int: + def get_n_connections_to_post_vertex_maximum( + self, synapse_info: SynapseInformation) -> int: return numpy.clip(self._kernel_h * self._kernel_w, 0, 255) @overrides(AbstractConnector.get_weight_maximum) @@ -442,8 +446,8 @@ def __repr__(self): @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: (n_connections, all_post, all_pre_in_range, all_pre_in_range_delays, all_pre_in_range_weights) = self.__compute_statistics( synapse_info.weights, synapse_info.delays, post_vertex_slice, @@ -503,7 +507,9 @@ def gen_connector_params_size_in_bytes(self) -> int: @overrides(AbstractGenerateConnectorOnMachine.get_connected_vertices) def get_connected_vertices( - self, s_info: SynapseInformation, source_vertex, target_vertex): + self, s_info: SynapseInformation, source_vertex: 
ApplicationVertex, + target_vertex: ApplicationVertex) -> Sequence[ + Tuple[MachineVertex, Sequence[AbstractVertex]]]: src_splitter = source_vertex.splitter return [ (t_vert, @@ -543,7 +549,9 @@ def __connects(self, src_machine_vertex: MachineVertex, return True @overrides(AbstractConnector.validate_connection) - def validate_connection(self, application_edge, synapse_info): + def validate_connection( + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): pre = application_edge.pre_vertex post = application_edge.post_vertex if len(pre.atoms_shape) != 1 or len(post.atoms_shape) != 1: diff --git a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py index d856114f9a..fc02773f35 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py @@ -30,7 +30,8 @@ from .abstract_generate_connector_on_host import ( AbstractGenerateConnectorOnHost) if TYPE_CHECKING: - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) class MultapseConnector(AbstractGenerateConnectorOnMachine, @@ -184,7 +185,8 @@ def _get_n_connections(self, post_slice_index: int) -> int: @overrides(AbstractConnector.get_n_connections_from_pre_vertex_maximum) def get_n_connections_from_pre_vertex_maximum( self, n_post_atoms: int, synapse_info: SynapseInformation, - min_delay=None, max_delay=None) -> int: + min_delay: Optional[float] = None, + max_delay: Optional[float] = None) -> int: # If the chance of there being a connection in the slice is almost 0, # there will probably be at least 1 connection somewhere prob_in_slice = min( @@ -296,6 +298,7 @@ def gen_connector_params_size_in_bytes(self) -> int: @overrides(AbstractConnector.validate_connection) def validate_connection( - self, application_edge, synapse_info: SynapseInformation): + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): if self.generate_on_machine(synapse_info): utility_calls.check_rng(self.__rng, "MultapseConnector") diff --git a/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py index 86c40800e1..2936f24672 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py @@ -17,7 +17,7 @@ from numpy import integer, floating, uint32 from numpy.typing import NDArray from pyNN.random import RandomDistribution -from typing import Sequence, Tuple, TYPE_CHECKING +from typing import Optional, Sequence, Tuple, TYPE_CHECKING from spinn_utilities.overrides import overrides from spinn_utilities.safe_eval import SafeEval from pacman.model.graphs.application import ApplicationVertex @@ -83,8 +83,9 @@ def get_delay_minimum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractConnector.get_n_connections_from_pre_vertex_maximum) def get_n_connections_from_pre_vertex_maximum( - self, n_post_atoms, synapse_info: SynapseInformation, - min_delay=None, max_delay=None) -> int: + self, n_post_atoms: int, synapse_info: SynapseInformation, + min_delay: Optional[float] = None, + max_delay: Optional[float] = None) -> int: delays = synapse_info.delays if min_delay is None or 
max_delay is None or delays is None: @@ -109,7 +110,8 @@ def get_n_connections_from_pre_vertex_maximum( (min_delay <= slice_min_delay <= max_delay)) @overrides(AbstractConnector.get_n_connections_to_post_vertex_maximum) - def get_n_connections_to_post_vertex_maximum(self, synapse_info) -> int: + def get_n_connections_to_post_vertex_maximum( + self, synapse_info: SynapseInformation) -> int: return 1 @overrides(AbstractConnector.get_weight_maximum) @@ -121,8 +123,8 @@ def get_weight_maximum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: # Get each pre_vertex id for each post_vertex id post_atoms = post_vertex_slice.get_raster_ids() pre_atoms = numpy.array(post_atoms) @@ -255,7 +257,7 @@ def __connects( return True @overrides(AbstractGenerateConnectorOnMachine.generate_on_machine) - def generate_on_machine(self, synapse_info): + def generate_on_machine(self, synapse_info: SynapseInformation) -> bool: # If we are doing a 1:1 connector and the pre or post vertex is # multi-dimensional and have different dimensions pre = synapse_info.pre_vertex diff --git a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py index 286d0c54d3..a55ad1f57d 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py @@ -198,7 +198,8 @@ def __get_n_weights( @overrides(AbstractConnector.validate_connection) def validate_connection( - self, application_edge: ProjectionApplicationEdge, synapse_info): + self, application_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation): pre = application_edge.pre_vertex post = application_edge.post_vertex if len(pre.atoms_shape) != 2: diff --git a/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py index 52952505f6..1291c398c0 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py @@ -16,7 +16,7 @@ import numpy from numpy.typing import NDArray from pyNN.random import NumpyRNG -from typing import Optional, TYPE_CHECKING +from typing import Optional, Sequence, TYPE_CHECKING from spinn_utilities.overrides import overrides from pacman.model.graphs.common import Slice from spinn_front_end_common.utilities.exceptions import ConfigurationException @@ -136,7 +136,8 @@ def get_delay_minimum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractConnector.get_n_connections_from_pre_vertex_maximum) def get_n_connections_from_pre_vertex_maximum( self, n_post_atoms: int, synapse_info: SynapseInformation, - min_delay=None, max_delay=None) -> int: + min_delay: Optional[float] = None, + max_delay: Optional[float] = None) -> int: assert self.__mask is not None # Break the array into n_post_atoms units split_positions = numpy.arange( @@ -171,8 +172,8 @@ def get_weight_maximum(self, synapse_info: SynapseInformation) -> float: @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def 
create_synaptic_block( - self, post_slices, post_vertex_slice: Slice, synapse_type: int, - synapse_info: SynapseInformation) -> NDArray: + self, post_slices: Sequence[Slice], post_vertex_slice: Slice, + synapse_type: int, synapse_info: SynapseInformation) -> NDArray: if self.__mask is None: return numpy.zeros(0, dtype=self.NUMPY_SYNAPSES_DTYPE) raster_ids = post_vertex_slice.get_raster_ids() diff --git a/spynnaker/pyNN/models/neuron/local_only/abstract_local_only.py b/spynnaker/pyNN/models/neuron/local_only/abstract_local_only.py index eca8231ed2..cfc8dbd047 100644 --- a/spynnaker/pyNN/models/neuron/local_only/abstract_local_only.py +++ b/spynnaker/pyNN/models/neuron/local_only/abstract_local_only.py @@ -59,7 +59,7 @@ def get_parameters_usage_in_bytes( def write_parameters( self, spec: DataSpecificationGenerator, region: int, machine_vertex: PopulationMachineLocalOnlyCombinedVertex, - weight_scales: NDArray[floating]) -> None: + weight_scales: NDArray[floating]): """ Write the parameters to the data specification for a vertex. diff --git a/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py b/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py index 994bc28ef1..b83319f773 100644 --- a/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py +++ b/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py @@ -42,6 +42,8 @@ from spynnaker.pyNN.models.projection import Projection from spynnaker.pyNN.models.neuron import ( PopulationMachineLocalOnlyCombinedVertex) + from spynnaker.pyNN.models.neuron.synapse_dynamics import ( + AbstractSynapseDynamics) #: Size of convolution config main bytes @@ -78,7 +80,8 @@ def _delay(self) -> float: return cast(float, self.delay) @overrides(AbstractLocalOnly.merge) - def merge(self, synapse_dynamics) -> LocalOnlyConvolution: + def merge(self, synapse_dynamics: AbstractSynapseDynamics + ) -> LocalOnlyConvolution: if not isinstance(synapse_dynamics, LocalOnlyConvolution): raise SynapticConfigurationException( "All targets of this Population must have a synapse_type of" @@ -96,7 +99,8 @@ def changes_during_run(self) -> bool: @overrides(AbstractLocalOnly.get_parameters_usage_in_bytes) def get_parameters_usage_in_bytes( - self, n_atoms, incoming_projections: Iterable[Projection]) -> int: + self, n_atoms: int, + incoming_projections: Iterable[Projection]) -> int: # pylint: disable=protected-access n_bytes = 0 kernel_bytes = 0 @@ -134,7 +138,8 @@ def write_parameters( sources = self.__get_sources_for_target(app_vertex) size = self.get_parameters_usage_in_bytes( - machine_vertex.vertex_slice, app_vertex.incoming_projections) + machine_vertex.vertex_slice.n_atoms, + app_vertex.incoming_projections) spec.reserve_memory_region(region, size, label="LocalOnlyConvolution") spec.switch_write_focus(region) diff --git a/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py b/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py index d3a10043d5..9efc99cfce 100644 --- a/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py +++ b/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py @@ -39,6 +39,8 @@ from spynnaker.pyNN.models.neuron import ( PopulationMachineLocalOnlyCombinedVertex) from spynnaker.pyNN.models.neuron import AbstractPopulationVertex + from spynnaker.pyNN.models.neuron.synapse_dynamics import ( + AbstractSynapseDynamics) #: Size of the source information SOURCE_INFO_SIZE = KEY_INFO_SIZE + BYTES_PER_WORD @@ -78,7 +80,8 @@ def _delay(self) -> float: return cast(float, 
self.delay) @overrides(AbstractLocalOnly.merge) - def merge(self, synapse_dynamics) -> LocalOnlyPoolDense: + def merge(self, synapse_dynamics: AbstractSynapseDynamics + ) -> LocalOnlyPoolDense: if not isinstance(synapse_dynamics, LocalOnlyPoolDense): raise SynapticConfigurationException( "All Projections of this Population must have a synapse_type" diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py index 83268d2ab7..0775346b37 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from numpy import floating +from numpy.typing import NDArray from spinn_utilities.abstract_base import AbstractBase, abstractmethod -from spinn_front_end_common.interface.ds import DataSpecificationGenerator +from spinn_front_end_common.interface.ds import DataSpecificationBase from spynnaker.pyNN.models.neuron.synapse_dynamics import ( AbstractHasParameterNames) from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( @@ -86,8 +87,8 @@ def n_weight_terms(self) -> int: @abstractmethod def write_parameters( - self, spec: DataSpecificationGenerator, global_weight_scale: float, - synapse_weight_scales: List[float]): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): """ Write the parameters of the rule to the spec. diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index b9c363cb85..59fa753188 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from numpy import floating +from numpy.typing import NDArray from typing import Iterable from spinn_utilities.overrides import overrides -from spinn_front_end_common.interface.ds import DataSpecificationGenerator +from spinn_front_end_common.interface.ds import DataSpecificationBase from spinn_front_end_common.utilities.constants import ( BYTES_PER_SHORT, BYTES_PER_WORD) from spynnaker.pyNN.data import SpynnakerDataView @@ -136,7 +138,7 @@ def A_minus(self, new_value: float): self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) - def is_same_as(self, timing_dependence) -> bool: + def is_same_as(self, timing_dependence: AbstractTimingDependence) -> bool: if not isinstance( timing_dependence, TimingDependencePfisterSpikeTriplet): return False @@ -184,8 +186,8 @@ def n_weight_terms(self) -> int: @overrides(AbstractTimingDependence.write_parameters) def write_parameters( - self, spec: DataSpecificationGenerator, - global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Write lookup tables spec.write_array(self.__tau_plus_data) spec.write_array(self.__tau_minus_data) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py index 4780bd55c0..71caaa81b5 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py @@ -13,7 +13,9 @@ # limitations under the License. import numpy -from typing import Iterable, cast +from numpy import floating +from numpy.typing import NDArray +from typing import cast, Iterable from spinn_utilities.overrides import overrides from spinn_front_end_common.interface.ds import ( DataType, DataSpecificationBase) @@ -105,7 +107,7 @@ def A_minus(self, new_value: float): self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) - def is_same_as(self, timing_dependence) -> bool: + def is_same_as(self, timing_dependence: AbstractTimingDependence) -> bool: if not isinstance(timing_dependence, TimingDependenceRecurrent): return False # pylint: disable=protected-access @@ -160,8 +162,8 @@ def n_weight_terms(self) -> int: @overrides(AbstractTimingDependence.write_parameters) def write_parameters( - self, spec: DataSpecificationBase, - global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Write parameters spec.write_value(data=self.__accumulator_depression_plus_one, data_type=DataType.INT32) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index 5b0eaeb498..25a59199d6 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from numpy import floating +from numpy.typing import NDArray from typing import Iterable from spinn_utilities.overrides import overrides from spinn_front_end_common.utilities.constants import BYTES_PER_WORD @@ -102,7 +104,8 @@ def A_minus(self, new_value: float): self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) - def is_same_as(self, timing_dependence) -> bool: + def is_same_as( + self, timing_dependence: AbstractTimingDependence) -> bool: if not isinstance(timing_dependence, TimingDependenceSpikeNearestPair): return False return (self.__tau_plus == timing_dependence.tau_plus and @@ -144,8 +147,8 @@ def n_weight_terms(self) -> int: @overrides(AbstractTimingDependence.write_parameters) def write_parameters( - self, spec: DataSpecificationBase, - global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Write lookup tables spec.write_array(self.__tau_plus_data) spec.write_array(self.__tau_minus_data) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index 9456fe631a..1c1caca07f 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from numpy import floating +from numpy.typing import NDArray +from typing import Iterable from spinn_utilities.overrides import overrides +from spinn_front_end_common.interface.ds import DataSpecificationBase from spinn_front_end_common.utilities.constants import ( BYTES_PER_SHORT, BYTES_PER_WORD) from spynnaker.pyNN.data import SpynnakerDataView @@ -101,7 +105,8 @@ def A_minus(self, new_value): self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) - def is_same_as(self, timing_dependence): + def is_same_as( + self, timing_dependence: AbstractTimingDependence) -> bool: if not isinstance(timing_dependence, TimingDependenceSpikePair): return False return (self.__tau_plus == timing_dependence.tau_plus and @@ -128,7 +133,7 @@ def pre_trace_n_bytes(self): return BYTES_PER_SHORT @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes) - def get_parameters_sdram_usage_in_bytes(self): + def get_parameters_sdram_usage_in_bytes(self) -> int: return BYTES_PER_WORD * (len(self.__tau_plus_data) + len(self.__tau_minus_data)) @@ -143,11 +148,12 @@ def n_weight_terms(self): @overrides(AbstractTimingDependence.write_parameters) def write_parameters( - self, spec, global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Write lookup tables spec.write_array(self.__tau_plus_data) spec.write_array(self.__tau_minus_data) @overrides(AbstractTimingDependence.get_parameter_names) - def get_parameter_names(self): + def get_parameter_names(self) -> Iterable[str]: return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index 6995bb1b75..23daf734db 100644 --- 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -12,8 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +from numpy import floating +from numpy.typing import NDArray +from typing import Iterable from spinn_utilities.overrides import overrides -from spinn_front_end_common.interface.ds import DataType +from spinn_front_end_common.interface.ds import ( + DataSpecificationBase, DataType) from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, BYTES_PER_SHORT) from spynnaker.pyNN.data import SpynnakerDataView @@ -100,7 +104,7 @@ def A_minus(self, new_value): self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) - def is_same_as(self, timing_dependence): + def is_same_as(self, timing_dependence: AbstractTimingDependence) -> bool: if not isinstance(timing_dependence, TimingDependenceVogels2011): return False return (self.__tau == timing_dependence.tau and @@ -126,7 +130,7 @@ def pre_trace_n_bytes(self): return BYTES_PER_SHORT @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes) - def get_parameters_sdram_usage_in_bytes(self): + def get_parameters_sdram_usage_in_bytes(self) -> int: return BYTES_PER_WORD + BYTES_PER_WORD * len(self.__tau_data) @property @@ -140,7 +144,8 @@ def n_weight_terms(self): @overrides(AbstractTimingDependence.write_parameters) def write_parameters( - self, spec, global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Write alpha to spec fixed_point_alpha = float_to_fixed(self.__alpha) spec.write_value(data=fixed_point_alpha, data_type=DataType.INT32) @@ -149,5 +154,5 @@ def write_parameters( spec.write_array(self.__tau_data) @overrides(AbstractTimingDependence.get_parameter_names) - def get_parameter_names(self): + def get_parameter_names(self) -> Iterable[str]: return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py index 407e37cf72..46dc123ce1 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from numpy import floating +from numpy.typing import NDArray from spinn_utilities.abstract_base import AbstractBase, abstractmethod -from spinn_front_end_common.interface.ds import DataSpecificationGenerator +from spinn_front_end_common.interface.ds import DataSpecificationBase from spynnaker.pyNN.models.neuron.synapse_dynamics import ( AbstractHasParameterNames) @@ -23,7 +24,8 @@ class AbstractWeightDependence( __slots__ = () @abstractmethod - def is_same_as(self, weight_dependence) -> bool: + def is_same_as( + self, weight_dependence: "AbstractWeightDependence") -> bool: """ Determine if this weight dependence is the same as another. 
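The annotated is_same_as above uses a quoted forward reference ("AbstractWeightDependence") so a class can name itself in its own signature. A small sketch of the same pattern with a hypothetical WeightRule class, using postponed evaluation of annotations instead of quoting:

from __future__ import annotations


class WeightRule:
    def __init__(self, w_min: float, w_max: float):
        self.w_min = w_min
        self.w_max = w_max

    def is_same_as(self, other: WeightRule) -> bool:
        # Same shape as the annotated is_same_as above: reject other rule
        # types first, then compare the parameters that matter.
        if not isinstance(other, WeightRule):
            return False
        return self.w_min == other.w_min and self.w_max == other.w_max


print(WeightRule(0.0, 1.0).is_same_as(WeightRule(0.0, 1.0)))  # True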
@@ -56,8 +58,8 @@ def get_parameters_sdram_usage_in_bytes( @abstractmethod def write_parameters( - self, spec: DataSpecificationGenerator, global_weight_scale: float, - synapse_weight_scales: List[float], n_weight_terms: int): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating], n_weight_terms: int): """ Write the parameters of the rule to the spec. diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py index a8c885f65a..3f270a59f5 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from numpy import floating +from numpy.typing import NDArray from typing import Iterable from spinn_utilities.overrides import overrides -from spinn_front_end_common.interface.ds import DataType, DataSpecificationBase +from spinn_front_end_common.interface.ds import ( + DataType, DataSpecificationBase) from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus from .abstract_weight_dependence import AbstractWeightDependence @@ -61,7 +64,7 @@ def w_max(self) -> float: return self.__w_max @overrides(AbstractWeightDependence.is_same_as) - def is_same_as(self, weight_dependence) -> bool: + def is_same_as(self, weight_dependence: AbstractWeightDependence) -> bool: if not isinstance(weight_dependence, WeightDependenceAdditive): return False return ( @@ -81,7 +84,7 @@ def vertex_executable_suffix(self) -> str: @overrides(AbstractWeightDependence.get_parameters_sdram_usage_in_bytes) def get_parameters_sdram_usage_in_bytes( - self, n_synapse_types, n_weight_terms) -> int: + self, n_synapse_types: int, n_weight_terms: int) -> int: if n_weight_terms != 1: raise NotImplementedError( "Additive weight dependence only supports one term") @@ -89,9 +92,8 @@ def get_parameters_sdram_usage_in_bytes( @overrides(AbstractWeightDependence.write_parameters) def write_parameters( - self, spec: DataSpecificationBase, - global_weight_scale, synapse_weight_scales, - n_weight_terms): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating], n_weight_terms: int): # Loop through each synapse type for _ in synapse_weight_scales: # Scale the weights diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index 97d173a9d1..de4674afb3 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from numpy import floating +from numpy.typing import NDArray from typing import Iterable from spinn_utilities.overrides import overrides from spinn_front_end_common.interface.ds import ( @@ -92,7 +94,7 @@ def A3_minus(self) -> float: return self.__a3_minus @overrides(AbstractWeightDependence.is_same_as) - def is_same_as(self, weight_dependence) -> bool: + def is_same_as(self, weight_dependence: AbstractWeightDependence) -> bool: if not isinstance(weight_dependence, WeightDependenceAdditiveTriplet): return False return ( @@ -114,7 +116,7 @@ def vertex_executable_suffix(self) -> str: @overrides(AbstractWeightDependence.get_parameters_sdram_usage_in_bytes) def get_parameters_sdram_usage_in_bytes( - self, n_synapse_types, n_weight_terms) -> int: + self, n_synapse_types: int, n_weight_terms: int) -> int: if n_weight_terms != 2: raise NotImplementedError( "Additive weight dependence only supports one or two terms") @@ -123,7 +125,7 @@ def get_parameters_sdram_usage_in_bytes( @overrides(AbstractWeightDependence.write_parameters) def write_parameters( self, spec: DataSpecificationBase, global_weight_scale: float, - synapse_weight_scales, n_weight_terms): + synapse_weight_scales: NDArray[floating], n_weight_terms: int): # Loop through each synapse type for _ in synapse_weight_scales: # Scale the weights diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index 71d3c8f43a..209a9154e5 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from numpy import floating +from numpy.typing import NDArray from typing import Iterable from spinn_utilities.overrides import overrides -from spinn_front_end_common.interface.ds import DataType, DataSpecificationBase +from spinn_front_end_common.interface.ds import ( + DataType, DataSpecificationBase) from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus from .abstract_weight_dependence import AbstractWeightDependence @@ -59,7 +62,7 @@ def w_max(self) -> float: return self.__w_max @overrides(AbstractWeightDependence.is_same_as) - def is_same_as(self, weight_dependence) -> bool: + def is_same_as(self, weight_dependence: AbstractWeightDependence) -> bool: if not isinstance(weight_dependence, WeightDependenceMultiplicative): return False return ( @@ -79,7 +82,7 @@ def vertex_executable_suffix(self) -> str: @overrides(AbstractWeightDependence.get_parameters_sdram_usage_in_bytes) def get_parameters_sdram_usage_in_bytes( - self, n_synapse_types, n_weight_terms) -> int: + self, n_synapse_types: int, n_weight_terms: int) -> int: if n_weight_terms != 1: raise NotImplementedError( "Multiplicative weight dependence only supports single terms") @@ -88,8 +91,8 @@ def get_parameters_sdram_usage_in_bytes( @overrides(AbstractWeightDependence.write_parameters) def write_parameters( - self, spec: DataSpecificationBase, - global_weight_scale, synapse_weight_scales, n_weight_terms): + self, spec: DataSpecificationBase, global_weight_scale: float, + synapse_weight_scales: NDArray[floating], n_weight_terms: int): if n_weight_terms != 1: raise NotImplementedError( "Multiplicative weight dependence only supports single terms") diff --git a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py index 0b6722c7f2..6f33ed0379 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py @@ -316,7 +316,7 @@ def __write_local_only_data(self, spec: DataSpecificationGenerator): @overrides(AbstractRewritesDataSpecification.regenerate_data_specification) def regenerate_data_specification( - self, spec: DataSpecificationReloader, placement): + self, spec: DataSpecificationReloader, placement: Placement): self._rewrite_neuron_data_spec(spec) # close spec diff --git a/spynnaker/pyNN/models/neuron/population_machine_neurons.py b/spynnaker/pyNN/models/neuron/population_machine_neurons.py index d94d081846..7448964125 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_neurons.py +++ b/spynnaker/pyNN/models/neuron/population_machine_neurons.py @@ -137,7 +137,7 @@ def _has_key(self) -> bool: raise NotImplementedError @abstractmethod - def _set_key(self, key: int) -> None: + def _set_key(self, key: int): """ Set the key for spikes. 
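The _set_key/_has_key hunk above follows the convention of annotating query return types while leaving a setter's implicit None return unannotated. A sketch of that convention with a hypothetical KeyHolder class, built on the standard-library abc module rather than spinn_utilities.abstract_base:

from abc import ABC, abstractmethod


class KeyHolder(ABC):
    # Hypothetical stand-in: the query method carries an explicit return
    # annotation, while the setter's implicit None return is left bare.
    @abstractmethod
    def _has_key(self) -> bool:
        raise NotImplementedError

    @abstractmethod
    def _set_key(self, key: int):
        raise NotImplementedError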
diff --git a/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py index 00d6147089..a32a48b650 100644 --- a/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py @@ -316,7 +316,7 @@ def n_bytes_for_transfer(self) -> int: self._pop_vertex.neuron_impl.get_n_synapse_types()) @overrides(ReceivesSynapticInputsOverSDRAM.sdram_requirement) - def sdram_requirement(self, sdram_machine_edge: SDRAMMachineEdge): + def sdram_requirement(self, sdram_machine_edge: SDRAMMachineEdge) -> int: if isinstance(sdram_machine_edge.pre_vertex, SendsSynapticInputsOverSDRAM): return self.n_bytes_for_transfer diff --git a/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection.py b/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection.py index f902aee6fd..a47e95401a 100644 --- a/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection.py +++ b/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/last_neuron_selection.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Iterable from spinn_utilities.overrides import overrides from .abstract_partner_selection import AbstractPartnerSelection from spinn_front_end_common.interface.ds import DataSpecificationBase @@ -33,11 +34,11 @@ def __init__(self, spike_buffer_size=64): @property @overrides(AbstractPartnerSelection.vertex_executable_suffix) - def vertex_executable_suffix(self): + def vertex_executable_suffix(self) -> str: return "_last_neuron" @overrides(AbstractPartnerSelection.get_parameters_sdram_usage_in_bytes) - def get_parameters_sdram_usage_in_bytes(self): + def get_parameters_sdram_usage_in_bytes(self) -> int: return 4 @overrides(AbstractPartnerSelection.write_parameters) @@ -45,5 +46,5 @@ def write_parameters(self, spec: DataSpecificationBase): spec.write_value(self.__spike_buffer_size) @overrides(AbstractPartnerSelection.get_parameter_names) - def get_parameter_names(self): + def get_parameter_names(self) -> Iterable[str]: yield "spike_buffer_size" diff --git a/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection.py b/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection.py index 0476b6f76e..8b51112c8c 100644 --- a/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection.py +++ b/spynnaker/pyNN/models/neuron/structural_plasticity/synaptogenesis/partner_selection/random_selection.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Iterable from spinn_utilities.overrides import overrides +from spinn_front_end_common.interface.ds import DataSpecificationBase from .abstract_partner_selection import AbstractPartnerSelection @@ -25,17 +27,17 @@ class RandomSelection(AbstractPartnerSelection): @property @overrides(AbstractPartnerSelection.vertex_executable_suffix) - def vertex_executable_suffix(self): + def vertex_executable_suffix(self) -> str: return "_random" @overrides(AbstractPartnerSelection.get_parameters_sdram_usage_in_bytes) - def get_parameters_sdram_usage_in_bytes(self): + def get_parameters_sdram_usage_in_bytes(self) -> int: return 0 @overrides(AbstractPartnerSelection.write_parameters) - def write_parameters(self, spec): + def write_parameters(self, spec: DataSpecificationBase): pass @overrides(AbstractPartnerSelection.get_parameter_names) - def get_parameter_names(self): + def get_parameter_names(self) -> Iterable[str]: return () diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_plastic_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_plastic_synapse_dynamics.py index 9849af679e..6c3f1cc121 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_plastic_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_plastic_synapse_dynamics.py @@ -13,7 +13,7 @@ # limitations under the License. from numpy import integer, uint32 from numpy.typing import NDArray -from typing import List, Tuple +from typing import List, Tuple, Union from spinn_utilities.abstract_base import AbstractBase, abstractmethod from .abstract_sdram_synapse_dynamics import AbstractSDRAMSynapseDynamics from spynnaker.pyNN.models.neuron.synapse_dynamics.types import ( @@ -44,9 +44,11 @@ def get_plastic_synaptic_data( self, connections: ConnectionsArray, connection_row_indices: NDArray[integer], n_rows: int, n_synapse_types: int, - max_n_synapses: int, max_atoms_per_core: int) -> Tuple[ - NDArray[uint32], NDArray[uint32], NDArray[uint32], - NDArray[uint32]]: + max_n_synapses: int, max_atoms_per_core: int) -> Union[ + Tuple[NDArray[uint32], NDArray[uint32], + NDArray[uint32], NDArray[uint32]], + Tuple[List[NDArray[uint32]], List[NDArray[uint32]], + NDArray[uint32], NDArray[uint32]]]: """ Get the fixed-plastic data, and plastic-plastic data for each row, and lengths for the fixed_plastic and plastic-plastic parts of each row. 
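The widened return annotation on get_plastic_synaptic_data above says the synaptic data may come back either as four flat arrays or with the first two elements as per-row lists of arrays, while the two length arrays stay flat. A cut-down type alias (hypothetical name) capturing that shape:

from typing import List, Tuple, Union
from numpy import uint32
from numpy.typing import NDArray

# Either four flat arrays, or per-row lists for the plastic-plastic and
# fixed-plastic data, with the two length arrays always flat.
PlasticRowData = Union[
    Tuple[NDArray[uint32], NDArray[uint32], NDArray[uint32], NDArray[uint32]],
    Tuple[List[NDArray[uint32]], List[NDArray[uint32]],
          NDArray[uint32], NDArray[uint32]]]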
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py index b9827d0010..f981e33dad 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics_structural.py @@ -14,7 +14,7 @@ from __future__ import annotations from numpy import floating from numpy.typing import NDArray -from typing import Iterable, Optional, Tuple, Union, TYPE_CHECKING +from typing import Iterable, Optional, TYPE_CHECKING from typing_extensions import TypeAlias from spinn_utilities.abstract_base import AbstractBase, abstractmethod from pacman.model.graphs.common import Slice @@ -34,8 +34,9 @@ from spynnaker.pyNN.models.neural_projections import ( ProjectionApplicationEdge, SynapseInformation) +# see https://github.com/SpiNNakerManchester/sPyNNaker/issues/1427 #: :meta private: -InitialDelay: TypeAlias = Union[float, Tuple[float, float]] +InitialDelay: TypeAlias = float class AbstractSynapseDynamicsStructural(object, metaclass=AbstractBase): diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_neuromodulation.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_neuromodulation.py index 106f6793eb..4e9a3aaccc 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_neuromodulation.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_neuromodulation.py @@ -13,7 +13,7 @@ # limitations under the License. from __future__ import annotations import numpy -from numpy import integer, uint8, uint32 +from numpy import floating, integer, uint8, uint32 from numpy.typing import NDArray from pyNN.standardmodels.synapses import StaticSynapse from typing import Iterable, List, Optional, Tuple, TYPE_CHECKING @@ -37,6 +37,7 @@ from spynnaker.pyNN.models.neuron.synapse_io import MaxRowInfo from spynnaker.pyNN.models.neuron.synapse_dynamics.types import ( ConnectionsArray) + from .abstract_synapse_dynamics import AbstractSynapseDynamics # The targets of neuromodulation NEUROMODULATION_TARGETS = { @@ -100,7 +101,8 @@ def w_max(self) -> float: return self.__w_max @overrides(AbstractPlasticSynapseDynamics.merge) - def merge(self, synapse_dynamics): + def merge(self, synapse_dynamics: AbstractSynapseDynamics + ) -> AbstractSynapseDynamics: # This must replace something that supports neuromodulation, # so it can't be the first thing to be merged! raise SynapticConfigurationException( @@ -109,7 +111,7 @@ def merge(self, synapse_dynamics): " neuromodulation") @overrides(AbstractPlasticSynapseDynamics.is_same_as) - def is_same_as(self, synapse_dynamics) -> bool: + def is_same_as(self, synapse_dynamics: AbstractSynapseDynamics) -> bool: # Shouln't ever come up, but if it does, it is False! 
return False @@ -124,7 +126,7 @@ def get_vertex_executable_suffix(self) -> str: @overrides(AbstractPlasticSynapseDynamics .get_parameters_sdram_usage_in_bytes) def get_parameters_sdram_usage_in_bytes( - self, n_neurons, n_synapse_types) -> int: + self, n_neurons: int, n_synapse_types: int) -> int: size = BYTES_PER_WORD * 3 size += BYTES_PER_WORD * len(self.__tau_c_data) size += BYTES_PER_WORD * len(self.__tau_d_data) @@ -132,8 +134,9 @@ def get_parameters_sdram_usage_in_bytes( @overrides(AbstractPlasticSynapseDynamics.write_parameters) def write_parameters( - self, spec: DataSpecificationBase, region, global_weight_scale, - synapse_weight_scales): + self, spec: DataSpecificationBase, region: int, + global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Calculate constant component in Izhikevich's model weight update # function and write to SDRAM. weight_update_component = \ @@ -178,10 +181,10 @@ def get_n_words_for_plastic_connections(self, n_connections: int) -> int: def get_plastic_synaptic_data( self, connections: ConnectionsArray, connection_row_indices: NDArray[integer], n_rows: int, - n_synapse_types, max_n_synapses: int, - max_atoms_per_core) -> Tuple[ - NDArray[uint32], NDArray[uint32], - NDArray[uint32], NDArray[uint32]]: + n_synapse_types: int, + max_n_synapses: int, max_atoms_per_core: int) -> Tuple[ + NDArray[uint32], NDArray[uint32], NDArray[uint32], + NDArray[uint32]]: # pylint: disable=too-many-arguments weights = numpy.rint( numpy.abs(connections["weight"]) * STDP_FIXED_POINT_ONE) @@ -233,10 +236,10 @@ def get_n_synapses_in_rows( @overrides(AbstractPlasticSynapseDynamics.read_plastic_synaptic_data) def read_plastic_synaptic_data( - self, n_synapse_types, - pp_size: NDArray[integer], pp_data: List[NDArray[uint32]], - fp_size: NDArray[integer], fp_data: List[NDArray[uint32]], - max_atoms_per_core) -> ConnectionsArray: + self, n_synapse_types: int, + pp_size: NDArray[uint32], pp_data: List[NDArray[uint32]], + fp_size: NDArray[uint32], fp_data: List[NDArray[uint32]], + max_atoms_per_core: int) -> ConnectionsArray: data = numpy.concatenate(fp_data) connections = numpy.zeros(data.size, dtype=NUMPY_CONNECTORS_DTYPE) connections["source"] = numpy.concatenate( @@ -292,7 +295,7 @@ def pad_to_length(self) -> None: return None @overrides(AbstractPlasticSynapseDynamics.get_synapse_id_by_target) - def get_synapse_id_by_target(self, target) -> Optional[int]: + def get_synapse_id_by_target(self, target: str) -> Optional[int]: return NEUROMODULATION_TARGETS.get(target, None) @property diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index a4959b9608..58d09849f8 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -13,11 +13,12 @@ # limitations under the License. 
from __future__ import annotations import numpy -from numpy import uint8, uint32, integer +from numpy import floating, integer, uint8, uint32 from numpy.typing import NDArray from pyNN.standardmodels.synapses import StaticSynapse from typing import Iterable, List, Optional, Tuple, TYPE_CHECKING from spinn_utilities.overrides import overrides +from spinn_front_end_common.interface.ds import DataSpecificationBase from spynnaker.pyNN.models.neuron.synapse_dynamics.types import ( NUMPY_CONNECTORS_DTYPE) from .abstract_static_synapse_dynamics import AbstractStaticSynapseDynamics @@ -87,12 +88,14 @@ def get_vertex_executable_suffix(self) -> str: @overrides(AbstractStaticSynapseDynamics. get_parameters_sdram_usage_in_bytes) def get_parameters_sdram_usage_in_bytes( - self, n_neurons, n_synapse_types) -> int: + self, n_neurons: int, n_synapse_types: int) -> int: return 0 @overrides(AbstractStaticSynapseDynamics.write_parameters) def write_parameters( - self, spec, region, global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, region: int, + global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): # Nothing to do here pass @@ -160,7 +163,9 @@ def get_n_synapses_in_rows(self, ff_size: NDArray) -> NDArray: @overrides(AbstractStaticSynapseDynamics.read_static_synaptic_data) def read_static_synaptic_data( - self, n_synapse_types, ff_size, ff_data, max_atoms_per_core): + self, n_synapse_types: int, ff_size: NDArray[integer], + ff_data: List[NDArray[uint32]], + max_atoms_per_core: int) -> ConnectionsArray: n_synapse_type_bits = get_n_bits(n_synapse_types) n_neuron_id_bits = get_n_bits(max_atoms_per_core) neuron_id_mask = (1 << n_neuron_id_bits) - 1 @@ -181,12 +186,12 @@ def get_parameter_names(self) -> Iterable[str]: return ('weight', 'delay') @overrides(AbstractStaticSynapseDynamics.get_max_synapses) - def get_max_synapses(self, n_words): + def get_max_synapses(self, n_words: int) -> int: return n_words @property @overrides(AbstractGenerateOnMachine.gen_matrix_id) - def gen_matrix_id(self): + def gen_matrix_id(self) -> int: return MatrixGeneratorID.STATIC_MATRIX.value @overrides(AbstractGenerateOnMachine.gen_matrix_params) @@ -214,20 +219,20 @@ def gen_matrix_params( @property @overrides(AbstractGenerateOnMachine. 
gen_matrix_params_size_in_bytes) - def gen_matrix_params_size_in_bytes(self): + def gen_matrix_params_size_in_bytes(self) -> int: return 12 * BYTES_PER_WORD @property @overrides(AbstractStaticSynapseDynamics.changes_during_run) - def changes_during_run(self): + def changes_during_run(self) -> bool: return False @property @overrides(AbstractStaticSynapseDynamics.pad_to_length) - def pad_to_length(self): + def pad_to_length(self) -> Optional[int]: return self.__pad_to_length @property @overrides(AbstractStaticSynapseDynamics.is_combined_core_capable) - def is_combined_core_capable(self): + def is_combined_core_capable(self) -> bool: return True diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 0f9bead5d5..1a90a0e851 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -14,11 +14,12 @@ from __future__ import annotations import math import numpy -from numpy import uint8, uint16, uint32 +from numpy import floating, integer, uint8, uint16, uint32 from numpy.typing import NDArray from pyNN.standardmodels.synapses import StaticSynapse -from typing import Iterable, List, Optional, TYPE_CHECKING +from typing import Any, Iterable, List, Optional, Tuple, TYPE_CHECKING from spinn_utilities.overrides import overrides +from spinn_front_end_common.interface.ds import DataSpecificationBase from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, BYTES_PER_SHORT) from spynnaker.pyNN.data import SpynnakerDataView @@ -35,15 +36,21 @@ from .abstract_generate_on_machine import ( AbstractGenerateOnMachine, MatrixGeneratorID) from .synapse_dynamics_neuromodulation import SynapseDynamicsNeuromodulation +from spynnaker.pyNN.models.neural_projections.connectors import ( + AbstractConnector) from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence.\ abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus if TYPE_CHECKING: + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) from spynnaker.pyNN.models.neuron.synapse_dynamics.types import ( ConnectionsArray) from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence.\ abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence.\ abstract_weight_dependence import AbstractWeightDependence + from spynnaker.pyNN.models.neuron.synapse_io import MaxRowInfo + from .abstract_synapse_dynamics import AbstractSynapseDynamics # How large are the time-stamps stored with each event TIME_STAMP_BYTES = BYTES_PER_WORD @@ -130,7 +137,8 @@ def _merge_neuromodulation( " edges to the same Population") @overrides(AbstractPlasticSynapseDynamics.merge) - def merge(self, synapse_dynamics): + def merge(self, synapse_dynamics: AbstractSynapseDynamics + ) -> AbstractSynapseDynamics: # If dynamics is Neuromodulation, merge with other neuromodulation, # and then return ourselves, as neuromodulation can't be used by # itself @@ -166,14 +174,14 @@ def merge(self, synapse_dynamics): None, self.dendritic_delay_fraction, synapse_dynamics.f_rew, synapse_dynamics.initial_weight, synapse_dynamics.initial_delay, synapse_dynamics.s_max, - synapse_dynamics.seed, + seed=synapse_dynamics.seed, backprop_delay=self.backprop_delay) # Otherwise, it is static or neuromodulation, so return ourselves return self 
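# A minimal, standalone sketch (hypothetical module and class names, not part
# of this patch) of the TYPE_CHECKING pattern the merge()/is_same_as()
# annotations above rely on: the type used only in annotations is imported
# solely for the type checker, so no circular import is triggered at runtime,
# and the __future__ import keeps the annotations as unevaluated strings.
from __future__ import annotations
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by mypy/pyright only; never executed when the module is imported.
    from mypackage.dynamics import AbstractDynamics  # hypothetical

class StaticDynamics:
    def is_same_as(self, other: AbstractDynamics) -> bool:
        # The runtime isinstance() check does not need the hypothetical import.
        return isinstance(other, StaticDynamics)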
@overrides(AbstractPlasticSynapseDynamics.get_value) - def get_value(self, key): + def get_value(self, key: str) -> Any: for obj in [self.__timing_dependence, self.__weight_dependence, self]: if hasattr(obj, key): return getattr(obj, key) @@ -181,7 +189,7 @@ def get_value(self, key): f"Type {type(self)} does not have parameter {key}") @overrides(AbstractPlasticSynapseDynamics.set_value) - def set_value(self, key, value): + def set_value(self, key: str, value: Any): for obj in [self.__timing_dependence, self.__weight_dependence, self]: if hasattr(obj, key): setattr(obj, key, value) @@ -238,7 +246,7 @@ def neuromodulation(self) -> Optional[SynapseDynamicsNeuromodulation]: return self.__neuromodulation @overrides(AbstractPlasticSynapseDynamics.is_same_as) - def is_same_as(self, synapse_dynamics): + def is_same_as(self, synapse_dynamics: AbstractSynapseDynamics) -> bool: if not isinstance(synapse_dynamics, SynapseDynamicsSTDP): return False return ( @@ -249,7 +257,7 @@ def is_same_as(self, synapse_dynamics): (self.__dendritic_delay_fraction == synapse_dynamics.dendritic_delay_fraction)) - def get_vertex_executable_suffix(self): + def get_vertex_executable_suffix(self) -> str: """ :rtype: str """ @@ -284,7 +292,9 @@ def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types): @overrides(AbstractPlasticSynapseDynamics.write_parameters) def write_parameters( - self, spec, region, global_weight_scale, synapse_weight_scales): + self, spec: DataSpecificationBase, region: int, + global_weight_scale: float, + synapse_weight_scales: NDArray[floating]): spec.comment("Writing Plastic Parameters") # Switch focus to the region: @@ -354,8 +364,12 @@ def get_n_words_for_plastic_connections(self, n_connections): @overrides(AbstractPlasticSynapseDynamics.get_plastic_synaptic_data) def get_plastic_synaptic_data( - self, connections: ConnectionsArray, connection_row_indices, - n_rows, n_synapse_types, max_n_synapses, max_atoms_per_core): + self, connections: ConnectionsArray, + connection_row_indices: NDArray[integer], n_rows: int, + n_synapse_types: int, + max_n_synapses: int, max_atoms_per_core: int) -> Tuple[ + List[NDArray[uint32]], List[NDArray[uint32]], + NDArray[uint32], NDArray[uint32]]: n_synapse_type_bits = get_n_bits(n_synapse_types) n_neuron_id_bits = get_n_bits(max_atoms_per_core) neuron_id_mask = (1 << n_neuron_id_bits) - 1 @@ -435,26 +449,31 @@ def _pad_row(self, rows: List[NDArray], no_bytes_per_connection: int): @overrides( AbstractPlasticSynapseDynamics.get_n_plastic_plastic_words_per_row) - def get_n_plastic_plastic_words_per_row(self, pp_size): + def get_n_plastic_plastic_words_per_row( + self, pp_size: NDArray[uint32]) -> NDArray[integer]: # pp_size is in words, so return return pp_size @overrides( AbstractPlasticSynapseDynamics.get_n_fixed_plastic_words_per_row) - def get_n_fixed_plastic_words_per_row(self, fp_size): + def get_n_fixed_plastic_words_per_row( + self, fp_size: NDArray[uint32]) -> NDArray[integer]: # fp_size is in half-words return numpy.ceil(fp_size / 2.0).astype(dtype=uint32) @overrides(AbstractPlasticSynapseDynamics.get_n_synapses_in_rows) - def get_n_synapses_in_rows(self, pp_size, fp_size): + def get_n_synapses_in_rows(self, pp_size: NDArray[uint32], + fp_size: NDArray[uint32]) -> NDArray[integer]: # Each fixed-plastic synapse is a half-word and fp_size is in half # words so just return it return fp_size @overrides(AbstractPlasticSynapseDynamics.read_plastic_synaptic_data) def read_plastic_synaptic_data( - self, n_synapse_types, pp_size, pp_data, fp_size, 
fp_data, - max_atoms_per_core) -> ConnectionsArray: + self, n_synapse_types: int, pp_size: NDArray[uint32], + pp_data: List[NDArray[uint32]], fp_size: NDArray[uint32], + fp_data: List[NDArray[uint32]], + max_atoms_per_core: int) -> ConnectionsArray: # pylint: disable=too-many-arguments n_rows = len(fp_size) @@ -489,20 +508,23 @@ def read_plastic_synaptic_data( return connections @overrides(AbstractPlasticSynapseDynamics.get_weight_mean) - def get_weight_mean(self, connector, synapse_info): + def get_weight_mean(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: # Because the weights could all be changed to the maximum, the mean # has to be given as the maximum for scaling return self.get_weight_maximum(connector, synapse_info) @overrides(AbstractPlasticSynapseDynamics.get_weight_variance) def get_weight_variance( - self, connector, weights: Weight_Types, synapse_info): + self, connector: AbstractConnector, weights: Weight_Types, + synapse_info: SynapseInformation) -> float: # Because the weights could all be changed to the maximum, the variance # has to be given as no variance return 0.0 @overrides(AbstractPlasticSynapseDynamics.get_weight_maximum) - def get_weight_maximum(self, connector, synapse_info): + def get_weight_maximum(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: w_max = super().get_weight_maximum(connector, synapse_info) # The maximum weight is the largest that it could be set to from # the weight dependence @@ -516,7 +538,7 @@ def get_parameter_names(self) -> Iterable[str]: yield from self.__weight_dependence.get_parameter_names() @overrides(AbstractPlasticSynapseDynamics.get_max_synapses) - def get_max_synapses(self, n_words): + def get_max_synapses(self, n_words: int) -> int: # Subtract the header size that will always exist n_header_words = self._n_header_bytes // BYTES_PER_WORD n_words_space = n_words - n_header_words @@ -547,14 +569,16 @@ def get_max_synapses(self, n_words): @property @overrides(AbstractGenerateOnMachine.gen_matrix_id) - def gen_matrix_id(self): + def gen_matrix_id(self) -> int: return MatrixGeneratorID.STDP_MATRIX.value @overrides(AbstractGenerateOnMachine.gen_matrix_params) def gen_matrix_params( - self, synaptic_matrix_offset, delayed_matrix_offset, app_edge, - synapse_info, max_row_info, max_pre_atoms_per_core, - max_post_atoms_per_core): + self, synaptic_matrix_offset: int, delayed_matrix_offset: int, + app_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation, max_row_info: MaxRowInfo, + max_pre_atoms_per_core: int, max_post_atoms_per_core: int + ) -> NDArray[uint32]: vertex = app_edge.post_vertex n_synapse_type_bits = get_n_bits( vertex.neuron_impl.get_n_synapse_types()) @@ -580,22 +604,21 @@ def gen_matrix_params( dtype=uint32) @property - @overrides(AbstractGenerateOnMachine. 
- gen_matrix_params_size_in_bytes) - def gen_matrix_params_size_in_bytes(self): + @overrides(AbstractGenerateOnMachine.gen_matrix_params_size_in_bytes) + def gen_matrix_params_size_in_bytes(self) -> int: return 17 * BYTES_PER_WORD @property @overrides(AbstractPlasticSynapseDynamics.changes_during_run) - def changes_during_run(self): + def changes_during_run(self) -> bool: return True @property @overrides(AbstractPlasticSynapseDynamics.is_combined_core_capable) - def is_combined_core_capable(self): + def is_combined_core_capable(self) -> bool: return self.__neuromodulation is None @property @overrides(AbstractPlasticSynapseDynamics.pad_to_length) - def pad_to_length(self): + def pad_to_length(self) -> Optional[int]: return self.__pad_to_length diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py index 236c8dbc50..83cd1f9271 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py @@ -14,7 +14,8 @@ from __future__ import annotations import numpy from pyNN.standardmodels.synapses import StaticSynapse -from typing import Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING +from typing import ( + Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING, Sequence, Union) from spinn_utilities.overrides import overrides from pacman.model.graphs.application import ApplicationVertex from pacman.model.graphs.common import Slice @@ -34,8 +35,12 @@ from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID from .abstract_synapse_dynamics import AbstractSynapseDynamics if TYPE_CHECKING: + from pacman.model.graphs import AbstractVertex + from pacman.model.graphs.machine import MachineVertex from spynnaker.pyNN.models.neural_projections import ( ProjectionApplicationEdge, SynapseInformation) + from spynnaker.pyNN.models.neural_projections.connectors import ( + AbstractConnector) from spynnaker.pyNN.models.neuron.synapse_dynamics.\ abstract_synapse_dynamics_structural import ( InitialDelay) @@ -184,7 +189,15 @@ def set_projection_parameter(self, param: str, value): raise ValueError(f"Unknown parameter {param}") @overrides(AbstractStaticSynapseDynamics.is_same_as) - def is_same_as(self, synapse_dynamics) -> bool: + @overrides(_Common.is_same_as) + def is_same_as(self, synapse_dynamics: Union[ + AbstractSynapseDynamics, + AbstractSynapseDynamicsStructural]) -> bool: + if not (isinstance(synapse_dynamics, SynapseDynamicsStructuralStatic)): + return False + if not AbstractStaticSynapseDynamics.is_same_as( + self, synapse_dynamics): + return False return _Common.is_same_as(self, synapse_dynamics) @overrides(AbstractStaticSynapseDynamics.get_vertex_executable_suffix) @@ -226,37 +239,37 @@ def seed(self) -> Optional[int]: @property @overrides(AbstractSynapseDynamicsStructural.s_max) - def s_max(self): + def s_max(self) -> int: return self.__s_max @property @overrides(AbstractSynapseDynamicsStructural.with_replacement) - def with_replacement(self): + def with_replacement(self) -> bool: return self.__with_replacement @property @overrides(AbstractSynapseDynamicsStructural.initial_weight) - def initial_weight(self): + def initial_weight(self) -> float: return self.__initial_weight @property @overrides(AbstractSynapseDynamicsStructural.initial_delay) - def initial_delay(self): + def initial_delay(self) -> InitialDelay: return self.__initial_delay @property 
@overrides(AbstractSynapseDynamicsStructural.partner_selection) - def partner_selection(self): + def partner_selection(self) -> AbstractPartnerSelection: return self.__partner_selection @property @overrides(AbstractSynapseDynamicsStructural.formation) - def formation(self): + def formation(self) -> AbstractFormation: return self.__formation @property @overrides(AbstractSynapseDynamicsStructural.elimination) - def elimination(self): + def elimination(self) -> AbstractElimination: return self.__elimination @property @@ -265,35 +278,49 @@ def connections(self) -> ConnectionsInfo: return self.__connections @overrides(SynapseDynamicsStatic.get_weight_mean) - def get_weight_mean(self, connector, synapse_info): + def get_weight_mean(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: return self.get_weight_maximum(connector, synapse_info) @overrides(SynapseDynamicsStatic.get_weight_variance) def get_weight_variance( - self, connector, weights: Weight_Types, synapse_info): + self, connector: AbstractConnector, weights: Weight_Types, + synapse_info: SynapseInformation) -> float: return 0.0 @overrides(SynapseDynamicsStatic.get_weight_maximum) - def get_weight_maximum(self, connector, synapse_info): + def get_weight_maximum(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: w_m = super().get_weight_maximum(connector, synapse_info) return max(w_m, self.__initial_weight) @overrides(SynapseDynamicsStatic.get_delay_maximum) - def get_delay_maximum(self, connector, synapse_info): + def get_delay_maximum(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> Optional[float]: d_m = super().get_delay_maximum(connector, synapse_info) + if d_m is None: + return self.__initial_delay return max(d_m, self.__initial_delay) @overrides(SynapseDynamicsStatic.get_delay_minimum) - def get_delay_minimum(self, connector, synapse_info): + def get_delay_minimum( + self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> Optional[float]: d_m = super().get_delay_minimum(connector, synapse_info) + if d_m is None: + return self.__initial_delay return min(d_m, self.__initial_delay) @overrides(SynapseDynamicsStatic.get_delay_variance) - def get_delay_variance(self, connector, delays, synapse_info): + def get_delay_variance( + self, connector: AbstractConnector, delays: numpy.ndarray, + synapse_info: SynapseInformation) -> float: return 0.0 @overrides(_Common._get_seeds) - def _get_seeds(self, app_vertex=None) -> Tuple[int, ...]: + def _get_seeds( + self, app_vertex: Union[None, ApplicationVertex, Slice] = None + ) -> Sequence[int]: if app_vertex: if app_vertex not in self.__seeds.keys(): self.__seeds[app_vertex] = ( @@ -303,14 +330,16 @@ def _get_seeds(self, app_vertex=None) -> Tuple[int, ...]: return create_mars_kiss_seeds(self.__rng) @overrides(SynapseDynamicsStatic.generate_on_machine) - def generate_on_machine(self): + def generate_on_machine(self) -> bool: # Never generate structural connections on the machine return False @overrides(AbstractSynapseDynamics.get_connected_vertices) def get_connected_vertices( self, s_info: SynapseInformation, source_vertex: ApplicationVertex, - target_vertex: ApplicationVertex): + target_vertex: ApplicationVertex) -> Sequence[ + Tuple[MachineVertex, Sequence[AbstractVertex]]]: + # Things change, so assume all connected return [(m_vertex, [source_vertex]) for m_vertex in target_vertex.splitter.get_in_coming_vertices( @@ -318,5 +347,5 @@ def get_connected_vertices( @property 
@overrides(AbstractSynapseDynamics.is_combined_core_capable) - def is_combined_core_capable(self): + def is_combined_core_capable(self) -> bool: return False diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py index 0c91bb65cd..06f3ad2047 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py @@ -14,7 +14,8 @@ from __future__ import annotations import numpy from pyNN.standardmodels.synapses import StaticSynapse -from typing import Dict, Optional, Tuple, TYPE_CHECKING +from typing import ( + Dict, Iterable, Optional, Sequence, Tuple, TYPE_CHECKING, Union) from spinn_utilities.overrides import overrides from pacman.model.graphs.application import ApplicationVertex from spynnaker.pyNN.exceptions import SynapticConfigurationException @@ -31,6 +32,9 @@ from .abstract_synapse_dynamics import AbstractSynapseDynamics from .abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics if TYPE_CHECKING: + from pacman.model.graphs import AbstractVertex + from pacman.model.graphs.machine import MachineVertex + from pacman.model.graphs.common import Slice from spynnaker.pyNN.models.neuron.structural_plasticity.synaptogenesis.\ partner_selection.abstract_partner_selection import \ AbstractPartnerSelection @@ -42,7 +46,10 @@ abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence.\ abstract_weight_dependence import AbstractWeightDependence - from spynnaker.pyNN.models.neural_projections import SynapseInformation + from spynnaker.pyNN.models.neural_projections import ( + ProjectionApplicationEdge, SynapseInformation) + from spynnaker.pyNN.models.neural_projections.connectors import ( + AbstractConnector) from spynnaker.pyNN.models.neuron.synapse_dynamics.types import ( ConnectionsArray) from .synapse_dynamics_structural_common import ConnectionsInfo @@ -156,7 +163,8 @@ def __init__( self.__seeds: Dict[object, Tuple[int, ...]] = dict() @overrides(AbstractPlasticSynapseDynamics.merge) - def merge(self, synapse_dynamics) -> SynapseDynamicsStructuralSTDP: + def merge(self, synapse_dynamics: AbstractSynapseDynamics + ) -> SynapseDynamicsStructuralSTDP: # If dynamics is Neuromodulation, merge with other neuromodulation, # and then return ourselves, as neuromodulation can't be used by # itself @@ -194,9 +202,13 @@ def set_projection_parameter(self, param: str, value): raise ValueError(f"Unknown parameter {param}") @overrides(AbstractPlasticSynapseDynamics.is_same_as) - def is_same_as(self, synapse_dynamics) -> bool: - if (isinstance(synapse_dynamics, SynapseDynamicsSTDP) and - not super().is_same_as(synapse_dynamics)): + @overrides(SynapseDynamicsStructuralCommon.is_same_as) + def is_same_as(self, synapse_dynamics: Union[ + AbstractSynapseDynamics, + AbstractSynapseDynamicsStructural]) -> bool: + if not (isinstance(synapse_dynamics, SynapseDynamicsStructuralSTDP)): + return False + if not SynapseDynamicsSTDP.is_same_as(self, synapse_dynamics): return False return SynapseDynamicsStructuralCommon.is_same_as( self, synapse_dynamics) @@ -209,7 +221,8 @@ def get_vertex_executable_suffix(self) -> str: @overrides(AbstractSynapseDynamicsStructural.set_connections) def set_connections( - self, connections: ConnectionsArray, post_vertex_slice, app_edge, + self, connections: ConnectionsArray, 
post_vertex_slice: Slice, + app_edge: ProjectionApplicationEdge, synapse_info: SynapseInformation): if not isinstance(synapse_info.synapse_dynamics, AbstractSynapseDynamicsStructural): @@ -219,7 +232,7 @@ def set_connections( collector.append((connections, app_edge, synapse_info)) @overrides(AbstractPlasticSynapseDynamics.get_parameter_names) - def get_parameter_names(self): + def get_parameter_names(self) -> Iterable[str]: yield from super().get_parameter_names() yield from SynapseDynamicsStructuralCommon.get_parameter_names(self) @@ -274,35 +287,45 @@ def connections(self) -> ConnectionsInfo: return self.__connections @overrides(AbstractPlasticSynapseDynamics.get_weight_mean) - def get_weight_mean(self, connector, synapse_info: SynapseInformation): + def get_weight_mean(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: # Claim the mean is the maximum, a massive but safe overestimation return self.get_weight_maximum(connector, synapse_info) @overrides(AbstractPlasticSynapseDynamics.get_weight_maximum) - def get_weight_maximum(self, connector, synapse_info: SynapseInformation): + def get_weight_maximum( + self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: w_max = super().get_weight_maximum(connector, synapse_info) return max(w_max, self.__initial_weight) @overrides(SynapseDynamicsSTDP.get_delay_maximum) - def get_delay_maximum(self, connector, synapse_info: SynapseInformation): + def get_delay_maximum( + self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> Optional[float]: d_m = super().get_delay_maximum(connector, synapse_info) if d_m is None: return self.__initial_delay return max(d_m, self.__initial_delay) @overrides(SynapseDynamicsSTDP.get_delay_minimum) - def get_delay_minimum(self, connector, synapse_info: SynapseInformation): + def get_delay_minimum(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> Optional[float]: d_m = super().get_delay_minimum(connector, synapse_info) if d_m is None: return self.__initial_delay return min(d_m, self.__initial_delay) @overrides(SynapseDynamicsSTDP.get_delay_variance) - def get_delay_variance(self, connector, delays, synapse_info): + def get_delay_variance( + self, connector: AbstractConnector, delays: numpy.ndarray, + synapse_info: SynapseInformation) -> float: return 0.0 @overrides(SynapseDynamicsStructuralCommon._get_seeds) - def _get_seeds(self, app_vertex=None) -> Tuple[int, ...]: + def _get_seeds( + self, app_vertex: Union[None, ApplicationVertex, Slice] = None + ) -> Sequence[int]: if app_vertex: if app_vertex not in self.__seeds.keys(): self.__seeds[app_vertex] = ( @@ -318,8 +341,10 @@ def generate_on_machine(self) -> bool: @overrides(AbstractSynapseDynamics.get_connected_vertices) def get_connected_vertices( - self, s_info: SynapseInformation, source_vertex: ApplicationVertex, - target_vertex: ApplicationVertex): + self, s_info: SynapseInformation, + source_vertex: ApplicationVertex, + target_vertex: ApplicationVertex) -> Sequence[ + Tuple[MachineVertex, Sequence[AbstractVertex]]]: # Things change, so assume all connected return [(m_vertex, [source_vertex]) for m_vertex in target_vertex.splitter.get_in_coming_vertices( diff --git a/spynnaker/pyNN/models/neuron/synapse_io.py b/spynnaker/pyNN/models/neuron/synapse_io.py index f1c1380a12..4d67bb3047 100644 --- a/spynnaker/pyNN/models/neuron/synapse_io.py +++ b/spynnaker/pyNN/models/neuron/synapse_io.py @@ -336,6 +336,8 @@ def _get_row_data( :rtype: ~numpy.ndarray """ # pylint: 
disable=too-many-arguments + fp_data: Union[NDArray[uint32], List[NDArray[uint32]]] + pp_data: Union[NDArray[uint32], List[NDArray[uint32]]] if isinstance(synapse_dynamics, AbstractStaticSynapseDynamics): # Get the static data ff_data, ff_size = synapse_dynamics.get_static_synaptic_data( diff --git a/spynnaker/pyNN/models/populations/population.py b/spynnaker/pyNN/models/populations/population.py index 536dd770b2..7dcc92da11 100644 --- a/spynnaker/pyNN/models/populations/population.py +++ b/spynnaker/pyNN/models/populations/population.py @@ -13,10 +13,13 @@ # limitations under the License. from __future__ import annotations import logging +import neo import numpy +from numpy import floating from numpy.typing import NDArray import os import inspect +from pyNN.descriptions import TemplateEngine from typing import ( Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple, Type, Union, final, overload, TYPE_CHECKING) @@ -197,10 +200,8 @@ def can_record(self, variable: str) -> bool: return variable in self.__vertex.get_recordable_variables() @overrides(PopulationBase.record, extend_doc=False) - def record( - self, variables: Names, - to_file: Optional[Union[str]] = None, - sampling_interval: Optional[int] = None): + def record(self, variables: Names, to_file: Optional[str] = None, + sampling_interval: Optional[int] = None): """ Record the specified variable or variables for all cells in the Population or view. @@ -239,7 +240,7 @@ def sample(self, n: int, rng: Optional[NumpyRNG] = None) -> PopulationView: @overrides(PopulationBase.write_data, extend_doc=False) def write_data(self, io: Union[str, BaseIO], variables: Names = 'all', - gather=True, clear=False, + gather: bool = True, clear: bool = False, annotations: Optional[Dict[str, Any]] = None): """ Write recorded data to file, using one of the file formats @@ -282,7 +283,9 @@ def write_data(self, io: Union[str, BaseIO], variables: Names = 'all', # write the neo block to the file io.write(data) - def describe(self, template='population_default.txt', engine='default'): + def describe(self, template: str = 'population_default.txt', + engine: Optional[Union[str, TemplateEngine]] = 'default' + ) -> Union[str, Dict[str, Any]]: """ Returns a human-readable description of the population. @@ -310,6 +313,7 @@ def describe(self, template='population_default.txt', engine='default'): context.update(self.annotations) if self.size > 0: parameters = self.__vertex.get_parameters() + cell_parameters: Union[str, ParameterHolder] if parameters: cell_parameters = self.__vertex.get_parameter_values( parameters, 0) @@ -336,8 +340,9 @@ def _end(self) -> None: @overrides(PopulationBase.get_data, extend_doc=False) def get_data( self, variables: Names = 'all', - gather=True, clear=False, *, - annotations: Optional[Dict[str, Any]] = None): + gather: bool = True, clear: bool = False, *, + annotations: Optional[Dict[str, Any]] = None) -> neo.Block: + """ Return a Neo Block containing the data (spikes, state variables) recorded from the Assembly. 
@@ -369,7 +374,7 @@ def get_data( def spinnaker_get_data( self, variable: str, as_matrix: bool = False, - view_indexes: Optional[Sequence[int]] = None): + view_indexes: Optional[Sequence[int]] = None) -> NDArray[floating]: """ Public accessor for getting data as a numpy array, instead of the Neo-based object @@ -390,7 +395,7 @@ def spinnaker_get_data( variable, as_matrix, view_indexes) @overrides(PopulationBase.get_spike_counts, extend_doc=False) - def get_spike_counts(self, gather=True): + def get_spike_counts(self, gather: bool = True) -> Dict[int, int]: """ Return the number of spikes for each neuron. @@ -599,7 +604,7 @@ def conductance_based(self) -> bool: return self.__vertex.conductance_based def get(self, parameter_names: Names, - gather=True, simplify=True) -> ParameterHolder: + gather: bool = True, simplify=True) -> ParameterHolder: """ Get the values of a parameter for every local cell in the population. @@ -629,7 +634,8 @@ def id_to_index( self, id: Iterable[int]) -> Sequence[int]: # @ReservedAssignment ... - def id_to_index(self, id): # @ReservedAssignment + def id_to_index(self, id: Union[int, Iterable[int]] + ) -> Union[int, Sequence[int]]: # @ReservedAssignment """ Given the ID(s) of cell(s) in the Population, return its (their) index (order in the Population). @@ -658,7 +664,8 @@ def index_to_id(self, index: int) -> int: def index_to_id(self, index: Iterable[int]) -> Sequence[int]: ... - def index_to_id(self, index): + def index_to_id(self, index: Union[int, Iterable[int]] + ) -> Union[int, Sequence[int]]: """ Given the index (order in the Population) of cell(s) in the Population, return their ID(s) diff --git a/spynnaker/pyNN/models/populations/population_base.py b/spynnaker/pyNN/models/populations/population_base.py index 63f0ecf754..58f5e50011 100644 --- a/spynnaker/pyNN/models/populations/population_base.py +++ b/spynnaker/pyNN/models/populations/population_base.py @@ -173,7 +173,7 @@ def local_size(self) -> int: def __len__(self) -> int: raise NotImplementedError - def mean_spike_count(self, gather=True): + def mean_spike_count(self, gather: bool = True) -> float: """ Returns the mean number of spikes per neuron. @@ -276,8 +276,8 @@ def receptor_types(self): _we_dont_do_this_now() # pragma: no cover @abstractmethod - def record(self, variables: Names, to_file=None, - sampling_interval=None): + def record(self, variables: Names, to_file: Optional[str] = None, + sampling_interval: Optional[int] = None): """ Record the specified variable or variables for all cells in the Population or view. @@ -348,7 +348,8 @@ def _recorder(self) -> Recorder: raise NotImplementedError @staticmethod - def _check_params(gather, annotations=None): + def _check_params( + gather: bool, annotations: Optional[Dict[str, Any]] = None): if not gather: logger.warning( "sPyNNaker only supports gather=True. We will run " diff --git a/spynnaker/pyNN/models/populations/population_view.py b/spynnaker/pyNN/models/populations/population_view.py index 418fa12dd5..3f78e82ca0 100644 --- a/spynnaker/pyNN/models/populations/population_view.py +++ b/spynnaker/pyNN/models/populations/population_view.py @@ -413,23 +413,8 @@ def spinnaker_get_data( return self.__population.spinnaker_get_data( variable, as_matrix, self.__indexes) - def get_spike_counts(self, gather=True) -> Dict[int, int]: - """ - Returns a dict containing the number of spikes for each neuron. - - The dict keys are neuron IDs, not indices. - - .. 
note:: - Implementation of this method is different to Population as the - Populations uses PyNN 7 version of the ``get_spikes`` method which - does not support indexes. - - :param bool gather: - .. note:: - SpiNNaker always gathers. - - :rtype: dict(int,int) - """ + @overrides(PopulationBase.get_spike_counts) + def get_spike_counts(self, gather: bool = True) -> Dict[int, int]: self._check_params(gather) with NeoBufferDatabase() as db: return db.get_spike_counts( diff --git a/spynnaker/pyNN/models/spike_source/spike_source_array_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_array_vertex.py index 4fd093262f..8d286eacd9 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_array_vertex.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_array_vertex.py @@ -310,7 +310,9 @@ def get_neurons_recording( return vertex_slice.get_raster_ids() @overrides(PopulationApplicationVertex.set_recording) - def set_recording(self, name: str, sampling_interval=None, indices=None): + def set_recording( + self, name: str, sampling_interval: Optional[float] = None, + indices: Optional[Collection[int]] = None): if name != "spikes": raise KeyError(f"Cannot record {name}") if sampling_interval is not None: diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson.py index de28ce78c5..bb5f56d782 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_poisson.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson.py @@ -55,7 +55,7 @@ def create_vertex( self, n_neurons: int, label: str, *, seed: Optional[int] = None, max_rate: Optional[float] = None, splitter: Optional[AbstractSplitterCommon] = None, - n_colour_bits: Optional[int] = None): + n_colour_bits: Optional[int] = None) -> SpikeSourcePoissonVertex: """ :param float seed: :param float max_rate: diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py index b7d5409edc..8f66b46116 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_machine_vertex.py @@ -571,7 +571,7 @@ def read_parameters_from_machine(self, placement: Placement): self._pop_vertex.rates.set_value_by_id(i, numpy.array(rates)) @overrides(SendsSynapticInputsOverSDRAM.sdram_requirement) - def sdram_requirement(self, sdram_machine_edge: SDRAMMachineEdge): + def sdram_requirement(self, sdram_machine_edge: SDRAMMachineEdge) -> int: if isinstance(sdram_machine_edge.post_vertex, ReceivesSynapticInputsOverSDRAM): return sdram_machine_edge.post_vertex.n_bytes_for_transfer diff --git a/spynnaker/pyNN/models/spike_source/spike_source_poisson_vertex.py b/spynnaker/pyNN/models/spike_source/spike_source_poisson_vertex.py index 8216acf70a..3b598cbef8 100644 --- a/spynnaker/pyNN/models/spike_source/spike_source_poisson_vertex.py +++ b/spynnaker/pyNN/models/spike_source/spike_source_poisson_vertex.py @@ -50,9 +50,11 @@ SpikeSourcePoissonMachineVertex, _flatten, get_rates_bytes, get_sdram_edge_params_bytes, get_expander_rates_bytes, get_params_bytes) if TYPE_CHECKING: + from spinn_utilities.ranged.abstract_sized import Selector from .spike_source_poisson import SpikeSourcePoisson from .spike_source_poisson_variable import SpikeSourcePoissonVariable from spynnaker.pyNN.models.projection import Projection + from spynnaker.pyNN.models.common.types import Values logger = 
FormatAdapter(logging.getLogger(__name__)) @@ -119,6 +121,11 @@ def _normalize_times( return numpy.array(times) +def is_iterable(value: Values) -> TypeGuard[ + Union[Sequence[float], NDArray[numpy.floating]]]: + return hasattr(value, "__iter__") + + class SpikeSourcePoissonVertex( PopulationApplicationVertex, LegacyPartitionerAPI, SupportsStructure): @@ -364,12 +371,14 @@ def __full_name(self, name: str) -> str: return f"{name}s" @overrides(PopulationApplicationVertex.get_parameter_values) - def get_parameter_values(self, names: Names, selector=None): + def get_parameter_values( + self, names: Names, selector: Selector = None) -> ParameterHolder: self._check_parameters(names, self.__allowed_parameters) return ParameterHolder(names, self.__read_parameter, selector) @overrides(PopulationApplicationVertex.set_parameter_values) - def set_parameter_values(self, name: str, value, selector=None): + def set_parameter_values( + self, name: str, value: Values, selector: Selector = None): self._check_parameters(name, self.__allowed_parameters) if self.__is_variable_rate: raise KeyError(f"Cannot set the {name} of a variable rate Poisson") @@ -382,7 +391,7 @@ def set_parameter_values(self, name: str, value, selector=None): # Must be parameter without the s fixed_name = f"{name}s" - if hasattr(value, "__len__"): + if is_iterable(value): # Single start per neuron for whole simulation self.__data[fixed_name].set_value_by_selector( selector, [numpy.array([s]) for s in value]) @@ -421,7 +430,9 @@ def get_recording_region(self, name: str) -> int: return 0 @overrides(PopulationApplicationVertex.set_recording) - def set_recording(self, name: str, sampling_interval=None, indices=None): + def set_recording( + self, name: str, sampling_interval: Optional[float] = None, + indices: Optional[Collection[int]] = None): if name != "spikes": raise KeyError(f"Cannot record {name}") if sampling_interval is not None: diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py index c673f1bf50..d47a5e9180 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py +++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
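# A minimal, standalone sketch of the TypeGuard pattern used by the new
# is_iterable() helper above (hypothetical names; not part of this patch).
# A TypeGuard-returning predicate lets the type checker narrow a union such
# as "float | Sequence[float]" inside the guarded branch.
from typing import Sequence, Union
from typing_extensions import TypeGuard

Value = Union[float, Sequence[float]]

def is_sequence(value: Value) -> TypeGuard[Sequence[float]]:
    # True only for a per-neuron list of values, not for a single scalar.
    return hasattr(value, "__iter__")

def first_value(value: Value) -> float:
    if is_sequence(value):
        # The checker now treats value as Sequence[float] here.
        return float(value[0])
    return float(value)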
+ +from __future__ import annotations from enum import IntEnum -from typing import cast +from typing import Sequence, TYPE_CHECKING from spinnman.model.enums import ExecutableType from spinn_front_end_common.interface.simulation import simulation_utilities from spinn_front_end_common.utilities.constants import SIMULATION_N_BYTES from spinn_utilities.overrides import overrides from pacman.model.graphs.machine import MachineVertex +from pacman.model.resources import AbstractSDRAM from spinn_front_end_common.interface.provenance import ( ProvidesProvenanceDataFromMachineImpl, ProvenanceWriter) from spinn_front_end_common.abstract_models import ( @@ -26,6 +29,9 @@ from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID from .delay_extension_vertex import DelayExtensionVertex +if TYPE_CHECKING: + from pacman.model.placements import Placement + from spinn_front_end_common.interface.ds import DataSpecificationGenerator class DelayExtensionMachineVertex( @@ -94,7 +100,8 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(IntEnum): BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" - def __init__(self, sdram, label, vertex_slice, app_vertex=None): + def __init__(self, sdram: AbstractSDRAM, label, vertex_slice, + app_vertex=None): """ :param ~pacman.model.resources.AbstractSDRAM sdram: The SDRAM required by the vertex @@ -109,25 +116,33 @@ def __init__(self, sdram, label, vertex_slice, app_vertex=None): label, app_vertex=app_vertex, vertex_slice=vertex_slice) self.__sdram = sdram + @property + @overrides(MachineVertex.app_vertex) + def app_vertex(self) -> DelayExtensionVertex: + assert isinstance(self._app_vertex, DelayExtensionVertex) + return self._app_vertex + @property @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id) - def _provenance_region_id(self): + def _provenance_region_id(self) -> int: return self._DELAY_EXTENSION_REGIONS.PROVENANCE_REGION @property @overrides( ProvidesProvenanceDataFromMachineImpl._n_additional_data_items) - def _n_additional_data_items(self): + def _n_additional_data_items(self) -> int: return self.N_EXTRA_PROVENANCE_DATA_ENTRIES @property @overrides(MachineVertex.sdram_required) - def sdram_required(self): + def sdram_required(self) -> AbstractSDRAM: return self.__sdram @overrides(ProvidesProvenanceDataFromMachineImpl. 
parse_extra_provenance_items) - def parse_extra_provenance_items(self, label, x, y, p, provenance_data): + def parse_extra_provenance_items( + self, label: str, x: int, y: int, p: int, + provenance_data: Sequence[int]): (n_received, n_processed, n_added, n_sent, n_overflows, n_delays, n_sat, n_bad_neuron, n_bad_keys, n_late_spikes, max_bg, n_bg_overloads) = provenance_data @@ -199,7 +214,7 @@ def parse_extra_provenance_items(self, label, x, y, p, provenance_data): db.insert_core(x, y, p, self.N_LATE_SPIKES_NAME, n_late_spikes) if n_late_spikes == 0: pass - elif self._app_vertex.drop_late_spikes: + elif self.app_vertex.drop_late_spikes: db.insert_report( f"On {label}, {n_late_spikes} packets were dropped from " f"the input buffer, because they arrived too late to be " @@ -234,20 +249,21 @@ def parse_extra_provenance_items(self, label, x, y, p, provenance_data): @overrides(MachineVertex.get_n_keys_for_partition) def get_n_keys_for_partition(self, partition_id: str) -> int: n_keys = super().get_n_keys_for_partition(partition_id) - v = cast(DelayExtensionVertex, self.app_vertex) + v = self.app_vertex n_colours = 2 ** v.n_colour_bits return n_keys * v.n_delay_stages * n_colours @overrides(AbstractHasAssociatedBinary.get_binary_file_name) - def get_binary_file_name(self): + def get_binary_file_name(self) -> str: return "delay_extension.aplx" @overrides(AbstractHasAssociatedBinary.get_binary_start_type) - def get_binary_start_type(self): + def get_binary_start_type(self) -> ExecutableType: return ExecutableType.USES_SIMULATION_INTERFACE @overrides(AbstractGeneratesDataSpecification.generate_data_specification) - def generate_data_specification(self, spec, placement): + def generate_data_specification( + self, spec: DataSpecificationGenerator, placement: Placement): vertex = placement.vertex # Reserve memory: @@ -255,7 +271,7 @@ def generate_data_specification(self, spec, placement): # ################################################################### # Reserve SDRAM space for memory areas: - delay_params_sz = self._app_vertex.delay_params_size() + delay_params_sz = self.app_vertex.delay_params_size() spec.reserve_memory_region( region=self._DELAY_EXTENSION_REGIONS.SYSTEM, @@ -268,6 +284,7 @@ def generate_data_specification(self, spec, placement): # reserve region for provenance self.reserve_provenance_data_region(spec) + assert isinstance(vertex, AbstractHasAssociatedBinary) self._write_setup_info(spec, vertex.get_binary_file_name()) spec.comment("\n*** Spec for Delay Extension Instance ***\n\n") @@ -282,6 +299,7 @@ def generate_data_specification(self, spec, placement): if source_vertex.vertex_slice == self.vertex_slice: r_info = routing_infos.get_routing_info_from_pre_vertex( source_vertex, SPIKE_PARTITION_ID) + assert (r_info is not None) incoming_key = r_info.key incoming_mask = r_info.mask break @@ -336,14 +354,15 @@ def write_delay_parameters( # Write the number of neurons in the block: spec.write_value(data=vertex_slice.n_atoms) + app_vertex = self.app_vertex # Write the number of blocks of delays: - spec.write_value(data=self._app_vertex.n_delay_stages) + spec.write_value(data=app_vertex.n_delay_stages) # write the delay per delay stage - spec.write_value(data=self._app_vertex.delay_per_stage) + spec.write_value(data=app_vertex.delay_per_stage) # write whether to throw away spikes - spec.write_value(data=int(self._app_vertex.drop_late_spikes)) + spec.write_value(data=int(app_vertex.drop_late_spikes)) # Write the number of colour bits - spec.write_value(data=self.app_vertex.n_colour_bits) 
+ spec.write_value(data=app_vertex.n_colour_bits) diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py index 74bbee386e..4d468259a8 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py +++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations -from typing import List, Sequence, TYPE_CHECKING, cast +from typing import List, Sequence, Tuple, TYPE_CHECKING, cast from spinn_utilities.overrides import overrides from spinn_utilities.config_holder import get_config_bool from pacman.model.graphs.application import ( @@ -87,7 +87,7 @@ def n_atoms(self) -> int: @property @overrides(ApplicationVertex.atoms_shape) - def atoms_shape(self): + def atoms_shape(self) -> Tuple[int, ...]: return self.__partition.pre_vertex.atoms_shape @property diff --git a/spynnaker/pyNN/models/utility_models/spike_injector/spike_injector_vertex.py b/spynnaker/pyNN/models/utility_models/spike_injector/spike_injector_vertex.py index 6e643b0a25..afc3e4d310 100644 --- a/spynnaker/pyNN/models/utility_models/spike_injector/spike_injector_vertex.py +++ b/spynnaker/pyNN/models/utility_models/spike_injector/spike_injector_vertex.py @@ -81,7 +81,9 @@ def get_recordable_variables(self) -> List[str]: return ["spikes"] @overrides(PopulationApplicationVertex.set_recording) - def set_recording(self, name: str, sampling_interval=None, indices=None): + def set_recording( + self, name: str, sampling_interval: Optional[float] = None, + indices: Optional[Collection[int]] = None): if name != "spikes": raise KeyError(f"Cannot record {name}") if sampling_interval is not None: diff --git a/spynnaker/pyNN/utilities/data_population.py b/spynnaker/pyNN/utilities/data_population.py index 1b83a84ec2..7d0514e1af 100644 --- a/spynnaker/pyNN/utilities/data_population.py +++ b/spynnaker/pyNN/utilities/data_population.py @@ -16,7 +16,9 @@ import numpy from numpy import floating from numpy.typing import NDArray -from typing import Any, Dict, Optional, Sequence, Union, TYPE_CHECKING +from pyNN.descriptions import TemplateEngine +from typing import ( + Any, Dict, Iterable, Optional, overload, Sequence, Union, TYPE_CHECKING) import neo # type: ignore[import] from spinn_utilities.ranged.abstract_sized import AbstractSized, Selector from spinn_utilities.log import FormatAdapter @@ -65,12 +67,14 @@ def write_data(self, io: Union[str, neo.baseio.BaseIO], # write the neo block to the file io.write(bl=data) - @overrides(Population.describe) - def describe(self, template=None, engine=None) -> Any: + @overrides(Population.describe, adds_typing=True) + def describe(self, template: Optional[str] = None, + engine: Optional[Union[str, TemplateEngine]] = None + ) -> Union[str, Dict[str, Any]]: if template is not None: - logger.warning("Ignoring template as supported in this mode") + logger.warning("Ignoring template as not supported in this mode") if engine is not None: - logger.warning("Ignoring engine as supported in this mode") + logger.warning("Ignoring engine as not supported in this mode") with NeoBufferDatabase(self.__database_file) as db: _, _, description = db.get_population_metadata(self.label) return description @@ -78,8 +82,8 @@ def describe(self, template=None, engine=None) -> Any: @overrides(Population.get_data) def get_data( self, variables: Names = 'all', - 
gather=True, clear: bool = False, *, - annotations: Annotations = None) -> neo.Block: + gather: bool = True, clear: bool = False, *, + annotations: Optional[Dict[str, Any]] = None) -> neo.Block: # pylint: disable=protected-access Population._check_params(gather, annotations) if clear: @@ -99,7 +103,7 @@ def spinnaker_get_data( self.__label, variable, as_matrix, self._indexes) @overrides(Population.get_spike_counts) - def get_spike_counts(self, gather=True) -> Dict[int, int]: + def get_spike_counts(self, gather: bool = True) -> Dict[int, int]: # pylint: disable=protected-access Population._check_params(gather) with NeoBufferDatabase(self.__database_file) as db: @@ -126,8 +130,20 @@ def local_size(self) -> int: def size(self) -> int: return self._size + @overload + def id_to_index(self, id: int) -> int: # @ReservedAssignment + # pylint: disable=redefined-builtin + ... + + @overload + def id_to_index( + self, id: Iterable[int]) -> Sequence[int]: # @ReservedAssignment + # pylint: disable=redefined-builtin + ... + @overrides(Population.id_to_index) - def id_to_index(self, id): # @ReservedAssignment + def id_to_index(self, id: Union[int, Iterable[int]] + ) -> Union[int, Sequence[int]]: # @ReservedAssignment # pylint: disable=redefined-builtin # assuming not called often so not caching first id with NeoBufferDatabase(self.__database_file) as db: @@ -141,8 +157,17 @@ def id_to_index(self, id): # @ReservedAssignment return int(id - first_id) # assume IDs are consecutive return id - first_id + @overload + def index_to_id(self, index: int) -> int: + ... + + @overload + def index_to_id(self, index: Iterable[int]) -> Sequence[int]: + ... + @overrides(Population.index_to_id) - def index_to_id(self, index): + def index_to_id(self, index: Union[int, Iterable[int]] + ) -> Union[int, Sequence[int]]: # assuming not called often so not caching first id with NeoBufferDatabase(self.__database_file) as db: _, first_id, _ = db.get_population_metadata(self.__label) @@ -184,7 +209,7 @@ def __getitem__(self, index_or_slice: Selector) -> DataPopulation: return DataPopulation(self.__database_file, self.__label, indexes) @overrides(Population.mean_spike_count) - def mean_spike_count(self, gather=True) -> float: + def mean_spike_count(self, gather: bool = True) -> float: Population._check_params(gather) # pylint: disable=protected-access counts = self.get_spike_counts() return sum(counts.values()) / len(counts) diff --git a/spynnaker/pyNN/utilities/ranged/spynnaker_ranged_list.py b/spynnaker/pyNN/utilities/ranged/spynnaker_ranged_list.py index 521998f1ea..156e1b5308 100644 --- a/spynnaker/pyNN/utilities/ranged/spynnaker_ranged_list.py +++ b/spynnaker/pyNN/utilities/ranged/spynnaker_ranged_list.py @@ -12,9 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
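# A minimal, standalone sketch of the @overload pattern applied above to
# id_to_index()/index_to_id() (hypothetical class, without spinn_utilities):
# the stub signatures give the precise int -> int and iterable -> sequence
# types, while the single real implementation handles both cases at runtime.
from typing import Iterable, Sequence, Union, overload

class IdMap:
    def __init__(self, first_id: int):
        self._first_id = first_id

    @overload
    def id_to_index(self, id: int) -> int: ...

    @overload
    def id_to_index(self, id: Iterable[int]) -> Sequence[int]: ...

    def id_to_index(self, id: Union[int, Iterable[int]]
                    ) -> Union[int, Sequence[int]]:
        # Single ID: return a single index; otherwise map each ID in turn.
        if isinstance(id, int):
            return id - self._first_id
        return [i - self._first_id for i in id]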
+from typing import Callable, List, Optional, Sequence, Union +from typing_extensions import TypeAlias from pyNN.random import RandomDistribution from spinn_utilities.overrides import overrides from spinn_utilities.ranged.ranged_list import RangedList +from spinn_utilities.ranged.abstract_list import IdsType, T + +# The type of things we consider to be a list of values +_ListType: TypeAlias = Union[Callable[[int], T], Sequence[T], + RandomDistribution] +# The type of value arguments in several places +_ValueType: TypeAlias = Optional[Union[T, _ListType]] class SpynnakerRangedList(RangedList): @@ -24,14 +33,16 @@ class SpynnakerRangedList(RangedList): """ @overrides(RangedList.listness_check) - def listness_check(self, value): + def listness_check(self, value: _ValueType) -> bool: if isinstance(value, RandomDistribution): return True return super().listness_check(value) @overrides(RangedList.as_list) - def as_list(self, value, size, ids=None): + def as_list( + self, value: _ListType, size: int, + ids: Optional[IdsType] = None) -> List[T]: if isinstance(value, RandomDistribution): return value.next(n=size) diff --git a/unittests/mocks.py b/unittests/mocks.py index 0d30d27814..94df956316 100644 --- a/unittests/mocks.py +++ b/unittests/mocks.py @@ -27,12 +27,12 @@ def __init__(self, size, label, vertex=None): @property @overrides(Population.size) - def size(self): + def size(self) -> int: return self._size @property @overrides(Population.label) - def label(self): + def label(self) -> str: return self.label def __repr__(self): diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index e4f76903c8..aefd540c23 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -13,6 +13,7 @@ # limitations under the License. import shutil import struct +from typing import BinaryIO, Optional, Tuple, Union import unittest from tempfile import mkdtemp import numpy @@ -62,13 +63,17 @@ class _MockTransceiverinOut(MockableTransceiver): @overrides(MockableTransceiver.malloc_sdram) - def malloc_sdram(self, x, y, size, app_id, tag=None): + def malloc_sdram( + self, x: int, y: int, size: int, app_id: int, tag: int = 0) -> int: self._data_to_read = bytearray(size) return 0 @overrides(MockableTransceiver.write_memory) - def write_memory(self, x, y, base_address, data, *, n_bytes=None, - offset=0, cpu=0, get_sum=False): + def write_memory( + self, x: int, y: int, base_address: int, + data: Union[BinaryIO, bytes, int, str], *, + n_bytes: Optional[int] = None, offset: int = 0, cpu: int = 0, + get_sum: bool = False) -> Tuple[int, int]: if data is None: return if isinstance(data, int): @@ -76,15 +81,18 @@ def write_memory(self, x, y, base_address, data, *, n_bytes=None, self._data_to_read[base_address:base_address + len(data)] = data @overrides(Transceiver.get_region_base_address) - def get_region_base_address(self, x, y, p): + def get_region_base_address(self, x: int, y: int, p: int): return 0 @overrides(MockableTransceiver.read_memory) - def read_memory(self, x, y, base_address, length, cpu=0): + def read_memory( + self, x: int, y: int, base_address: int, length: int, + cpu: int = 0) -> bytearray: return self._data_to_read[base_address:base_address + length] @overrides(MockableTransceiver.read_word) - def read_word(self, x, y, base_address, cpu=0): + def read_word( + self, x: int, y: int, base_address: int, cpu: int = 0) -> int: datum, = struct.unpack("