comments and minor fixes
Christian-B committed Mar 22, 2024
1 parent 54d8e91 commit 0584754
Showing 2 changed files with 100 additions and 30 deletions.
113 changes: 92 additions & 21 deletions pacman/operations/placer_algorithms/application_placer.py
@@ -14,7 +14,7 @@
from __future__ import annotations
import logging
import os
from typing import Dict, List, Optional, Set
from typing import Dict, List, Optional, Tuple, Sequence, Set

from spinn_utilities.config_holder import get_config_bool
from spinn_utilities.log import FormatAdapter
@@ -27,6 +27,7 @@
from pacman.model.graphs import AbstractVirtual
from pacman.model.graphs.machine import MachineVertex
from pacman.model.graphs.application import ApplicationVertex
from pacman.model.resources import AbstractSDRAM
from pacman.exceptions import (
PacmanPlaceException, PacmanConfigurationException, PacmanTooBigToPlace)

@@ -46,10 +47,14 @@ def place_application_graph(system_placements: Placements) -> Placements:
:rtype: Placements
"""
placer = ApplicationPlacer(system_placements)
return placer._do_placements(system_placements)
return placer.do_placements(system_placements)
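# --- Annotation (not part of this commit): a minimal usage sketch. ---
# Assumes PacmanDataView already holds a machine and an application graph
# (the unit tests further down set this up via PacmanDataWriter.mock());
# the Placements passed in would normally hold the system/monitor placements.
#
#     from pacman.model.placements import Placements
#
#     system_placements = Placements()
#     placements = place_application_graph(system_placements)
#     # The returned Placements also contains the system placements passed in.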


class ApplicationPlacer(object):
"""
Places the Vertices, keeping those for the same ApplicationVertex together.
"""
__slots__ = (
# Values from PacmanDataView cached for speed
# PacmanDataView.get_machine()
@@ -58,14 +63,14 @@ class ApplicationPlacer(object):
"__plan_n_timesteps",
# Sdram available on perfect none Ethernet Chip after Monitors placed
"__max_sdram",
# Minumum sdram that should be available for a Chip to not be full
# Minimum sdram that should be available for a Chip to not be full
"__min_sdram",
# N Cores free on perfect none Ethernet Chip after Monitors placed
"__max_cores",

# Pointer to the placements including all previous Application Vertices
"__placements",
# A Function to yield the Chips in a consistant order
# A Function to yield the Chips in a consistent order
"__chips",
# Chips that have been fully placed by previous Application Vertices
"__full_chips",
@@ -76,7 +81,7 @@ class ApplicationPlacer(object):
# Start Chips tried for this ApplicationVertex
"__starts_tried",
# Label of the current ApplicationVertex for (error) reporting
"__app_vertex",
"__app_vertex_label",

# Data for the last Chip offered to place on
# May be full after current group placed
@@ -124,15 +129,15 @@ def __init__(self, placements: Placements):
self.__current_chip: Optional[Chip] = None
self.__current_cores_free: List[int] = list()
self.__current_sdram_free = 0
self.__app_vertex = "NO APP VETERX SET"
self.__app_vertex_label = "NO APP VETERX SET"

# Set some value so no Optional needed
self.__ethernet_x = -1
self.__ethernet_y = -1
self.__same_board_chips: Dict[Chip, Chip] = dict()
self.__other_board_chips: Dict[Chip, Chip] = dict()

def _do_placements(self, system_placements: Placements) -> Placements:
def do_placements(self, system_placements: Placements) -> Placements:
"""
Perform placement of an application graph on the machine.
@@ -144,6 +149,9 @@ def _do_placements(self, system_placements: Placements) -> Placements:
:return: Placements for the application.
*Includes the system placements.*
:rtype: Placements
:raises PacmanPlaceException: If no new start Chip is available
:raises PacmanTooBigToPlace:
If the requirements are too big for any chip
"""
# Go through the application graph by application vertex
progress = ProgressBar(
@@ -154,7 +162,6 @@ def _do_placements(self, system_placements: Placements) -> Placements:
if app_vertex.has_fixed_location():
self._place_fixed_vertex(app_vertex)

plan_n_timesteps = PacmanDataView.get_plan_n_timestep()
for app_vertex in progress.over(PacmanDataView.iterate_vertices()):
# as this checks if placed already not need to check if fixed
self._place_vertex(app_vertex)
@@ -168,17 +175,25 @@ def _do_placements(self, system_placements: Placements) -> Placements:

return self.__placements
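        # Annotation (not committed code): the method above is two passes over
        # the application vertices - vertices with a fixed location are pinned
        # first via _place_fixed_vertex(), then _place_vertex() handles the
        # rest and simply skips anything already placed.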

def _place_vertex(self, app_vertex: ApplicationVertex,):
def _place_vertex(self, app_vertex: ApplicationVertex):
"""
Place the next application vertex
:param ApplicationVertex app_vertex:
:raises PacmanPlaceException: If no new start Chip is available
:raises PacmanTooBigToPlace:
If the requirements are too big for any chip
"""
same_chip_groups = app_vertex.splitter.get_same_chip_groups()
if not same_chip_groups:
# This vertex does not require placement or delegates
return

self.__app_vertex = app_vertex.label
self.__app_vertex_label = app_vertex.label

# Restore the starts tried last time.
# Check if they are full comes later
while len(self.__starts_tried):
while len(self.__starts_tried) > 0:
self.__restored_chips.append(self.__starts_tried.pop(0))

# try to make placements with a different start Chip each time
@@ -190,7 +205,28 @@ def _place_vertex(self, app_vertex: ApplicationVertex,):
# Now actually add the placements having confirmed all can be done
self.__placements.add_placements(placements_to_make)
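        # Annotation (assumption, not the committed code): the collapsed part
        # of this method is essentially a retry loop over start Chips, e.g.
        #
        #     placements_to_make = self._prepare_placements(same_chip_groups)
        #     while placements_to_make is None:
        #         placements_to_make = self._prepare_placements(same_chip_groups)
        #
        # where _prepare_placements() returns None when a start Chip (or its
        # neighbourhood) cannot hold the whole vertex, and _get_next_start()
        # raises PacmanPlaceException once every start Chip has been tried.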

def _prepare_placements(self, same_chip_groups):
def _prepare_placements(self, same_chip_groups: Sequence[
Tuple[Sequence[MachineVertex], AbstractSDRAM]]
) -> Optional[List[Placement]]:
"""
Try to make the placements for this ApplicationVertex.
This will make sure all placements are on linked Chips
The next start Chip is tried.
If successful a list of created but NOT added placements is returned
If this start Chip fails it returns None.
A start chip could fail either because it does not have enough space
or because its neighbours do not have enough space
:param list(list(MachineVertex), AbstractSdram) same_chip_groups:
:raises PacmanPlaceException: If no new start Chip is available
:raises PacmanTooBigToPlace:
If the requirements are too big for any chip
"""
# Clear the Chips used in the last prepare
self.__prepared_chips.clear()
self.__current_chip = None
@@ -201,6 +237,7 @@ def _prepare_placements(self, same_chip_groups):
for vertices, sdram in same_chip_groups:
vertices_to_place = self._filter_vertices(vertices)
if len(vertices_to_place) == 0:
# Either placed (fixed) or virtual so skip group
continue
plan_sdram = sdram.get_total_sdram(self.__plan_n_timesteps)
n_cores = len(vertices_to_place)
@@ -217,7 +254,16 @@ def _filter_vertices(self, vertices):
vertex, chip.x, chip.y, core))
return placements_to_make
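        # Annotation (illustration only, names are made up): a splitter's
        # get_same_chip_groups() yields (machine_vertices, sdram) pairs that
        # must share one Chip, e.g. roughly
        #
        #     [([m_vertex_a, m_vertex_b], ConstantSDRAM(100_000)),
        #      ([m_vertex_c], ConstantSDRAM(50_000))]
        #
        # and each group is costed above with
        # sdram.get_total_sdram(self.__plan_n_timesteps) before a Chip with
        # enough room is requested (presumably via _get_next_chip_with_space()).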

def _filter_vertices(self, vertices):
def _filter_vertices(
self, vertices: List[MachineVertex]) -> List[MachineVertex]:
"""
Removes any already placed or virtual vertices.
Errors on groups that have both placed and unplaced vertices!
:param vertices:
:rtype: List(MachineVertex)
"""
# Remove any already placed
vertices_to_place = [
vertex for vertex in vertices
@@ -321,6 +367,13 @@ def _place_error(self, system_placements: Placements,
f" Report written to {report_file}.")

def _place_fixed_vertex(self, app_vertex: ApplicationVertex):
"""
Place all vertices for this Application Vertex
Checks that all MachineVertices are fixed or errors
:param ApplicationVertex app_vertex:
"""
same_chip_groups = app_vertex.splitter.get_same_chip_groups()
if not same_chip_groups:
raise NotImplementedError("Unexpected mix of Fixed and no groups")
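        # Annotation (assumption, not the committed code): the collapsed
        # remainder presumably iterates the groups and calls
        # self._do_fixed_location(vertices) on each, which is what enforces
        # the "all MachineVertices are fixed or errors" rule described above.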
@@ -331,8 +384,13 @@ def _place_fixed_vertex(self, app_vertex: ApplicationVertex):

def _do_fixed_location(self, vertices: list[MachineVertex]):
"""
Do fixed placing for one group.
Errors if the group does not have a fixed location
:param list(MachineVertex) vertices:
:raise PacmanConfigurationException:
If the requested location is not available
"""
for vertex in vertices:
loc = vertex.get_fixed_location()
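            # Annotation (assumption, not the committed code): 'loc' holds the
            # requested chip (and optionally core); the collapsed code
            # presumably checks that this location exists and is still free,
            # raising PacmanConfigurationException otherwise, before recording
            # a Placement there.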
@@ -380,7 +438,6 @@ def _chip_order(self):
"""
Iterate the Chips in a guaranteed order
:param Machine machine:
:rtype: iterable(Chip)
"""
for x in range(self.__machine.width):
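        # Sketch (assumption, not the committed code) of the full iteration:
        #
        #     for x in range(self.__machine.width):
        #         for y in range(self.__machine.height):
        #             if self.__machine.is_chip_at(x, y):
        #                 yield self.__machine[x, y]
        #
        # i.e. a fixed x-then-y scan so the placement order is reproducible.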
@@ -407,12 +464,12 @@ def _space_on_chip(
The values Cached are the:
current_chip Even if full to keep the code simpler
current_cores_free Including the ones for this group
current_sdram_free Excluding the sdram needed fot this group
current_sdram_free Excluding the sdram needed for this group
:param Chip chip:
:param int n_cores: number of cores needed
:param int plan_sdram:
:rtype: tuple(int, int)
:rtype: bool
:raises PacmanTooBigToPlace:
If the requirements are too big for any chip
"""
@@ -449,13 +506,15 @@ def _check_could_fit(self, n_cores: int, plan_sdram: int):
# sdram is the whole group so can be removed now
self.__current_sdram_free = sdram_free - plan_sdram

# adds the neighburs
# adds the neighbours
self._add_neighbours(chip)

return True

def _check_could_fit(self, n_cores: int, plan_sdram: int):
"""
Checks that the cores/SDRAM would fit on an empty perfect Chip
:param int n_cores: number of cores needs
:param int plan_sdram: minimum amount of SDRAM needed
:raises PacmanTooBigToPlace:
@@ -465,7 +524,7 @@ def _check_could_fit(self, n_cores: int, plan_sdram: int):
# should fit somewhere
return
message = (
f"{self.__app_vertex} will not fit on any possible Chip "
f"{self.__app_vertex_label} will not fit on any possible Chip "
f"as a smae_chip_group ")

version = PacmanDataView.get_machine_version()
@@ -519,7 +578,7 @@ def _get_next_start(self, n_cores: int, plan_sdram: int) -> Chip:
start = self._pop_start_chip()
# Save as tried as not full even if toO small
self.__starts_tried.append(start)
# Set the ethernets in case space_on_chip adds neighbours
# Set the Ethernet x and y in case space_on_chip adds neighbours
self.__ethernet_x = start.nearest_ethernet_x
self.__ethernet_y = start.nearest_ethernet_y
if self._space_on_chip(start, n_cores, plan_sdram):
@@ -548,7 +607,7 @@ def _pop_start_chip(self) -> Chip:
return start
except StopIteration:
raise PacmanPlaceException( # pylint: disable=raise-missing-from
f"No more chips to start with for {self.__app_vertex} "
f"No more chips to start with for {self.__app_vertex_label} "
f"Out of {self.__machine.n_chips} "
f"{len(self.__full_chips)} already full "
f"and {len(self.__starts_tried)} tried")
@@ -559,7 +618,7 @@ def _get_next_neighbour(self, n_cores: int, plan_sdram: int):
Also changes the current_chip and updates the neighbourhood
This wil return None if there are no more neighbouring Chip big enough
This will return None if there are no more neighbouring Chip big enough
:param int n_cores: number of cores needs
:param int plan_sdram: minimum amount of SDRAM needed
@@ -601,6 +660,12 @@ def _get_next_chip_with_space(
return self._get_next_neighbour(n_cores, plan_sdram)

def _add_neighbours(self, chip: Chip):
"""
Adds the neighbours for this Chip to be used as the next chips.
:param Chip chip:
:return:
"""
for link in chip.router.links:
target = self.__machine[link.destination_x, link.destination_y]
if (target not in self.__full_chips
@@ -612,6 +677,12 @@
self.__other_board_chips[target] = target

def _pop_neighbour(self) -> Optional[Chip]:
"""
Pops the next neighbour Chip with preference to ones on current board
:return: A neighbour Chip or None if there are no more
:rtype: Chip or None
"""
if self.__same_board_chips:
k = next(iter(self.__same_board_chips))
del self.__same_board_chips[k]
17 changes: 8 additions & 9 deletions in the unit tests for the application placer
@@ -14,12 +14,11 @@
from spinn_utilities.config_holder import set_config
from spinn_machine.virtual_machine import virtual_machine
from pacman.data.pacman_data_writer import PacmanDataWriter
from pacman.exceptions import (
PacmanConfigurationException, PacmanPlaceException, PacmanTooBigToPlace)
from pacman.exceptions import (PacmanPlaceException, PacmanTooBigToPlace)
from pacman.model.partitioner_splitters import (
SplitterFixedLegacy, AbstractSplitterCommon)
from pacman.operations.placer_algorithms.application_placer import (
place_application_graph, _Spaces)
place_application_graph, ApplicationPlacer)
from pacman.model.graphs.machine import SimpleMachineVertex
from pacman.model.resources import ConstantSDRAM
from pacman.model.graphs.application import ApplicationVertex
@@ -219,8 +218,8 @@ def test_sdram_bigger_monitors():
# This is purely an info call so test check directly
writer.add_sample_monitor_vertex(monitor, True)
try:
spaces = _Spaces(Placements())
spaces._Spaces__check_could_fit(1, plan_sdram=max_sdram // 2 + 5)
placer = ApplicationPlacer(Placements())
placer._check_could_fit(1, plan_sdram=max_sdram // 2 + 5)
raise AssertionError("Error not raise")
except PacmanTooBigToPlace as ex:
assert ("after monitors only" in str(ex))
@@ -258,8 +257,8 @@ def test_more_cores_with_monitor():
# This is purely an info call so test check directly
writer.add_sample_monitor_vertex(monitor, True)
try:
spaces = _Spaces(Placements())
spaces._Spaces__check_could_fit(17, 500000)
placer = ApplicationPlacer(Placements())
placer._check_could_fit(17, 500000)
raise AssertionError("Error not raise")
except PacmanTooBigToPlace as ex:
assert ("reserved for monitors" in str(ex))
@@ -271,5 +270,5 @@ def test_could_fit():
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(ConstantSDRAM(0))
writer.add_sample_monitor_vertex(monitor, True)
spaces = _Spaces(Placements())
spaces._Spaces__check_could_fit(16, 500000)
placer = ApplicationPlacer(Placements())
placer._check_could_fit(16, 500000)
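# Sketch (not part of this commit): the same direct-check pattern as the tests
# above, but for the "more cores than any Chip has" path. The core count is an
# assumption; any value above the machine version's per-chip budget should do.
def test_could_not_fit_cores_sketch():
    # Any per-test setup used by the other tests in this file (collapsed in
    # this view) would be needed here as well.
    writer = PacmanDataWriter.mock()
    monitor = SimpleMachineVertex(ConstantSDRAM(0))
    writer.add_sample_monitor_vertex(monitor, True)
    placer = ApplicationPlacer(Placements())
    try:
        placer._check_could_fit(1000, plan_sdram=0)
        raise AssertionError("Error not raised")
    except PacmanTooBigToPlace:
        pass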
