Skip to content

Commit

Permalink
Add tests
Browse files Browse the repository at this point in the history
  • Loading branch information
ThrudPrimrose committed Nov 20, 2024
1 parent 400257d commit c14b91e
Show file tree
Hide file tree
Showing 2 changed files with 207 additions and 0 deletions.
53 changes: 53 additions & 0 deletions dace/sdfg/validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -542,6 +542,59 @@ def validate_state(state: 'dace.sdfg.SDFGState',
'written to, but only given to nested SDFG as an '
'input connector' % node.data, sdfg, state_id, nid)

# Deferred allocation related tests
insize = "_write_size"
outsize = "_read_size"
read_size_edges = list(state.edges_by_connector(node, outsize))
write_size_edges = list(state.edges_by_connector(node, insize))

# Reading or writing the size descriptor is valid only if the array is
# transient and its storage is CPU_Heap or GPU_Global.
has_writes_or_reads = len(read_size_edges) + len(write_size_edges) > 0
size_access_allowed = arr.transient and (arr.storage == dtypes.StorageType.CPU_Heap
                                         or arr.storage == dtypes.StorageType.GPU_Global)
if has_writes_or_reads and not size_access_allowed:
    raise InvalidSDFGNodeError('Reading the size of an array, or changing (writing to) the size of an array '
                               'is only valid if the array is transient and the storage is CPU_Heap or GPU_Global',
                               sdfg, state_id, nid)

if len(write_size_edges) > 1:
    # Fixed typo in message: "descriptior" -> "descriptor".
    raise InvalidSDFGNodeError('One node can have at maximum one edge writing to its size descriptor',
                               sdfg, state_id, nid)

# A size write must provide exactly one element per array dimension,
# as a one-dimensional, non-symbolic subset.
if len(write_size_edges) == 1:
    write_size_edge = write_size_edges[0]
    edge_id = state.edge_id(write_size_edge)
    required_range = len(arr.shape)
    try:
        elements = int(write_size_edge.data.num_elements())
        write_ok = (elements == required_range and write_size_edge.data.subset.dims() == 1)
    except Exception:
        # num_elements() may be symbolic, making int() raise.
        write_ok = False
    if not write_ok:
        raise InvalidSDFGEdgeError('The write to a node needs to match the length of the array shape, '
                                   'the volume needs to be integer (not symbolic) and the shape one-dimensional',
                                   sdfg, state_id, edge_id)

# Size reads may only connect to entry nodes, access nodes, or tasklets.
for read_size_edge in read_size_edges:
    edge_id = state.edge_id(read_size_edge)
    from dace import nodes
    if isinstance(read_size_edge.dst, (nodes.EntryNode, nodes.AccessNode, nodes.Tasklet)):
        if isinstance(read_size_edge.dst, nodes.MapEntry):
            required_range = 1
            try:
                elements = int(read_size_edge.data.num_elements())
                # BUGFIX: the original combined the two checks with `and`,
                # rejecting the edge only when BOTH conditions failed. Either
                # violation must be rejected, mirroring the write-size check.
                read_ok = (elements == required_range and read_size_edge.data.subset.dims() == 1)
            except Exception:
                read_ok = False
            if not read_ok:
                # Also fixed the message: missing verb and missing separator
                # between the two concatenated sentences.
                raise InvalidSDFGEdgeError('The read to a map entry needs to have dimension 1. '
                                           'If reading multiple dimensions, multiple edges need to go to the map entry',
                                           sdfg, state_id, edge_id)
    else:
        raise InvalidSDFGEdgeError('The read size should connect to an entry node, access node, or tasklet (this can be changed)',
                                   sdfg, state_id, edge_id)
if (isinstance(node, nd.ConsumeEntry) and "IN_stream" not in node.in_connectors):
raise InvalidSDFGNodeError("Consume entry node must have an input stream", sdfg, state_id, nid)
if (isinstance(node, nd.ConsumeEntry) and "OUT_stream" not in node.out_connectors):
Expand Down
154 changes: 154 additions & 0 deletions tests/deferred_alloc_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,154 @@
import dace
import numpy

def _get_trivial_alloc_sdfg(storage_type: dace.dtypes.StorageType, transient: bool, write_size="0:2"):
    """Build a minimal SDFG that writes ``user_size[write_size]`` into the
    deferred-size connector (``_write_size``) of array ``A``.

    ``A`` has a deferred second dimension (``__dace_defer``); ``write_size``
    selects which part of the 2-element ``user_size`` array feeds the write.
    The SDFG is also saved to ``def_alloc_1.sdfg`` for inspection.
    """
    sdfg = dace.sdfg.SDFG(name="deferred_alloc_test")
    sdfg.add_array(name="A", shape=(15, "__dace_defer"), dtype=dace.float32,
                   storage=storage_type, transient=transient)

    main_state = sdfg.add_state("main")

    # Access node for A, receiving its new size through the size connector.
    a_node = main_state.add_access('A')
    a_node.add_in_connector('_write_size')

    # The user-provided size array (one entry per dimension of A).
    size_node = main_state.add_array(name="user_size", shape=(2,), dtype=numpy.uint64)

    main_state.add_edge(size_node, None, a_node, '_write_size',
                        dace.Memlet(expr=f"user_size[{write_size}]"))

    sdfg.save("def_alloc_1.sdfg")
    return sdfg


def _get_assign_map_sdfg(storage_type: dace.dtypes.StorageType, transient: bool):
    """Build an SDFG that writes A's deferred size, fills A with a constant in
    one map, then reads the size back and verifies the contents in a second map.

    The SDFG is also saved to ``def_alloc_4.sdfg`` for inspection.

    NOTE(review): the original assign tasklet wrote 3.0 while the check tasklet
    rejected anything != 5.0, so the runtime check could never pass; the check
    now compares against the value actually assigned (3.0).
    """
    sdfg = dace.sdfg.SDFG(name="deferred_alloc_test_4")

    sdfg.add_array(name="A", shape=(15, "__dace_defer"), dtype=dace.float32, storage=storage_type,
                   lifetime=dace.dtypes.AllocationLifetime.SDFG, transient=transient)

    state = sdfg.add_state("main")

    an_1 = state.add_access('A')
    an_1.add_in_connector('_write_size')
    an_1.add_out_connector('_read_size')

    an_2 = state.add_array(name="user_size", shape=(2,), dtype=numpy.uint64)

    # Write the runtime size of A's deferred dimension.
    state.add_edge(an_2, None, an_1, '_write_size',
                   dace.Memlet(expr="user_size[0:2]"))

    # First map: assign a constant to every element of A. The deferred
    # dimension's extent is fed in as the symbol __A_dim1_size.
    map_entry, map_exit = state.add_map(name="map",
                                        ndrange={"i": dace.subsets.Range([(0, 15 - 1, 1)]),
                                                 "j": dace.subsets.Range([(0, "__A_dim1_size-1", 1)])})
    state.add_edge(an_1, '_read_size', map_entry, "__A_dim1_size", dace.Memlet(expr="A_size[1]"))
    map_entry.add_in_connector("__A_dim1_size")
    map_exit.add_in_connector("IN_A")
    map_exit.add_out_connector("OUT_A")

    t1 = state.add_tasklet(name="assign", inputs={}, outputs={"_out"}, code="_out=3.0")
    state.add_edge(map_entry, None, t1, None, dace.Memlet(None))
    state.add_edge(t1, "_out", map_exit, "IN_A", dace.Memlet(expr="A[i, j]"))

    an_3 = state.add_access('A')
    state.add_edge(map_exit, "OUT_A", an_3, None,
                   dace.Memlet(data="A", subset=dace.subsets.Range([(0, 15 - 1, 1), (0, "__A_dim1_size-1", 1)])))

    # Second map: read the size again and verify the assigned values.
    an_3.add_out_connector('_read_size')
    map_entry2, map_exit2 = state.add_map(name="map2",
                                          ndrange={"i": dace.subsets.Range([(0, 15 - 1, 1)]),
                                                   "j": dace.subsets.Range([(0, "__A_dim1_size-1", 1)])})
    state.add_edge(an_3, '_read_size', map_entry2, "__A_dim1_size", dace.Memlet(expr="A_size[1]"))
    state.add_edge(an_3, None, map_entry2, "IN_A", dace.Memlet(expr="A[0:15, 0:__A_dim1_size]"))
    map_entry2.add_in_connector("__A_dim1_size")
    map_entry2.add_in_connector("IN_A")
    map_entry2.add_out_connector("OUT_A")
    map_exit2.add_in_connector("IN_A")
    map_exit2.add_out_connector("OUT_A")

    # BUGFIX: compare against 3.0 — the value written by the assign tasklet.
    # The original compared against 5.0 and would always throw at runtime.
    t2 = state.add_tasklet(name="check", inputs={"_in"}, outputs={"_out"},
                           code='if (_in != 3.0){ throw std::runtime_error("fail"); } \n _out=_in;',
                           language=dace.dtypes.Language.CPP)
    state.add_edge(map_entry2, "OUT_A", t2, "_in", dace.Memlet(expr="A[i, j]"))
    state.add_edge(t2, "_out", map_exit2, "IN_A", dace.Memlet(expr="A[i, j]"))

    an_5 = state.add_access('A')
    state.add_edge(map_exit2, "OUT_A", an_5, None,
                   dace.Memlet(data="A", subset=dace.subsets.Range([(0, 15 - 1, 1), (0, "__A_dim1_size-1", 1)])))

    sdfg.save("def_alloc_4.sdfg")

    return sdfg


def _valid_to_reallocate(transient, storage_type, scope):
return transient and (storage_type == dace.dtypes.StorageType.GPU_Global or storage_type == dace.dtypes.StorageType.CPU_Heap)

def test_trivial_realloc(storage_type: dace.dtypes.StorageType, transient: bool):
    """Validate (and compile) a trivial size-write SDFG; validation must fail
    exactly when reallocation is not permitted for this configuration.

    :param storage_type: Storage type of the deferred-size array.
    :param transient: Whether the array is transient.
    """
    sdfg = _get_trivial_alloc_sdfg(storage_type, transient)
    try:
        sdfg.validate()
    except Exception as ex:
        if not _valid_to_reallocate(transient, storage_type, None):
            # Expected failure for an invalid storage/transient combination.
            return
        # Chain the original validation error so the root cause is visible.
        raise AssertionError("Realloc with transient data failed when it was expected not to.") from ex

    if not _valid_to_reallocate(transient, storage_type, None):
        raise AssertionError("Realloc with non-transient data did not fail when it was expected to.")

    sdfg.compile()

def test_realloc_use(storage_type: dace.dtypes.StorageType, transient: bool):
    """Validate (and compile) the assign/check-map SDFG; validation must fail
    exactly when reallocation is not permitted for this configuration.

    :param storage_type: Storage type of the deferred-size array.
    :param transient: Whether the array is transient.
    """
    sdfg = _get_assign_map_sdfg(storage_type, transient)
    try:
        sdfg.validate()
    except Exception as ex:
        if not _valid_to_reallocate(transient, storage_type, None):
            # Expected failure for an invalid storage/transient combination.
            return
        # Chain the original validation error so the root cause is visible.
        raise AssertionError("Realloc-use with transient data failed when it was expected not to.") from ex

    if not _valid_to_reallocate(transient, storage_type, None):
        raise AssertionError("Realloc-use with non-transient data did not fail when it was expected to.")

    sdfg.compile()

def test_incomplete_write_dimensions_1():
    """A partial size write ("1:2" instead of the full "0:2") must be rejected
    by validation, even for an otherwise-valid transient CPU_Heap array."""
    sdfg = _get_trivial_alloc_sdfg(dace.dtypes.StorageType.CPU_Heap, True, "1:2")
    validation_failed = False
    try:
        sdfg.validate()
    except Exception:
        validation_failed = True

    if not validation_failed:
        raise AssertionError("Realloc-use with transient data and incomplete write did not fail when it was expected to.")

def test_incomplete_write_dimensions_2():
    """A partial size write ("1:2") on a non-transient array must also be
    rejected by validation."""
    sdfg = _get_trivial_alloc_sdfg(dace.dtypes.StorageType.CPU_Heap, False, "1:2")
    validation_failed = False
    try:
        sdfg.validate()
    except Exception:
        validation_failed = True

    if not validation_failed:
        raise AssertionError("Realloc-use with non-transient data and incomplete write did not fail when it was expected to.")

def test_realloc_inside_map():
    # TODO: not implemented yet — presumably intended to check that writing an
    # array's size descriptor from within a map scope is handled/rejected.
    pass

if __name__ == "__main__":
    # Valid storage types, transient configurations first (same execution
    # order and printed output as running the two original loops in sequence).
    for transient in (True, False):
        suffix = "" if transient else " on non-transient data"
        for storage_type in (dace.dtypes.StorageType.CPU_Heap, dace.dtypes.StorageType.GPU_Global):
            print(f"Trivial Realloc with storage {storage_type}{suffix}")
            test_trivial_realloc(storage_type, transient)
            print(f"Trivial Realloc-Use with storage {storage_type}{suffix}")
            test_realloc_use(storage_type, transient)

    # Storage types for which deferred allocation must be rejected.
    for transient in (True, False):
        for storage_type in (dace.dtypes.StorageType.Default, dace.dtypes.StorageType.Register):
            print(f"Trivial Realloc with storage {storage_type} on transient:{transient} data")
            test_trivial_realloc(storage_type, transient)
            print(f"Trivial Realloc-Use with storage {storage_type} on transient:{transient} data")
            test_realloc_use(storage_type, transient)

    print("Realloc with incomplete write 1")
    test_incomplete_write_dimensions_1()
    print("Realloc with incomplete write 2")
    test_incomplete_write_dimensions_2()

0 comments on commit c14b91e

Please sign in to comment.