Commit 1ba6b54

Merge branch 'master' into master

phschaad authored Jan 16, 2024
2 parents b0149a7 + fa305d2
Showing 17 changed files with 321 additions and 175 deletions.
238 changes: 151 additions & 87 deletions dace/codegen/compiled_sdfg.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions dace/codegen/cppunparse.py
@@ -746,9 +746,9 @@ def _Repr(self, t):
    def _Num(self, t):
        t_n = t.value if sys.version_info >= (3, 8) else t.n
        repr_n = repr(t_n)
-       # For complex values, use DTYPE_TO_TYPECLASS dictionary
+       # For complex values, use ``dtype_to_typeclass``
        if isinstance(t_n, complex):
-           dtype = dtypes.DTYPE_TO_TYPECLASS[complex]
+           dtype = dtypes.dtype_to_typeclass(complex)

        # Handle large integer values
        if isinstance(t_n, int):
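Both lookups resolve to the same typeclass; a minimal sketch of the replacement API (assuming a DaCe build that includes this commit — see the new function in dace/dtypes.py below):

    import dace.dtypes as dtypes

    # Formerly: dtypes.DTYPE_TO_TYPECLASS[complex]
    print(dtypes.dtype_to_typeclass(complex))  # the complex128 typeclass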
5 changes: 3 additions & 2 deletions dace/codegen/targets/framecode.py
@@ -887,8 +887,9 @@ def generate_code(self,

        # NOTE: NestedSDFGs frequently contain tautologies in their symbol mapping, e.g., `'i': i`. Do not
        # redefine the symbols in such cases.
-       if (not is_top_level and isvarName in sdfg.parent_nsdfg_node.symbol_mapping
-               and str(sdfg.parent_nsdfg_node.symbol_mapping[isvarName]) == str(isvarName)):
+       # Additionally, do not redefine a symbol with its type if it was already defined
+       # as part of the function's arguments.
+       if not is_top_level and isvarName in sdfg.parent_nsdfg_node.symbol_mapping:
            continue
        isvar = data.Scalar(isvarType)
        callsite_stream.write('%s;\n' % (isvar.as_arg(with_types=True, name=isvarName)), sdfg)
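The deleted clause detected the tautology by comparing string forms; the new condition skips redefinition whenever the symbol appears in the parent mapping at all. A minimal sketch of the old test (assuming `dace` is importable; `symbol_mapping` here is a stand-in dict, not a real nested SDFG node):

    import dace

    i = dace.symbol('i')
    symbol_mapping = {'i': i}  # tautological mapping, as in the NOTE above
    # The removed condition compared string forms to detect this case:
    print(str(symbol_mapping['i']) == 'i')  # True -> the symbol was not redefined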
10 changes: 8 additions & 2 deletions dace/data.py
@@ -73,9 +73,15 @@ def create_datadescriptor(obj, no_custom_desc=False):
        else:
            dtype = dtypes.typeclass(obj.dtype.type)
        return Array(dtype=dtype, strides=tuple(s // obj.itemsize for s in obj.strides), shape=obj.shape)
-   # special case for torch tensors. Maybe __array__ could be used here for a more
-   # general solution, but torch doesn't support __array__ for cuda tensors.
+   elif type(obj).__module__ == "cupy" and type(obj).__name__ == "ndarray":
+       # special case for CuPy with HIP, which does not support __cuda_array_interface__
+       storage = dtypes.StorageType.GPU_Global
+       dtype = dtypes.typeclass(obj.dtype.type)
+       itemsize = obj.itemsize
+       return Array(dtype=dtype, shape=obj.shape, strides=tuple(s // itemsize for s in obj.strides), storage=storage)
    elif type(obj).__module__ == "torch" and type(obj).__name__ == "Tensor":
+       # special case for torch tensors. Maybe __array__ could be used here for a more
+       # general solution, but torch doesn't support __array__ for cuda tensors.
        try:
            # If torch is importable, define translations between typeclasses and torch types. These are reused by daceml.
            # conversion happens here in pytorch:
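A sketch of what the new branch produces for a CuPy array (assuming `cupy` is installed and a GPU is available; all names match the diff above):

    import cupy as cp
    import dace

    x = cp.ones((4, 4), dtype=cp.float32)
    desc = dace.data.create_datadescriptor(x)
    print(type(desc).__name__)  # Array
    print(desc.storage)         # StorageType.GPU_Global
    print(desc.dtype)           # float32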
86 changes: 57 additions & 29 deletions dace/dtypes.py
@@ -360,6 +360,7 @@ class typeclass(object):
        2. Enabling declaration syntax: `dace.float32[M,N]`
        3. Enabling extensions such as `dace.struct` and `dace.vector`
    """
+
    def __init__(self, wrapped_type, typename=None):
        # Convert python basic types
        if isinstance(wrapped_type, str):
@@ -600,6 +601,7 @@ def result_type_of(lhs, *rhs):

class opaque(typeclass):
    """ A data type for an opaque object, useful for C bindings/libnodes, i.e., MPI_Request. """
+
    def __init__(self, typename):
        self.type = typename
        self.ctype = typename
@@ -635,6 +637,7 @@ class pointer(typeclass):
        Example use:
        `dace.pointer(dace.struct(x=dace.float32, y=dace.float32))`. """
+
    def __init__(self, wrapped_typeclass):
        self._typeclass = wrapped_typeclass
        self.type = wrapped_typeclass.type
@@ -680,6 +683,7 @@ class vector(typeclass):
        Example use: `dace.vector(dace.float32, 4)` becomes float4.
    """
+
    def __init__(self, dtype: typeclass, vector_length: int):
        self.vtype = dtype
        self.type = dtype.type
@@ -737,6 +741,7 @@ class stringtype(pointer):
        Python/generated code marshalling.
        Used internally when `str` types are given
    """
+
    def __init__(self):
        super().__init__(int8)

Expand All @@ -756,6 +761,7 @@ class struct(typeclass):
Example use: `dace.struct(a=dace.int32, b=dace.float64)`.
"""

def __init__(self, name, **fields_and_types):
# self._data = fields_and_types
self.type = ctypes.Structure
@@ -859,6 +865,7 @@ class pyobject(opaque):
    It cannot be used inside a DaCe program, but can be passed back to other Python callbacks.
    Use with caution, and ensure the value is not removed by the garbage collector or the program will crash.
    """
+
    def __init__(self):
        super().__init__('pyobject')
        self.bytes = ctypes.sizeof(ctypes.c_void_p)
@@ -892,6 +899,7 @@ def example(A: dace.float64[20], constant: dace.compiletime):
    In the above code, ``constant`` will be replaced with its value at call time
    during parsing.
    """
+
    @staticmethod
    def __descriptor__():
        raise ValueError('All compile-time arguments must be provided in order to compile the SDFG ahead-of-time.')
@@ -914,6 +922,7 @@ class callback(typeclass):
    """
    Looks like ``dace.callback([None, <some_native_type>], *types)``
    """
+
    def __init__(self, return_types, *variadic_args):
        from dace import data
        if return_types is None:
@@ -1240,31 +1249,39 @@ class Typeclasses(aenum.AutoNumberEnum):
    complex128 = complex128


-DTYPE_TO_TYPECLASS = {
-    bool: typeclass(bool),
-    int: typeclass(int),
-    float: typeclass(float),
-    complex: typeclass(complex),
-    numpy.bool_: bool_,
-    numpy.int8: int8,
-    numpy.int16: int16,
-    numpy.int32: int32,
-    numpy.int64: int64,
-    numpy.intc: int32,
-    numpy.uint8: uint8,
-    numpy.uint16: uint16,
-    numpy.uint32: uint32,
-    numpy.uint64: uint64,
-    numpy.uintc: uint32,
-    numpy.float16: float16,
-    numpy.float32: float32,
-    numpy.float64: float64,
-    numpy.complex64: complex64,
-    numpy.complex128: complex128,
-    # FIXME
-    numpy.longlong: int64,
-    numpy.ulonglong: uint64
-}
+_bool = bool
+
+
+def dtype_to_typeclass(dtype=None):
+    DTYPE_TO_TYPECLASS = {
+        _bool: typeclass(_bool),
+        int: typeclass(int),
+        float: typeclass(float),
+        complex: typeclass(complex),
+        numpy.bool_: bool_,
+        numpy.int8: int8,
+        numpy.int16: int16,
+        numpy.int32: int32,
+        numpy.int64: int64,
+        numpy.intc: int32,
+        numpy.uint8: uint8,
+        numpy.uint16: uint16,
+        numpy.uint32: uint32,
+        numpy.uint64: uint64,
+        numpy.uintc: uint32,
+        numpy.float16: float16,
+        numpy.float32: float32,
+        numpy.float64: float64,
+        numpy.complex64: complex64,
+        numpy.complex128: complex128,
+        # FIXME
+        numpy.longlong: int64,
+        numpy.ulonglong: uint64
+    }
+    if dtype is None:
+        return DTYPE_TO_TYPECLASS
+    return DTYPE_TO_TYPECLASS[dtype]


# Since this overrides the builtin bool, this should be after the
# DTYPE_TO_TYPECLASS dictionary
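Building the table inside a function (keyed through the `_bool` alias) keeps the builtin `bool` usable as a key even though the module later shadows `bool`, per the comment above. A short usage sketch, assuming this commit of DaCe:

    import numpy
    from dace import dtypes

    print(dtypes.dtype_to_typeclass(numpy.float32))  # the float32 typeclass
    table = dtypes.dtype_to_typeclass()              # no argument: the full mapping
    print(bool in table and numpy.int64 in table)    # True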
@@ -1354,6 +1371,7 @@ def isallowed(var, allow_recursive=False):
class DebugInfo:
    """ Source code location identifier of a node/edge in an SDFG. Used for
        IDE and debugging purposes. """
+
    def __init__(self, start_line, start_column=0, end_line=-1, end_column=0, filename=None):
        self.start_line = start_line
        self.end_line = end_line if end_line >= 0 else start_line
@@ -1397,6 +1415,7 @@ def json_to_typeclass(obj, context=None):
def paramdec(dec):
    """ Parameterized decorator meta-decorator. Enables using `@decorator`,
        `@decorator()`, and `@decorator(...)` with the same function. """
+
    @wraps(dec)
    def layer(*args, **kwargs):
        from dace import data
@@ -1478,20 +1497,22 @@ def can_allocate(storage: StorageType, schedule: ScheduleType):
    # Host-only allocation
    if storage in [StorageType.CPU_Heap, StorageType.CPU_Pinned, StorageType.CPU_ThreadLocal]:
        return schedule in [
-           ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent, ScheduleType.Sequential, ScheduleType.MPI, ScheduleType.GPU_Default
+           ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent, ScheduleType.Sequential, ScheduleType.MPI,
+           ScheduleType.GPU_Default
        ]

    # GPU-global memory
    if storage is StorageType.GPU_Global:
        return schedule in [
-           ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent, ScheduleType.Sequential, ScheduleType.MPI, ScheduleType.GPU_Default
+           ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent, ScheduleType.Sequential, ScheduleType.MPI,
+           ScheduleType.GPU_Default
        ]

    # FPGA-global memory
    if storage is StorageType.FPGA_Global:
        return schedule in [
-           ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent, ScheduleType.Sequential, ScheduleType.MPI, ScheduleType.FPGA_Device,
-           ScheduleType.GPU_Default
+           ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent, ScheduleType.Sequential, ScheduleType.MPI,
+           ScheduleType.FPGA_Device, ScheduleType.GPU_Default
        ]

    # FPGA-local memory
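Only the line wrapping changed in these three lists; behavior is identical. For reference, a quick check against the lists above (a sketch, assuming this commit):

    from dace import dtypes

    print(dtypes.can_allocate(dtypes.StorageType.GPU_Global, dtypes.ScheduleType.CPU_Multicore))  # True
    print(dtypes.can_allocate(dtypes.StorageType.FPGA_Global, dtypes.ScheduleType.FPGA_Device))   # True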
@@ -1536,6 +1557,8 @@ def is_array(obj: Any) -> bool:
            return hasattr(obj, 'shape') and len(obj.shape) > 0
        except TypeError:  # PyTorch scalar objects define an attribute called shape that cannot be used
            return False
+   if hasattr(obj, 'data') and hasattr(obj.data, 'ptr'):  # CuPy special case with HIP
+       return True
    return False

@@ -1556,4 +1579,9 @@ def is_gpu_array(obj: Any) -> bool:
            # In PyTorch, accessing this attribute throws a runtime error for
            # variables that require grad, or KeyError when a boolean array is used
            return False
+
+   if hasattr(obj, 'data') and hasattr(obj.data, 'ptr'):  # CuPy special case with HIP
+       if hasattr(obj, 'device') and getattr(obj.device, 'id', -1) >= 0:
+           return True
+
    return False
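Both helpers now fall back to duck typing: a CuPy `ndarray` under HIP lacks `__cuda_array_interface__` but still exposes a device pointer via `obj.data.ptr` and a device id via `obj.device.id`. A sketch of the checks (assuming `cupy` with a visible GPU):

    import cupy as cp
    from dace import dtypes

    x = cp.zeros(10)
    print(hasattr(x, 'data') and hasattr(x.data, 'ptr'))  # True: device pointer is exposed
    print(dtypes.is_array(x), dtypes.is_gpu_array(x))     # True True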
8 changes: 4 additions & 4 deletions dace/frontend/python/newast.py
@@ -3240,7 +3240,7 @@ def _visit_assign(self, node, node_target, op, dtype=None, is_return=False):
            raise DaceSyntaxError(self, target, 'Variable "{}" used before definition'.format(name))

        new_data, rng = None, None
-       dtype_keys = tuple(dtypes.DTYPE_TO_TYPECLASS.keys())
+       dtype_keys = tuple(dtypes.dtype_to_typeclass().keys())
        if not (result in self.sdfg.symbols or symbolic.issymbolic(result) or isinstance(result, dtype_keys) or
                (isinstance(result, str) and result in self.sdfg.arrays)):
            raise DaceSyntaxError(
@@ -4653,14 +4653,14 @@ def visit_Num(self, node: NumConstant):
        if isinstance(node.n, bool):
            return dace.bool_(node.n)
        if isinstance(node.n, (int, float, complex)):
-           return dtypes.DTYPE_TO_TYPECLASS[type(node.n)](node.n)
+           return dtypes.dtype_to_typeclass(type(node.n))(node.n)
        return node.n

    def visit_Constant(self, node: ast.Constant):
        if isinstance(node.value, bool):
            return dace.bool_(node.value)
        if isinstance(node.value, (int, float, complex)):
-           return dtypes.DTYPE_TO_TYPECLASS[type(node.value)](node.value)
+           return dtypes.dtype_to_typeclass(type(node.value))(node.value)
        if isinstance(node.value, (str, bytes)):
            return StringLiteral(node.value)
        return node.value

@@ -4745,7 +4745,7 @@ def _gettype(self, opnode: ast.AST) -> List[Tuple[str, str]]:
            result.append((operand, type(self.sdfg.arrays[operand])))
        elif isinstance(operand, str) and operand in self.scope_arrays:
            result.append((operand, type(self.scope_arrays[operand])))
-       elif isinstance(operand, tuple(dtypes.DTYPE_TO_TYPECLASS.keys())):
+       elif isinstance(operand, tuple(dtypes.dtype_to_typeclass().keys())):
            if isinstance(operand, (bool, numpy.bool_)):
                result.append((operand, 'BoolConstant'))
            else:
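With the function-based lookup, the frontend types literals as shown in `visit_Constant` above. A hedged sketch of what that line computes for a float literal (assuming this commit):

    from dace import dtypes

    typed = dtypes.dtype_to_typeclass(type(3.5))(3.5)  # calls the float64 typeclass on the value
    print(typed, type(typed))                          # 3.5, typically as a numpy.float64 scalar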