Commit

making the repo compatible with database test configs
AlexMontgomerie committed Feb 6, 2024
1 parent 3ef817e commit b81a4c4
Showing 33 changed files with 741 additions and 930 deletions.
Empty file added fpgaconvnet/__init__.py
Empty file.
18 changes: 17 additions & 1 deletion fpgaconvnet/models/layers/concat/base.py
@@ -21,7 +21,6 @@

@dataclass(kw_only=True)
class ConcatLayerBase(LayerMatchingCoarse, LayerBase):

ports: int
channels: list[int]
data_t: FixedPoint = FixedPoint(16,8)
@@ -69,6 +68,23 @@ def get_coarse_in_feasible(self) -> list[int]:
def get_coarse_out_feasible(self) -> list[int]:
return self.get_coarse_feasible()

def functional_model(self, *data: np.array) -> np.array:
import torch

assert len(data) == self.ports, f"invalid number of input ports ({len(data)} != {self.ports})"

for i, d in enumerate(data):
assert list(d.shape) == self.input_shape(i), \
f"invalid spatial dimensions for port={i} ({list(d.shape)} != {self.input_shape(i)})"

# return the functional model
return torch.cat([torch.from_numpy(d) for d in data], axis=-1)

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.ports = self.ports
parameters.channels_in_array.extend(self.channels)

@dataclass(kw_only=True)
class ConcatLayerChiselMixin(ConcatLayerBase):

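The `functional_model` added to `ConcatLayerBase` concatenates the per-port inputs along the channel (last) axis with `torch.cat`. A minimal standalone sketch of that behaviour, with made-up port shapes:

```python
import numpy as np
import torch

# two hypothetical input ports: same spatial dimensions, different channel counts
port0 = np.random.rand(8, 8, 16).astype(np.float32)   # rows x cols x channels
port1 = np.random.rand(8, 8, 32).astype(np.float32)

# concatenate along the last (channel) axis, as the concat functional model does
out = torch.cat([torch.from_numpy(d) for d in (port0, port1)], axis=-1)
assert list(out.shape) == [8, 8, 48]
```
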
43 changes: 38 additions & 5 deletions fpgaconvnet/models/layers/convolution/base.py
@@ -160,11 +160,11 @@ def resource(self, model: Optional[ResourceModel] = None) -> dict[str,int]:
def functional_model(self,data,weights,bias,batch_size=1):
import torch

assert data.shape == self.input_shape, "ERROR (data): invalid row dimension"
assert list(data.shape) == list(self.input_shape(0)), "ERROR (data): invalid spatial dimensions"

assert weights.shape[0] == self.filters, "ERROR (weights): invalid filter dimension"
assert weights.shape[1] == self.channels//self.groups, "ERROR (weights): invalid channel dimension"
assert weights.shape[2:] == self.kernel_size, "ERROR (weights): invalid kernel dimension"
assert list(weights.shape[2:]) == list(self.kernel_size), "ERROR (weights): invalid kernel dimension"

assert bias.shape[0] == self.filters, "ERROR (bias): invalid filter dimension"

@@ -192,11 +192,22 @@ def functional_model(self,data,weights,bias,batch_size=1):
data = torch.nn.functional.pad(torch.from_numpy(data), self.pad, "constant", 0.0)
return convolution_layer(data).detach().numpy()

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.coarse_group = self.coarse_group
parameters.filters = self.filters
parameters.groups = self.groups
parameters.fine = self.fine
self.input_t.to_protobuf(parameters.input_t)
self.weight_t.to_protobuf(parameters.weight_t)
self.acc_t.to_protobuf(parameters.acc_t)
self.output_t.to_protobuf(parameters.output_t)


@dataclass(kw_only=True)
class ConvolutionLayer2DMixin(ConvolutionLayerBase, Layer2D):
kernel_rows: int = 1
kernel_cols: int = 1
kernel_rows: int
kernel_cols: int
stride_rows: int = 1
stride_cols: int = 1
pad_top: int = 0
@@ -268,9 +279,24 @@ def pipeline_depth(self):
(self.kernel_cols-1)*self.channels//self.coarse_in + \
((self.channels-1)//self.coarse_in)*(self.filters//(self.coarse_out*self.groups))

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.kernel_size.extend(self.kernel_size)
parameters.kernel_rows = self.kernel_rows
parameters.kernel_cols = self.kernel_cols
parameters.stride.extend(self.stride)
parameters.stride_rows = self.stride_rows
parameters.stride_cols = self.stride_cols
# parameters.pad.extend(self.pad)
parameters.pad_top = self.pad_top
parameters.pad_right = self.pad_right
parameters.pad_bottom = self.pad_bottom
parameters.pad_left = self.pad_left


@dataclass(kw_only=True)
class ConvolutionLayer3DMixin(Layer3D, ConvolutionLayer2DMixin):
kernel_depth: int = 1
kernel_depth: int
stride_depth: int = 1
pad_front: int = 0
pad_back: int = 0
@@ -325,3 +351,10 @@ def pad(self, val: list[int]) -> None:
self.pad_front = val[2]
self.pad_back = val[5]

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.kernel_depth = self.kernel_depth
parameters.stride_depth = self.stride_depth
parameters.pad_front = self.pad_front
parameters.pad_back = self.pad_back
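
The convolution functional model keeps fpgaconvnet's channels-last layout and converts it to the channels-first layout that `torch.nn.Conv2d` expects before loading the numpy weights and bias. A self-contained sketch of that pattern, with invented shapes and padding rather than the layer's actual configuration:

```python
import numpy as np
import torch

rows, cols, channels, filters, batch_size = 16, 16, 8, 4, 1
data = np.random.rand(rows, cols, channels).astype(np.float32)
weights = np.random.rand(filters, channels, 3, 3).astype(np.float32)
bias = np.random.rand(filters).astype(np.float32)

# equivalent torch convolution with the numpy parameters loaded in
conv = torch.nn.Conv2d(channels, filters, kernel_size=3, stride=1, padding=0)
conv.weight = torch.nn.Parameter(torch.from_numpy(weights))
conv.bias = torch.nn.Parameter(torch.from_numpy(bias))

# channels-last -> channels-first, add a batch dimension, pad, then run
data = np.moveaxis(data, -1, 0)
data = np.repeat(data[np.newaxis, ...], batch_size, axis=0)
data = torch.nn.functional.pad(torch.from_numpy(data), (1, 1, 1, 1), "constant", 0.0)
out = conv(data).detach().numpy()   # shape: (1, filters, rows, cols)
```
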

27 changes: 27 additions & 0 deletions fpgaconvnet/models/layers/eltwise/base.py
@@ -32,6 +32,8 @@ class EltwiseLayerBase(LayerMatchingCoarse, LayerBase):
name: ClassVar[str] = "eltwise"

def __post_init__(self):
self.ports_in = self.ports
self.ports_out = 1
self.buffer_depth = [0]*self.ports
super().__post_init__()

@@ -58,6 +60,31 @@ def __setattr__(self, name: str, value: Any) -> None:
print(f"WARNING: unable to set attribute {name}, trying super method")
super().__setattr__(name, value)

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.ports = self.ports

def functional_model(self, *data: np.array) -> np.array:
import torch

assert len(data) == self.ports, f"invalid number of input ports ({len(data)} != {self.ports})"

for i, d in enumerate(data):
assert list(d.shape) == self.input_shape(i), \
f"invalid spatial dimensions ({list(d.shape)} != {self.input_shape(i)})"

# return the functional model
return torch.add(torch.from_numpy(data[0]), torch.from_numpy(data[0])).detach().numpy()


def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.ports = self.ports
# parameters.op_type = self.op_type
# parameters.broadcast = self.broadcast
# parameters.data_t = self.data_t
self.acc_t.to_protobuf(parameters.acc_t)


@dataclass(kw_only=True)
class EltwiseLayerChiselMixin(EltwiseLayerBase):
8 changes: 6 additions & 2 deletions fpgaconvnet/models/layers/global_pooling/base.py
@@ -35,10 +35,14 @@ def get_operations(self):

def functional_model(self, data):

assert data.shape == self.inputs_shape(), "ERROR: invalid input shape dimension"
assert list(data.shape) == self.input_shape(), f"invalid input shape dimension ({data.shape}) != ({self.input_shape()})"

# return output featuremap
return np.average(data, axis=list(range(len(data.shape)-1)))
return np.average(data, axis=tuple(range(len(data.shape)-1)))

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
self.acc_t.to_protobuf(parameters.acc_t)

class GlobalPoolingLayerChiselMixin(GlobalPoolingLayerBase):

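The corrected `np.average` call reduces over every axis except the trailing channel axis, which is why a tuple of axes is passed rather than a list. A quick illustrative check with an arbitrary shape:

```python
import numpy as np

data = np.random.rand(16, 16, 8)   # rows x cols x channels
pooled = np.average(data, axis=tuple(range(data.ndim - 1)))
assert pooled.shape == (8,)        # one average per channel
```
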
37 changes: 19 additions & 18 deletions fpgaconvnet/models/layers/inner_product/base.py
@@ -88,36 +88,37 @@ def resource(self, model: Optional[ResourceModel] = None) -> dict[str,int]:
def functional_model(self,data,weights,bias,batch_size=1):
import torch

assert data.shape == self.input_shape, "ERROR (data): invalid row dimension"
assert list(data.shape) == self.input_shape(0), \
f"invalid spatial dimensions ({list(data.shape)} != {self.input_shape(0)})"

assert weights.shape[0] == self.filters, "ERROR (weights): invalid filter dimension"
assert weights.shape[1] == self.channels, "ERROR (weights): invalid channel dimension"
assert weights.shape[1] == math.prod(self.input_shape(0)), \
"ERROR (weights): invalid channel dimension"

assert bias.shape[0] == self.filters, "ERROR (bias): invalid filter dimension"

# instantiate convolution layer
convolution_layer = torch.nn.Conv2d(self.channels_in(), self.filters,
self.kernel_size, stride=self.stride, padding=0, groups=self.groups)
# instantiate inner product layer
inner_product_layer = torch.nn.Linear(
math.prod(self.input_shape(0)), self.filters)#, bias=False)

# update weights
convolution_layer.weight = torch.nn.Parameter(torch.from_numpy(weights))
inner_product_layer.weight = torch.nn.Parameter(torch.from_numpy(weights))

# update bias
convolution_layer.bias = torch.nn.Parameter(torch.from_numpy(bias))

# # get the padding
# padding = [
# self.pad_left,
# self.pad_right,
# self.pad_top,
# self.pad_bottom
# ]
inner_product_layer.bias = torch.nn.Parameter(torch.from_numpy(bias))

# return output featuremap
data = np.moveaxis(data, -1, 0)
data = np.moveaxis(data, -1, 0).flatten()
data = np.repeat(data[np.newaxis,...], batch_size, axis=0)
data = torch.nn.functional.pad(torch.from_numpy(data), self.pad, "constant", 0.0)
return convolution_layer(data).detach().numpy()
return inner_product_layer(torch.from_numpy(data)).detach().numpy()

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.filters = self.filters
self.input_t.to_protobuf(parameters.input_t)
self.weight_t.to_protobuf(parameters.weight_t)
self.acc_t.to_protobuf(parameters.acc_t)
self.output_t.to_protobuf(parameters.output_t)


@dataclass(kw_only=True)
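
The inner-product functional model now wraps `torch.nn.Linear` over the flattened input instead of a convolution. A standalone sketch of that pattern, with made-up dimensions:

```python
import math
import numpy as np
import torch

input_shape, filters, batch_size = (4, 4, 8), 10, 1
in_features = math.prod(input_shape)

data = np.random.rand(*input_shape).astype(np.float32)
weights = np.random.rand(filters, in_features).astype(np.float32)
bias = np.random.rand(filters).astype(np.float32)

# equivalent dense layer with the numpy parameters loaded in
fc = torch.nn.Linear(in_features, filters)
fc.weight = torch.nn.Parameter(torch.from_numpy(weights))
fc.bias = torch.nn.Parameter(torch.from_numpy(bias))

# channels-last -> channels-first, flatten, add a batch dimension, then run
data = np.moveaxis(data, -1, 0).flatten()
data = np.repeat(data[np.newaxis, ...], batch_size, axis=0)
out = fc(torch.from_numpy(data)).detach().numpy()   # shape: (1, filters)
```
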
29 changes: 28 additions & 1 deletion fpgaconvnet/models/layers/pooling/base.py
@@ -48,7 +48,8 @@ def pad(self) -> list[int]:
def functional_model(self,data,batch_size=1):
import torch

assert data.shape == self.inputs_shape(), "ERROR: invalid input shape dimension"
assert list(data.shape) == self.input_shape(), \
f"invalid input shape dimension ({data.shape}) != ({self.input_shape()})"

# instantiate pooling layer FIXME
pooling_layer = torch.nn.MaxPool2d(self.kernel_size, stride=self.stride, padding=self.pad[0])
@@ -58,6 +59,11 @@ def functional_model(self,data,batch_size=1):
data = np.repeat(data[np.newaxis,...], batch_size, axis=0)
return pooling_layer(torch.from_numpy(data)).detach().numpy()

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.coarse = self.coarse


class PoolingLayerChiselMixin(PoolingLayerBase):

backend: ClassVar[BACKEND] = BACKEND.CHISEL
@@ -221,6 +227,20 @@ def pad(self, val: list[int]) -> None:
self.pad_bottom = val[2]
self.pad_left = val[1]

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.kernel_size.extend(self.kernel_size)
parameters.kernel_rows = self.kernel_rows
parameters.kernel_cols = self.kernel_cols
parameters.stride.extend(self.stride)
parameters.stride_rows = self.stride_rows
parameters.stride_cols = self.stride_cols
parameters.pad_top = self.pad_top
parameters.pad_right = self.pad_right
parameters.pad_bottom = self.pad_bottom
parameters.pad_left = self.pad_left


class PoolingLayer3DMixin(PoolingLayer2DMixin, Layer3D):
kernel_depth: int = 2
stride_depth: int = 2
@@ -270,3 +290,10 @@ def pad(self, val: list[int]) -> None:
self.pad_front = val[4]
self.pad_back = val[5]

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.kernel_depth = self.kernel_depth
parameters.stride_depth = self.stride_depth
parameters.pad_front = self.pad_front
parameters.pad_back = self.pad_back
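
As with convolution, the pooling functional model rearranges the channels-last input before running a channel-first `torch.nn.MaxPool2d`. An illustrative sketch with an assumed 2x2 kernel and stride:

```python
import numpy as np
import torch

data = np.random.rand(16, 16, 8).astype(np.float32)   # rows x cols x channels
pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

data = np.moveaxis(data, -1, 0)                        # channels first
data = np.repeat(data[np.newaxis, ...], 1, axis=0)     # batch dimension of 1
out = pool(torch.from_numpy(data)).detach().numpy()    # shape: (1, 8, 8, 8)
```
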

3 changes: 2 additions & 1 deletion fpgaconvnet/models/layers/relu/base.py
@@ -29,7 +29,8 @@ class ReLULayerBase(LayerMatchingCoarse, LayerBase):
def functional_model(self,data,batch_size=1):
import torch

assert data.shape == self.inputs_shape(), "ERROR: invalid input shape dimension"
assert list(data.shape) == self.input_shape(), \
f"invalid input shape dimension ({data.shape} != {self.input_shape()})"

# instantiate relu layer
relu_layer = torch.nn.ReLU()
20 changes: 20 additions & 0 deletions fpgaconvnet/models/layers/resize/base.py
@@ -33,6 +33,26 @@ def get_operations(self):
return math.prod(self.input_shape())


def functional_model(self, data, batch_size=1):
import torch

assert list(data.shape) == self.input_shape(), \
f"invalid input shape dimension ({data.shape} != {self.input_shape()})"

# instantiate resize layer
resize_layer = torch.nn.functional.interpolate

# return output featuremap
data = np.moveaxis(data, -1, 0)
data = np.repeat(data[np.newaxis,...], batch_size, axis=0)
return resize_layer(torch.from_numpy(data),
scale_factor=tuple(self.scales[:-1]),
mode=self.mode).detach().numpy()

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.scales.extend(self.scales)

class ResizeLayerChiselMixin(ResizeLayerBase):

backend: ClassVar[BACKEND] = BACKEND.CHISEL
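
The new resize functional model delegates to `torch.nn.functional.interpolate`, dropping the trailing channel entry from the scale factors once the data has been moved channels-first. A sketch under the assumption that `scales` is ordered (rows, cols, channels):

```python
import numpy as np
import torch

data = np.random.rand(8, 8, 4).astype(np.float32)      # rows x cols x channels
scales = [2, 2, 1]                                      # assumed (row, col, channel) scales

data = np.moveaxis(data, -1, 0)                         # channels first
data = np.repeat(data[np.newaxis, ...], 1, axis=0)      # batch dimension of 1
out = torch.nn.functional.interpolate(
    torch.from_numpy(data),
    scale_factor=tuple(scales[:-1]),
    mode="nearest").detach().numpy()                    # shape: (1, 4, 16, 16)
```
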
12 changes: 12 additions & 0 deletions fpgaconvnet/models/layers/split/base.py
@@ -51,6 +51,18 @@ def __setattr__(self, name: str, value: Any) -> None:
print(f"WARNING: unable to set attribute {name}, trying super method")
super().__setattr__(name, value)

def functional_model(self,data,batch_size=1):
import torch

assert list(data.shape) == self.input_shape(), \
f"invalid input shape dimension ({data.shape} != {self.input_shape()})"

# duplicate the input data
return [data] * self.ports

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.ports = self.ports

@dataclass(kw_only=True)
class SplitLayerChiselMixin(SplitLayerBase):
3 changes: 2 additions & 1 deletion fpgaconvnet/models/layers/squeeze/base.py
@@ -30,7 +30,8 @@ class SqueezeLayerBase(LayerBase):
def functional_model(self, data, batch_size=1):
import torch

assert data.shape == self.inputs_shape(), "ERROR: invalid input shape dimension"
assert list(data.shape) == self.input_shape(), \
f"invalid input shape dimension ({data.shape} != {self.input_shape()})"

# return the data as is
return data
13 changes: 10 additions & 3 deletions fpgaconvnet/models/layers/traits.py
@@ -44,6 +44,11 @@ def __setattr__(self, name: str, value: Any) -> None:
def streams(self) -> int:
return self.coarse

def layer_info(self, parameters, batch_size=1):
super().layer_info(parameters, batch_size)
parameters.coarse = self.coarse


@dataclass(kw_only=True)
class Layer2D(LayerBase):
rows: int
@@ -101,7 +106,7 @@ def get_coarse_out_feasible(self):
return get_factors(self.channels_out())

def layer_info(self, parameters, batch_size=1):
super().layer_info(self, parameters, batch_size)
super().layer_info(parameters, batch_size)
parameters.rows_in = self.rows_in()
parameters.cols_in = self.cols_in()
parameters.channels_in = self.channels_in()
@@ -149,7 +154,7 @@ def output_shape_dict(self, port_idx: int = 0) -> dict[str,int]:
}

def layer_info(self, parameters, batch_size=1):
super().layer_info(self, parameters, batch_size)
super().layer_info(parameters, batch_size)
parameters.depth_in = self.depth_in()
parameters.depth_out = self.depth_out()

@@ -238,12 +243,14 @@ def get_coarse_out_feasible(self) -> list[int]:
return list(coarse_out_feasible)

def layer_info(self, parameters, batch_size=1):
super().layer_info(self, parameters, batch_size)
super().layer_info(parameters, batch_size)
parameters.rows_in = self.rows_in()
parameters.cols_in = self.cols_in()
parameters.channels_in = self.channels_in()
parameters.rows_out = self.rows_out()
parameters.cols_out = self.cols_out()
parameters.channels_out = self.channels_out()
parameters.ports_in = self.ports_in
parameters.ports_out = self.ports_out
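
Most of the additions in this commit follow the same shape: each `layer_info` override copies its layer's attributes onto the protobuf parameters message and defers to the next class in the MRO via `super().layer_info(parameters, batch_size)` (the stray `self` argument previously passed in `traits.py` is removed). A minimal sketch of that cooperative chaining, using stand-in classes and a plain namespace in place of the real protobuf message (Python 3.10+ for `kw_only` dataclasses):

```python
from dataclasses import dataclass
from types import SimpleNamespace

@dataclass(kw_only=True)
class ExampleLayerBase:
    batch_size: int = 1

    def layer_info(self, parameters, batch_size=1):
        parameters.batch_size = batch_size

@dataclass(kw_only=True)
class ExampleLayer2D(ExampleLayerBase):
    rows: int
    cols: int
    channels: int

    def layer_info(self, parameters, batch_size=1):
        # cooperative call: no explicit self argument
        super().layer_info(parameters, batch_size)
        parameters.rows_in = self.rows
        parameters.cols_in = self.cols
        parameters.channels_in = self.channels

layer = ExampleLayer2D(rows=16, cols=16, channels=8)
parameters = SimpleNamespace()
layer.layer_info(parameters, batch_size=4)
print(parameters)   # namespace(batch_size=4, rows_in=16, cols_in=16, channels_in=8)
```
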

