Restore original dynapcnn layer attribute names conv_layer and spk_layer
bauerfe committed Oct 22, 2024
1 parent 2d3d32c commit 482499e
Showing 2 changed files with 37 additions and 37 deletions.
54 changes: 27 additions & 27 deletions sinabs/backend/dynapcnn/chips/dynapcnn.py
@@ -45,26 +45,26 @@ def set_kill_bits(cls, layer: DynapcnnLayer, config_dict: dict) -> dict:
         """
         config_dict = copy.deepcopy(config_dict)
 
-        if layer.conv.bias is not None:
-            (weights, biases) = layer.conv.parameters()
+        if layer.conv_layer.bias is not None:
+            (weights, biases) = layer.conv_layer.parameters()
         else:
-            (weights,) = layer.conv.parameters()
-            biases = torch.zeros(layer.conv.out_channels)
+            (weights,) = layer.conv_layer.parameters()
+            biases = torch.zeros(layer.conv_layer.out_channels)
 
         config_dict["weights_kill_bit"] = (~weights.bool()).tolist()
         config_dict["biases_kill_bit"] = (~biases.bool()).tolist()
 
         # - Neuron states
-        if not layer.spk.is_state_initialised():
+        if not layer.spk_layer.is_state_initialised():
             # then we assign no initial neuron state to DYNAP-CNN.
             f, h, w = layer.get_neuron_shape()
             neurons_state = torch.zeros(f, w, h)
-        elif layer.spk.v_mem.dim() == 4:
+        elif layer.spk_layer.v_mem.dim() == 4:
             # 4-dimensional states should be the norm when there is a batch dim
-            neurons_state = layer.spk.v_mem.transpose(2, 3)[0]
+            neurons_state = layer.spk_layer.v_mem.transpose(2, 3)[0]
         else:
             raise ValueError(
-                f"Current v_mem (shape: {layer.spk.v_mem.shape}) of spiking layer not understood."
+                f"Current v_mem (shape: {layer.spk_layer.v_mem.shape}) of spiking layer not understood."
             )
 
         config_dict["neurons_value_kill_bit"] = (
@@ -96,24 +96,24 @@ def get_dynapcnn_layer_config_dict(
         dimensions["output_shape"]["size"]["x"] = w
         dimensions["output_shape"]["size"]["y"] = h
         dimensions["padding"] = {
-            "x": layer.conv.padding[1],
-            "y": layer.conv.padding[0],
+            "x": layer.conv_layer.padding[1],
+            "y": layer.conv_layer.padding[0],
         }
         dimensions["stride"] = {
-            "x": layer.conv.stride[1],
-            "y": layer.conv.stride[0],
+            "x": layer.conv_layer.stride[1],
+            "y": layer.conv_layer.stride[0],
         }
-        dimensions["kernel_size"] = layer.conv.kernel_size[0]
+        dimensions["kernel_size"] = layer.conv_layer.kernel_size[0]
 
-        if dimensions["kernel_size"] != layer.conv.kernel_size[1]:
+        if dimensions["kernel_size"] != layer.conv_layer.kernel_size[1]:
             raise ValueError("Conv2d: Kernel must have same height and width.")
         config_dict["dimensions"] = dimensions
         # Update parameters from convolution
-        if layer.conv.bias is not None:
-            (weights, biases) = layer.conv.parameters()
+        if layer.conv_layer.bias is not None:
+            (weights, biases) = layer.conv_layer.parameters()
         else:
-            (weights,) = layer.conv.parameters()
-            biases = torch.zeros(layer.conv.out_channels)
+            (weights,) = layer.conv_layer.parameters()
+            biases = torch.zeros(layer.conv_layer.out_channels)
         weights = weights.transpose(2, 3)  # Need this to match samna convention
         config_dict["weights"] = weights.int().tolist()
         config_dict["biases"] = biases.int().tolist()
@@ -122,36 +122,36 @@ def get_dynapcnn_layer_config_dict(
         # Update parameters from the spiking layer
 
         # - Neuron states
-        if not layer.spk.is_state_initialised():
+        if not layer.spk_layer.is_state_initialised():
             # then we assign no initial neuron state to DYNAP-CNN.
             f, h, w = layer.get_neuron_shape()
             neurons_state = torch.zeros(f, w, h)
-        elif layer.spk.v_mem.dim() == 4:
+        elif layer.spk_layer.v_mem.dim() == 4:
             # 4-dimensional states should be the norm when there is a batch dim
-            neurons_state = layer.spk.v_mem.transpose(2, 3)[0]
+            neurons_state = layer.spk_layer.v_mem.transpose(2, 3)[0]
         else:
             raise ValueError(
-                f"Current v_mem (shape: {layer.spk.v_mem.shape}) of spiking layer not understood."
+                f"Current v_mem (shape: {layer.spk_layer.v_mem.shape}) of spiking layer not understood."
             )
 
         # - Resetting vs returning to 0
-        if isinstance(layer.spk.reset_fn, sinabs.activation.MembraneReset):
+        if isinstance(layer.spk_layer.reset_fn, sinabs.activation.MembraneReset):
             return_to_zero = True
-        elif isinstance(layer.spk.reset_fn, sinabs.activation.MembraneSubtract):
+        elif isinstance(layer.spk_layer.reset_fn, sinabs.activation.MembraneSubtract):
             return_to_zero = False
         else:
             raise Exception(
                 "Unknown reset mechanism. Only MembraneReset and MembraneSubtract are currently understood."
             )
 
-        if layer.spk.min_v_mem is None:
+        if layer.spk_layer.min_v_mem is None:
             min_v_mem = -(2**15)
         else:
-            min_v_mem = int(layer.spk.min_v_mem)
+            min_v_mem = int(layer.spk_layer.min_v_mem)
         config_dict.update(
             {
                 "return_to_zero": return_to_zero,
-                "threshold_high": int(layer.spk.spike_threshold),
+                "threshold_high": int(layer.spk_layer.spike_threshold),
                 "threshold_low": min_v_mem,
                 "monitor_enable": False,
                 "neurons_initial_value": neurons_state.int().tolist(),
20 changes: 10 additions & 10 deletions sinabs/backend/dynapcnn/dynapcnn_layer.py
@@ -134,11 +134,11 @@ def __init__(
         self._spk = spk
 
     @property
-    def conv(self):
+    def conv_layer(self):
         return self._conv
 
     @property
-    def spk(self):
+    def spk_layer(self):
         return self._spk
 
     @property
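In user code, the restored names are reached as read-only properties of a DynapcnnLayer instance. A hedged usage sketch, assuming dynapcnn_layer is an existing DynapcnnLayer object:

# Assuming `dynapcnn_layer` is an existing DynapcnnLayer instance
conv = dynapcnn_layer.conv_layer   # the wrapped nn.Conv2d
spk = dynapcnn_layer.spk_layer     # the wrapped sinabs spiking layer

print(conv.kernel_size, conv.stride, conv.padding)
print(spk.spike_threshold, spk.min_v_mem)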
@@ -167,8 +167,8 @@ def forward(self, x) -> List[torch.Tensor]:
 
         returns = []
 
-        x = self.conv(x)
-        x = self.spk(x)
+        x = self.conv_layer(x)
+        x = self.spk_layer(x)
 
         for pool in self._pool:
 
@@ -254,7 +254,7 @@ def memory_summary(self):
             "kernel": c * pow(2, np.ceil(np.log2(h * w)) + np.ceil(np.log2(f))),
             "neuron": f
             * pow(2, np.ceil(np.log2(neuron_height)) + np.ceil(np.log2(neuron_width))),
-            "bias": 0 if self.conv.bias is None else len(self.conv.bias),
+            "bias": 0 if self.conv_layer.bias is None else len(self.conv_layer.bias),
         }
 
     ####################################################### Private Methods #######################################################
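The kernel and neuron entries round each extent up to the next power of two before multiplying, so memory is reported in power-of-two-sized blocks. A small worked example with hypothetical dimensions:

import numpy as np

# Hypothetical layer: c input channels, f output channels,
# an h x w kernel, and a neuron grid of neuron_height x neuron_width
c, f, h, w = 2, 8, 3, 3
neuron_height, neuron_width = 10, 10

# ceil(log2(9)) = 4 and ceil(log2(8)) = 3, so 2 * 2**7 = 256 kernel words
kernel_memory = c * pow(2, np.ceil(np.log2(h * w)) + np.ceil(np.log2(f)))

# ceil(log2(10)) = 4 twice, so 8 * 2**8 = 2048 neuron words
neuron_memory = f * pow(
    2, np.ceil(np.log2(neuron_height)) + np.ceil(np.log2(neuron_width))
)

print(kernel_memory, neuron_memory)  # 256.0 2048.0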
@@ -268,11 +268,11 @@ def _get_conv_output_shape(self) -> Tuple[int, int, int]:
         """
         # get the layer's parameters.
 
-        out_channels = self.conv.out_channels
-        kernel_size = self.conv.kernel_size
-        stride = self.conv.stride
-        padding = self.conv.padding
-        dilation = self.conv.dilation
+        out_channels = self.conv_layer.out_channels
+        kernel_size = self.conv_layer.kernel_size
+        stride = self.conv_layer.stride
+        padding = self.conv_layer.padding
+        dilation = self.conv_layer.dilation
 
         # compute the output height and width.
         out_height = (
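The rest of the method is cut off by the diff window; presumably it applies the standard Conv2d output-size formula to these parameters. For reference, that formula for one spatial dimension, as a hypothetical stand-alone helper:

# Standard Conv2d output-size formula (PyTorch convention); the helper name
# is illustrative only and does not appear in the repository.
def conv_output_size(in_size: int, kernel: int, stride: int, padding: int, dilation: int) -> int:
    return (in_size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# e.g. a 3x3 kernel with stride 1 and padding 1 preserves a 28-pixel dimension
print(conv_output_size(28, kernel=3, stride=1, padding=1, dilation=1))  # 28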
