diff --git a/docs/about/release_notes.md b/docs/about/release_notes.md index 1f5a15691..556181ce3 100644 --- a/docs/about/release_notes.md +++ b/docs/about/release_notes.md @@ -1,5 +1,11 @@ # Release notes +## v3.1.3 (04/02/2026) + +* Add tests for mapping non-sequential networks +* Fix networks used for tests, as they didn't meet the hardware constraints +* Update documentation with the non-sequential network types that are covered by tests + ## v3.1.2 (12/12/2025) * Update use of visualizer in Sinabs tutorials, previous tutorials were using a deprecated visualizer version. diff --git a/docs/getting_started/iaf_neuron_model.ipynb b/docs/getting_started/iaf_neuron_model.ipynb index e920d55ad..992a31205 100644 --- a/docs/getting_started/iaf_neuron_model.ipynb +++ b/docs/getting_started/iaf_neuron_model.ipynb @@ -163,7 +163,6 @@ "import torch\n", "import sinabs.layers as sl\n", "\n", - "\n", "# Define a neuron in 'SINABS'\n", "neuron = sl.IAF()\n", "\n", diff --git a/docs/speck/faqs/available_network_arch.md b/docs/speck/faqs/available_network_arch.md index f25cb78ac..7ae38a3b3 100644 --- a/docs/speck/faqs/available_network_arch.md +++ b/docs/speck/faqs/available_network_arch.md @@ -39,28 +39,31 @@ dynapcnn.to(device="your device", chip_layers_ordering=[2, 5, 7, 1]) ## What network structure can I define? -`Sinabs` can parse a `torch.nn.Sequential` like architecture, so it is recommended to -use a `Sequential` like network. +`Sinabs` can parse a `torch.nn.Sequential`-like architecture, so it is recommended to use a `Sequential`-like network. As of `v3.1.0`, we released a network graph extraction feature that helps users deploy networks with more complex architectures to the devkit. Our `Speck` chip, in fact, supports branched architectures. With the graph extraction feature, we support a range of network structures, as shown below: +A network with a merge and a split: -Two independent networks: - -![Two independent networks](imgs/two-independent-networks.png) +![A network with a merge and a split](imgs/network-with-merge-and-split.png) Two networks with merging outputs: ![Two networks with merging outputs](imgs/two-networks-merging-output.png) -A network with a merge and a split: +A network with residual connections: -![A network with a merge and a split](imgs/network-with-merge-and-split.png) +![A network with residual connections](imgs/network-with-residual-connection.png) + +A more complex network: +![A more complex network](imgs/complex-network.png) Note: with the graph extraction feature it is possible to implement recurrent neural networks. However, this is not recommended or supported, as it can result in deadlock on the chip. +Note 2: the use of two parallel networks, although supported by our chip, has not been fully accounted for in our `sinabs` implementation. + ## How to make use of the graph extraction feature? For general architectures, users need to define their own class, implementing at least the `__init__` method with all the layers, as well as an appropriate `forward` method. 
@@ -68,64 +71,70 @@ For general architectures, users need to define their classes, by defining at le Here is an example to define a network with a merge and a split: ```python +import torch import torch.nn as nn from sinabs.activation.surrogate_gradient_fn import PeriodicExponential from sinabs.layers import IAFSqueeze, Merge, SumPool2d + class SNN(nn.Module): def __init__(self, batch_size) -> None: super().__init__() - - self.conv1 = nn.Conv2d(2, 1, 2, 1, bias=False) - self.iaf1 = IAFSqueeze( + # -- graph node A -- + self.conv_A = nn.Conv2d(2, 4, 2, 1, bias=False) + self.iaf_A = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - - self.conv2 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf2 = IAFSqueeze( + # -- graph node B -- + self.conv_B = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf2_B = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool2 = SumPool2d(2, 2) - - self.conv3 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf3 = IAFSqueeze( + self.pool_B = SumPool2d(2, 2) + # -- graph node C -- + self.conv_C = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_C = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool3 = SumPool2d(2, 2) - self.pool3a = SumPool2d(5, 5) - - self.conv4 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf4 = IAFSqueeze( + self.pool_C = SumPool2d(2, 2) + # -- graph node D -- + self.conv_D = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_D = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool4 = SumPool2d(3, 3) - - self.flat1 = nn.Flatten() - self.flat2 = nn.Flatten() - - self.conv5 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf5 = IAFSqueeze( + # -- graph node E -- + self.conv_E = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf3_E = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - - self.fc2 = nn.Linear(25, 10, bias=False) - self.iaf2_fc = IAFSqueeze( + self.pool_E = SumPool2d(2, 2) + # -- graph node F -- + self.conv_F = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_F = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + # -- graph node G -- + self.fc3 = nn.Linear(144, 10, bias=False) + self.iaf3_fc = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, @@ -134,43 +143,47 @@ class SNN(nn.Module): # -- merges -- self.merge1 = Merge() - self.merge2 = Merge() + + # -- flattens -- + self.flat_D = nn.Flatten() + self.flat_F = nn.Flatten() def forward(self, x): # conv 1 - A/0 - con1_out = self.conv1(x) - iaf1_out = self.iaf1(con1_out) + convA_out = self.conv_A(x) + iaf_A_out = self.iaf_A(convA_out) # conv 2 - B/1 - conv2_out = self.conv2(iaf1_out) - iaf2_out = self.iaf2(conv2_out) - pool2_out = self.pool2(iaf2_out) + conv_B_out = self.conv_B(iaf_A_out) + iaf_B_out = self.iaf2_B(conv_B_out) + pool_B_out = self.pool_B(iaf_B_out) # conv 3 - C/2 - conv3_out = self.conv3(iaf1_out) - iaf3_out = self.iaf3(conv3_out) - pool3_out = self.pool3(iaf3_out) - pool3a_out = self.pool3a(iaf3_out) - - # conv 4 - D/3 - merge1_out = self.merge1(pool2_out, pool3_out) - conv4_out = self.conv4(merge1_out) - iaf4_out = self.iaf4(conv4_out) - pool4_out = self.pool4(iaf4_out) - flat1_out = self.flat1(pool4_out) - - # conv 5 - E/4 - conv5_out = self.conv5(pool3a_out) - iaf5_out 
= self.iaf5(conv5_out) - flat2_out = self.flat2(iaf5_out) - - # fc 2 - F/5 - merge2_out = self.merge2(flat2_out, flat1_out) - - fc2_out = self.fc2(merge2_out) - iaf2_fc_out = self.iaf2_fc(fc2_out) - - return iaf2_fc_out + conv_C_out = self.conv_C(pool_B_out) + iaf_C_out = self.iaf_C(conv_C_out) + pool_C_out = self.pool_C(iaf_C_out) + + # conv 4 - D/4 + conv_D_out = self.conv_D(pool_C_out) + iaf_D_out = self.iaf_D(conv_D_out) + # conv 5 - E/3 + conv_E_out = self.conv_E(pool_B_out) + iaf3_E_out = self.iaf3_E(conv_E_out) + pool_E_out = self.pool_E(iaf3_E_out) + + # conv 6 - F/6 + conv_F_out = self.conv_F(pool_E_out) + iaf_F_out = self.iaf_F(conv_F_out) + + # fc 3 - G/5 + flat_D_out = self.flat_D(iaf_D_out) + flat_F_out = self.flat_F(iaf_F_out) + + merge1_out = self.merge1(flat_D_out, flat_F_out) + fc3_out = self.fc3(merge1_out) + iaf3_fc_out = self.iaf3_fc(fc3_out) + + return iaf3_fc_out ``` ## Can I achieve a "Residual Connection" like ResNet does? @@ -180,9 +193,9 @@ change the `samna.speck2f.configuration.CNNLayerDestination.layer` to achieve th familiar with the `samna-configuration`. You can also make use of our network graph extraction feature to implement residual networks. -## How to use "Residual Connection" manually? +## How can I define a "Residual Connection" manually? -Alright! Here I will give an example of achieving the "Residual Connection" by manually modify the `samna-configuration`. +You can also achieve a "Residual Connection" by manually modifying the `samna-configuration`. Let's say you want an architecture like below: @@ -221,7 +234,7 @@ class ResidualBlock(nn.Module): ``` -Since currently Sinabs can only parse Sequential like network, we need to do some tedious work like below: +You can write it like this: ```python # define a Sequential first @@ -264,8 +277,8 @@ devkit.get_model().apply_configuration(samna_cfg) ``` -I have to say it is not an elegant solution though, it should help you to achieve an initial Residual Block. We will -improve this part after Sinabs has the ability for extracting model's graph. +It is a lot of manual work, but it will give you a working Residual Block. +We recommend using our network graph extraction feature for residual connections instead. ## What execution order should I be aware of when I am implementing a sequential structure? You should be aware of the internal layer order. 
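To tie the FAQ changes above to the tests added later in this diff, here is a minimal deployment sketch for a branched model such as the `SNN` example class. This is a sketch only: the input shape `(2, 34, 34)` and the `monitor_layers` value are illustrative assumptions, the `"your device"` placeholder follows the FAQ's own convention, and `DynapcnnNetwork(...)` plus `.to(device=..., monitor_layers=...)` mirror their use in `test_DynapcnnNetwork_movement` further down.

```python
from sinabs.backend.dynapcnn.dynapcnn_network import DynapcnnNetwork

# `SNN` is the branched example class defined in the FAQ above.
batch_size = 1
snn = SNN(batch_size)

# DynapcnnNetwork performs the graph extraction on the module, so the
# branches, merges, and splits are discovered from its `forward` method.
# The input shape (channels, height, width) is an illustrative assumption.
dcnn = DynapcnnNetwork(snn, input_shape=(2, 34, 34), batch_size=batch_size)

# Deploy to a connected devkit; replace "your device" with an actual
# device string, as in the FAQ example further up. `monitor_layers`
# mirrors its use in the tests added later in this diff.
dcnn.to(device="your device", monitor_layers=[-1])
```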
diff --git a/docs/speck/faqs/imgs/complex-network.png b/docs/speck/faqs/imgs/complex-network.png new file mode 100644 index 000000000..18c55b619 Binary files /dev/null and b/docs/speck/faqs/imgs/complex-network.png differ diff --git a/docs/speck/faqs/imgs/network-with-residual-connection.png b/docs/speck/faqs/imgs/network-with-residual-connection.png new file mode 100644 index 000000000..d1456353b Binary files /dev/null and b/docs/speck/faqs/imgs/network-with-residual-connection.png differ diff --git a/docs/speck/faqs/imgs/two-independent-networks.png b/docs/speck/faqs/imgs/two-independent-networks.png deleted file mode 100644 index 66483143b..000000000 Binary files a/docs/speck/faqs/imgs/two-independent-networks.png and /dev/null differ diff --git a/docs/speck/notebooks/nmnist_quick_start.ipynb b/docs/speck/notebooks/nmnist_quick_start.ipynb index bcf975e64..5eb9edf64 100644 --- a/docs/speck/notebooks/nmnist_quick_start.ipynb +++ b/docs/speck/notebooks/nmnist_quick_start.ipynb @@ -97,7 +97,6 @@ "source": [ "from torch import nn\n", "\n", - "\n", "# define a CNN model\n", "cnn = nn.Sequential(\n", " # [2, 34, 34] -> [8, 17, 17]\n", @@ -1181,7 +1180,6 @@ "source": [ "from sinabs.backend.dynapcnn.dynapcnn_visualizer import DynapcnnVisualizer\n", "\n", - "\n", "visualizer = DynapcnnVisualizer(\n", " window_scale=(4, 8),\n", " dvs_shape=(34, 34),\n", diff --git a/docs/speck/notebooks/play_with_speck_dvs.ipynb b/docs/speck/notebooks/play_with_speck_dvs.ipynb index aaf055559..0aa3ac613 100644 --- a/docs/speck/notebooks/play_with_speck_dvs.ipynb +++ b/docs/speck/notebooks/play_with_speck_dvs.ipynb @@ -103,7 +103,6 @@ "from sinabs.backend.dynapcnn.dynapcnn_visualizer import DynapcnnVisualizer\n", "from sinabs.backend.dynapcnn import DynapcnnNetwork\n", "\n", - "\n", "# create a dummy snn for DynapcnnNetwork initialization\n", "snn = nn.Sequential(\n", " nn.Conv2d(1, 1, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),\n", diff --git a/docs/tutorials/nmnist.ipynb b/docs/tutorials/nmnist.ipynb index 555f92a5c..31d8b876a 100644 --- a/docs/tutorials/nmnist.ipynb +++ b/docs/tutorials/nmnist.ipynb @@ -292,7 +292,6 @@ "source": [ "from tqdm.notebook import tqdm\n", "\n", - "\n", "n_epochs = 1\n", "optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n", "crit = nn.functional.cross_entropy\n", diff --git a/sinabs/backend/dynapcnn/chips/dynapcnn.py b/sinabs/backend/dynapcnn/chips/dynapcnn.py index e1da34af2..77b5987f5 100644 --- a/sinabs/backend/dynapcnn/chips/dynapcnn.py +++ b/sinabs/backend/dynapcnn/chips/dynapcnn.py @@ -101,7 +101,7 @@ def get_dynapcnn_layer_config_dict( dimensions["input_shape"]["feature_count"] = channel_count # dimensions["output_feature_count"] already done in conv2d_to_dict - (f, h, w) = layer.get_neuron_shape() + f, h, w = layer.get_neuron_shape() dimensions["output_shape"]["size"] = {} dimensions["output_shape"]["feature_count"] = f dimensions["output_shape"]["size"]["x"] = w @@ -121,7 +121,7 @@ def get_dynapcnn_layer_config_dict( config_dict["dimensions"] = dimensions # Update parameters from convolution if layer.conv_layer.bias is not None: - (weights, biases) = layer.conv_layer.parameters() + weights, biases = layer.conv_layer.parameters() else: (weights,) = layer.conv_layer.parameters() biases = torch.zeros(layer.conv_layer.out_channels) diff --git a/sinabs/backend/dynapcnn/dynapcnn_visualizer.py b/sinabs/backend/dynapcnn/dynapcnn_visualizer.py index f19210bc9..cb6abe220 100644 --- a/sinabs/backend/dynapcnn/dynapcnn_visualizer.py +++ 
b/sinabs/backend/dynapcnn/dynapcnn_visualizer.py @@ -455,7 +455,7 @@ def connect( # Streamer graph # Dvs node - (_, dvs_member_filter, _, streamer_node) = self.streamer_graph.sequential( + _, dvs_member_filter, _, streamer_node = self.streamer_graph.sequential( [ dynapcnn_network.samna_device.get_model_source_node(), samna.graph.JitMemberSelect(), @@ -526,7 +526,7 @@ def connect( ## Readout node if "r" in self.gui_type: if self.readout_node == "JitMajorityReadout": - (_, majority_readout_node, _) = self.streamer_graph.sequential( + _, majority_readout_node, _ = self.streamer_graph.sequential( [ spike_collection_node, samna.graph.JitMajorityReadout(samna.ui.Event), @@ -544,7 +544,7 @@ def connect( self.readout_default_threshold_high ) else: - (_, majority_readout_node, _) = self.streamer_graph.sequential( + _, majority_readout_node, _ = self.streamer_graph.sequential( [ spike_collection_node, self.readout_node, diff --git a/sinabs/backend/dynapcnn/nir_graph_extractor.py b/sinabs/backend/dynapcnn/nir_graph_extractor.py index 30f5ef5bf..c14c41b81 100644 --- a/sinabs/backend/dynapcnn/nir_graph_extractor.py +++ b/sinabs/backend/dynapcnn/nir_graph_extractor.py @@ -504,7 +504,7 @@ def _add_dvs_node(self, dvs_input_shape: Tuple[int, int, int]) -> DVSLayer: A handler to the newly added `DVSLayer` instance. """ - (features, height, width) = dvs_input_shape + features, height, width = dvs_input_shape if features > 2: raise ValueError( f"A DVSLayer instance can have a max feature dimension of 2 but {features} was given." ) @@ -578,7 +578,7 @@ def _validate_dvs_setup(self, dvs_input_shape: Tuple[int, int, int]) -> None: f"A DVSLayer node exists and there are {nb_entries} entry nodes in the graph: the DVSLayer should be the only entry node." ) - (features, _, _) = dvs_input_shape + features, _, _ = dvs_input_shape if features > 2: raise ValueError( @@ -752,7 +752,7 @@ def _get_nodes_io_shapes( # different input sources to a core to have the same output shapes. if any(inp.shape != inputs[0].shape for inp in inputs): raise ValueError( - f"Layer `sinabs.layers.merge.Merge` (node {node}) requires input tensors with the same shape" + f"Layer `sinabs.layers.merge.Merge` (node {node}) requires input tensors with the same shape." ) # forward input through the node. @@ -828,6 +828,7 @@ def _find_source_of_input_to(self, node: int) -> int: if len(sources) == 0: return -1 if len(sources) > 1: + # return -1  # TODO: why throw a RuntimeError when the documentation explicitly says -1 in case of more than one input node? 
raise RuntimeError(f"Node {node} has more than 1 input") return sources.pop() diff --git a/sinabs/layers/pool2d.py b/sinabs/layers/pool2d.py index c9aeb37e1..3958086e0 100644 --- a/sinabs/layers/pool2d.py +++ b/sinabs/layers/pool2d.py @@ -80,7 +80,7 @@ def get_output_shape(self, input_shape: Tuple) -> Tuple: Returns: (channelsOut, height_out, width_out) """ - (channels, height, width) = input_shape + channels, height, width = input_shape height_out = conv_output_size( height + sum(self.padding[2:]), self.pool_size[0], self.strides[0] diff --git a/tests/test_dynapcnn/hw_utils.py b/tests/hw_utils.py similarity index 100% rename from tests/test_dynapcnn/hw_utils.py rename to tests/hw_utils.py diff --git a/tests/test_dynapcnn/__init__.py b/tests/test_dynapcnn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_dynapcnn/test_device_movement.py b/tests/test_dynapcnn/test_device_movement.py index 34d84c5ec..f8864c24b 100644 --- a/tests/test_dynapcnn/test_device_movement.py +++ b/tests/test_dynapcnn/test_device_movement.py @@ -3,7 +3,7 @@ from sinabs.backend.dynapcnn import DynapcnnNetwork from sinabs.from_torch import from_model -from hw_utils import find_open_devices +from tests.hw_utils import find_open_devices ann = nn.Sequential( nn.Conv2d(1, 20, 5, 1, bias=False), diff --git a/tests/test_dynapcnn/test_discover_device.py b/tests/test_dynapcnn/test_discover_device.py index caae4eef1..cecd55b3a 100644 --- a/tests/test_dynapcnn/test_discover_device.py +++ b/tests/test_dynapcnn/test_discover_device.py @@ -2,7 +2,7 @@ import samna from sinabs.backend.dynapcnn import io -from hw_utils import find_open_devices +from tests.hw_utils import find_open_devices def test_is_device_type(): diff --git a/tests/test_dynapcnn/test_large_net.py b/tests/test_dynapcnn/test_large_net.py index 4c7c9432a..27937b354 100644 --- a/tests/test_dynapcnn/test_large_net.py +++ b/tests/test_dynapcnn/test_large_net.py @@ -14,7 +14,7 @@ from sinabs.backend.dynapcnn.dynapcnn_network import DynapcnnNetwork from sinabs.from_torch import from_model from sinabs.layers import NeuromorphicReLU -from hw_utils import find_open_devices +from tests.hw_utils import find_open_devices class DynapCnnNetA(nn.Module): diff --git a/tests/test_dynapcnn/test_neuron_leak.py b/tests/test_dynapcnn/test_neuron_leak.py index 7bc0c63b5..769e366c5 100644 --- a/tests/test_dynapcnn/test_neuron_leak.py +++ b/tests/test_dynapcnn/test_neuron_leak.py @@ -3,12 +3,7 @@ import pytest import samna import torch -from hw_utils import ( - find_open_devices, - get_ones_network, - is_any_samna_device_connected, - is_device_connected, -) +from tests.hw_utils import find_open_devices, is_any_samna_device_connected from torch import nn from sinabs.backend.dynapcnn import DynapcnnNetwork diff --git a/tests/test_dynapcnn/test_single_neuron_hardware.py b/tests/test_dynapcnn/test_single_neuron_hardware.py index d7d730222..728c21ac4 100644 --- a/tests/test_dynapcnn/test_single_neuron_hardware.py +++ b/tests/test_dynapcnn/test_single_neuron_hardware.py @@ -1,9 +1,8 @@ import pytest -from hw_utils import ( +from tests.hw_utils import ( find_open_devices, get_ones_network, is_any_samna_device_connected, - is_device_connected, reset_all_connected_boards, ) diff --git a/tests/test_dynapcnn/test_visualizer.py b/tests/test_dynapcnn/test_visualizer.py index bcc1c6219..dbe1fe397 100644 --- a/tests/test_dynapcnn/test_visualizer.py +++ b/tests/test_dynapcnn/test_visualizer.py @@ -3,9 +3,8 @@ import pytest import samna -from custom_jit_filters import 
majority_readout_filter as custom_filter -from hw_utils import find_open_devices, is_any_samna_device_connected - +from .custom_jit_filters import majority_readout_filter as custom_filter +from tests.hw_utils import find_open_devices from sinabs.backend.dynapcnn.dynapcnn_visualizer import DynapcnnVisualizer @@ -89,7 +88,7 @@ def test_jit_compilation(): streamer_graph = samna.graph.EventFilterGraph() # Streamer graph # Dvs node - (_, dvs_member_filter, _, streamer_node) = streamer_graph.sequential( + _, dvs_member_filter, _, streamer_node = streamer_graph.sequential( [ # samna.graph.JitSource(samna.speck2e.event.OutputEvent), dynapcnn_network.samna_device.get_model_source_node(), diff --git a/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py b/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py index df12d90e9..f5005e968 100644 --- a/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py +++ b/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py @@ -14,6 +14,11 @@ from .model_dummy_4 import expected_output as expected_output_4 from .model_dummy_4 import input_shape as input_shape_4 from .model_dummy_4 import snn as snn_4 +from .model_dummy_5 import batch_size as batch_size_5 +from .model_dummy_5 import expected_output as expected_output_5 +from .model_dummy_5 import input_shape as input_shape_5 +from .model_dummy_5 import snn as snn_5 + from .model_dummy_seq import ( expected_seq_1, expected_seq_2, @@ -30,3 +35,13 @@ (seq_1, input_shape_seq, 1, expected_seq_1), (seq_2, input_shape_seq, 1, expected_seq_2), ] + +args_DynapcnnNetworkMappingTest = [ + (snn_1, input_shape_1, batch_size_1), + (snn_2, input_shape_2, batch_size_2), + (snn_3, input_shape_3, batch_size_3), + (snn_4, input_shape_4, batch_size_4), + (seq_1, input_shape_seq, 1), + (seq_2, input_shape_seq, 1), + # (snn_5, input_shape_5, batch_size_5), #TODO: forward method needs implementation +] diff --git a/tests/test_dynapcnnnetwork/model_dummy_1.py b/tests/test_dynapcnnnetwork/model_dummy_1.py index c24aabe15..6b7ef3edc 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_1.py +++ b/tests/test_dynapcnnnetwork/model_dummy_1.py @@ -13,49 +13,49 @@ class SNN(nn.Module): def __init__(self, batch_size) -> None: super().__init__() - self.conv1 = nn.Conv2d(2, 10, 2, 1, bias=False) # node 0 + self.conv1 = nn.Conv2d(2, 10, 2, 1, bias=False) self.iaf1 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 1 - self.pool1 = nn.AvgPool2d(3, 3) # node 2 - self.pool1a = nn.AvgPool2d(4, 4) # node 3 + ) + self.pool1 = nn.AvgPool2d(2, 2) + self.pool1a = nn.AvgPool2d(2, 2) - self.conv2 = nn.Conv2d(10, 10, 4, 1, bias=False) # node 4 + self.conv2 = nn.Conv2d(10, 10, 1, 1, bias=False) self.iaf2 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 6 + ) - self.conv3 = nn.Conv2d(10, 1, 2, 1, bias=False) # node 8 + self.conv3 = nn.Conv2d(10, 1, 2, 1, bias=False) self.iaf3 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 9 + ) self.flat = nn.Flatten() - self.fc1 = nn.Linear(49, 500, bias=False) # node 10 + self.fc1 = nn.Linear(225, 200, bias=False) self.iaf4 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 11 + ) - self.fc2 = nn.Linear(500, 10, bias=False) # node 12 + self.fc2 = nn.Linear(200, 10, bias=False) self.iaf5 = IAFSqueeze( batch_size=batch_size, 
min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 13 + ) self.adder = Merge() @@ -67,14 +67,13 @@ def forward(self, x): conv2_out = self.conv2(pool1_out) iaf2_out = self.iaf2(conv2_out) - conv3_out = self.conv3(self.adder(pool1a_out, iaf2_out)) iaf3_out = self.iaf3(conv3_out) - flat_out = self.flat(iaf3_out) fc1_out = self.fc1(flat_out) iaf4_out = self.iaf4(fc1_out) + fc2_out = self.fc2(iaf4_out) iaf5_out = self.iaf5(fc2_out) diff --git a/tests/test_dynapcnnnetwork/model_dummy_2.py b/tests/test_dynapcnnnetwork/model_dummy_2.py index cff57e8d4..2fc3af5ce 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_2.py +++ b/tests/test_dynapcnnnetwork/model_dummy_2.py @@ -81,39 +81,38 @@ def __init__(self, batch_size) -> None: def forward(self, x): # conv 1 - A/0 - convA_out = self.conv_A(x) # node 0 - iaf_A_out = self.iaf_A(convA_out) # node 1 + convA_out = self.conv_A(x) + iaf_A_out = self.iaf_A(convA_out) # conv 2 - B/1 - conv_B_out = self.conv_B(iaf_A_out) # node 2 - iaf_B_out = self.iaf2_B(conv_B_out) # node 3 - pool_B_out = self.pool_B(iaf_B_out) # node 4 + conv_B_out = self.conv_B(iaf_A_out) + iaf_B_out = self.iaf2_B(conv_B_out) + pool_B_out = self.pool_B(iaf_B_out) # conv 3 - C/2 - conv_C_out = self.conv_C(pool_B_out) # node 5 - iaf_C_out = self.iaf_C(conv_C_out) # node 7 - pool_C_out = self.pool_C(iaf_C_out) # node 8 + conv_C_out = self.conv_C(pool_B_out) + iaf_C_out = self.iaf_C(conv_C_out) + pool_C_out = self.pool_C(iaf_C_out) # conv 4 - D/4 - conv_D_out = self.conv_D(pool_C_out) # node 9 - iaf_D_out = self.iaf_D(conv_D_out) # node 10 - + conv_D_out = self.conv_D(pool_C_out) + iaf_D_out = self.iaf_D(conv_D_out) # fc 1 - E/3 - conv_E_out = self.conv_E(pool_B_out) # node 6 - iaf3_E_out = self.iaf3_E(conv_E_out) # node 12 - pool_E_out = self.pool_E(iaf3_E_out) # node 13 + conv_E_out = self.conv_E(pool_B_out) + iaf3_E_out = self.iaf3_E(conv_E_out) + pool_E_out = self.pool_E(iaf3_E_out) # fc 2 - F/6 - conv_F_out = self.conv_F(pool_E_out) # node 14 - iaf_F_out = self.iaf_F(conv_F_out) # node 15 + conv_F_out = self.conv_F(pool_E_out) + iaf_F_out = self.iaf_F(conv_F_out) # fc 2 - G/5 - flat_D_out = self.flat_D(iaf_D_out) # node 11 - flat_F_out = self.flat_F(iaf_F_out) # node 16 + flat_D_out = self.flat_D(iaf_D_out) + flat_F_out = self.flat_F(iaf_F_out) - merge1_out = self.merge1(flat_D_out, flat_F_out) # node 19 - fc3_out = self.fc3(merge1_out) # node 17 - iaf3_fc_out = self.iaf3_fc(fc3_out) # node 18 + merge1_out = self.merge1(flat_D_out, flat_F_out) + fc3_out = self.fc3(merge1_out) + iaf3_fc_out = self.iaf3_fc(fc3_out) return iaf3_fc_out diff --git a/tests/test_dynapcnnnetwork/model_dummy_4.py b/tests/test_dynapcnnnetwork/model_dummy_4.py index 92fd4417f..9092cf7d0 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_4.py +++ b/tests/test_dynapcnnnetwork/model_dummy_4.py @@ -38,7 +38,7 @@ def __init__(self, batch_size) -> None: surrogate_grad_fn=PeriodicExponential(), ) self.pool3 = SumPool2d(2, 2) - self.pool3a = SumPool2d(5, 5) + self.pool3a = SumPool2d(2, 2) self.conv4 = nn.Conv2d(1, 1, 2, 1, bias=False) self.iaf4 = IAFSqueeze( @@ -47,7 +47,7 @@ def __init__(self, batch_size) -> None: spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool4 = SumPool2d(3, 3) + self.pool4 = SumPool2d(2, 2) self.flat1 = nn.Flatten() self.flat2 = nn.Flatten() @@ -59,8 +59,9 @@ def __init__(self, batch_size) -> None: spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) + self.pool5 = SumPool2d(2, 2) - self.fc2 = nn.Linear(25, 10, bias=False) + 
self.fc2 = nn.Linear(49, 10, bias=False) self.iaf2_fc = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, @@ -98,7 +99,8 @@ def forward(self, x): # conv 5 - E/4 conv5_out = self.conv5(pool3a_out) iaf5_out = self.iaf5(conv5_out) - flat2_out = self.flat2(iaf5_out) + pool5_out = self.pool5(iaf5_out) + flat2_out = self.flat2(pool5_out) # fc 2 - F/5 merge2_out = self.merge2(flat2_out, flat1_out) diff --git a/tests/test_dynapcnnnetwork/model_dummy_5.py b/tests/test_dynapcnnnetwork/model_dummy_5.py new file mode 100644 index 000000000..82354d2b6 --- /dev/null +++ b/tests/test_dynapcnnnetwork/model_dummy_5.py @@ -0,0 +1,199 @@ +""" +Implementing "two independent networks" in https://github.com/synsense/sinabs/issues/181 +TODO: This is not working yet +""" + +import torch +import torch.nn as nn + +from sinabs.activation.surrogate_gradient_fn import PeriodicExponential +from sinabs.layers import IAFSqueeze, Merge, SumPool2d + + +class SNN(nn.Module): + def __init__(self, batch_size) -> None: + super().__init__() + + self.conv_A = nn.Conv2d(2, 4, 2, 1, bias=False) + + self.iaf_A = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.conv_B = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_B = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_B = SumPool2d(2, 2) + + self.conv_C = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_C = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_C = SumPool2d(2, 2) + + self.conv_D = nn.Conv2d(2, 4, 2, 1, bias=False) + self.iaf_D = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.conv_E = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_E = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_E = SumPool2d(2, 2) + + self.conv_F = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_F = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_F = SumPool2d(2, 2) + + self.flat_branch1 = nn.Flatten() + self.flat_branch2 = nn.Flatten() + + self.fc1 = nn.Linear(196, 100, bias=False) + self.iaf1_fc = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.fc2 = nn.Linear(100, 100, bias=False) + self.iaf2_fc = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.fc3 = nn.Linear(100, 10, bias=False) + self.iaf3_fc = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + def forward(self, x): + # conv 1 - A + conv_A_out = self.conv_A(x) + iaf_A_out = self.iaf_A(conv_A_out) + # conv 2 - B + conv_B_out = self.conv_B(iaf_A_out) + iaf_B_out = self.iaf_B(conv_B_out) + pool_B_out = self.pool_B(iaf_B_out) + # conv 3 - C + conv_C_out = self.conv_C(pool_B_out) + iaf_C_out = self.iaf_C(conv_C_out) + pool_C_out = self.pool_C(iaf_C_out) + + # --- + + # conv 4 - D + conv_D_out = self.conv_D(x) + iaf_D_out = self.iaf_D(conv_D_out) + # conv 5 - E + conv_E_out = self.conv_E(iaf_D_out) + iaf_E_out = self.iaf_E(conv_E_out) + pool_E_out = self.pool_E(iaf_E_out) + # conv 6 - F + conv_F_out = 
self.conv_F(pool_E_out) + iaf_F_out = self.iaf_F(conv_F_out) + pool_F_out = self.pool_F(iaf_F_out) + + # --- + + flat_branch1_out = self.flat_branch1(pool_C_out) + flat_branch2_out = self.flat_branch2(pool_F_out) + + # FC 7 - G + fc1_out = self.fc1(flat_branch1_out) + iaf1_fc_out = self.iaf1_fc(fc1_out) + + # FC 8 - H + fc2_out1 = self.fc2(iaf1_fc_out) + iaf2_fc_out1 = self.iaf2_fc(fc2_out1) + + fc3_out1 = self.fc3(iaf2_fc_out1) + iaf3_fc3_out1 = self.iaf3_fc(fc3_out1) + + # second branch - reuses fc1, fc2 and fc3 + fc1_out2 = self.fc1(flat_branch2_out) + iaf1_fc_out2 = self.iaf1_fc(fc1_out2) + + fc2_out2 = self.fc2(iaf1_fc_out2) + iaf2_fc_out2 = self.iaf2_fc(fc2_out2) + + fc3_out2 = self.fc3(iaf2_fc_out2) + iaf3_fc3_out2 = self.iaf3_fc(fc3_out2) + + return iaf3_fc3_out1, iaf3_fc3_out2 + + +channels = 2 +height = 34 +width = 34 +batch_size = 2 +input_shape = (channels, height, width) + +snn = SNN(batch_size) + +expected_output = { + "dcnnl_edges": { + (0, 2), + (2, 4), + (4, 6), + (6, 7), + (1, 3), + (3, 5), + (5, 6), + (7, 8), + ("input", 0), + ("input", 1), + }, + "node_source_map": { + 0: {"input"}, + 2: {0}, + 4: {2}, + 6: {4, 5}, + 1: {"input"}, + 3: {1}, + 5: {3}, + 7: {6}, + 8: {7}, + }, + "destination_map": { + 0: {2}, + 2: {4}, + 4: {6}, + 6: {7}, + 1: {3}, + 3: {5}, + 5: {6}, + 7: {8}, + 8: {-1}, + }, + "sorted_nodes": [0, 1, 2, 3, 4, 5, 6, 7, 8], + "output_shape": torch.Size([2, 10, 1, 1]), + "entry_points": {0, 1}, +} diff --git a/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py b/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py index de694ace6..3706a98f4 100644 --- a/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py +++ b/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py @@ -2,8 +2,12 @@ import torch from sinabs.backend.dynapcnn.dynapcnn_network import DynapcnnNetwork +from tests.hw_utils import find_open_devices, supported_device_types_for_testing -from .conftest_dynapcnnnetwork import args_DynapcnnNetworkTest +from .conftest_dynapcnnnetwork import ( + args_DynapcnnNetworkTest, + args_DynapcnnNetworkMappingTest, +) @@ -58,3 +62,22 @@ def test_DynapcnnNetwork(snn, input_shape, batch_size, expected_output): assert ( expected_output["output_shape"] == output.shape ), "wrong model output tensor shape." + + +@pytest.mark.parametrize( + "snn, input_shape, batch_size", args_DynapcnnNetworkMappingTest ) +def test_DynapcnnNetwork_movement(snn, input_shape, batch_size): + """Tests whether the network can be mapped to the hardware.""" + + dcnnnet = DynapcnnNetwork(snn, input_shape, batch_size) + + devices = find_open_devices() + + if len(devices) == 0: + pytest.skip("A connected Speck device is required to run this test") + + # deploy to devkit + for device in devices: + if device in supported_device_types_for_testing: + dcnnnet.to(device=device, monitor_layers=["dvs", -1])
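To exercise the new hardware-mapping test locally, here is a small invocation sketch. It assumes `pytest` is installed and the repository root is the working directory; as the test body shows, the test skips itself when no Speck devkit is connected.

```python
# Run only the new mapping test; equivalent to invoking
# `pytest tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py -k movement`
# from the command line.
import sys

import pytest

sys.exit(
    pytest.main(["tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py", "-k", "movement"])
)
```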