From 5d13fae5e0985d133b49716e33670a8502e225a3 Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Fri, 30 Jan 2026 04:56:00 +0100 Subject: [PATCH 01/11] Format code Black update to 26.1.0 requires different formatting --- docs/getting_started/iaf_neuron_model.ipynb | 1 - docs/speck/notebooks/nmnist_quick_start.ipynb | 2 -- docs/speck/notebooks/play_with_speck_dvs.ipynb | 1 - docs/tutorials/nmnist.ipynb | 1 - sinabs/backend/dynapcnn/chips/dynapcnn.py | 4 ++-- sinabs/backend/dynapcnn/dynapcnn_visualizer.py | 6 +++--- sinabs/backend/dynapcnn/nir_graph_extractor.py | 4 ++-- sinabs/layers/pool2d.py | 2 +- tests/test_dynapcnn/test_visualizer.py | 2 +- 9 files changed, 9 insertions(+), 14 deletions(-) diff --git a/docs/getting_started/iaf_neuron_model.ipynb b/docs/getting_started/iaf_neuron_model.ipynb index e920d55ad..992a31205 100644 --- a/docs/getting_started/iaf_neuron_model.ipynb +++ b/docs/getting_started/iaf_neuron_model.ipynb @@ -163,7 +163,6 @@ "import torch\n", "import sinabs.layers as sl\n", "\n", - "\n", "# Define a neuron in 'SINABS'\n", "neuron = sl.IAF()\n", "\n", diff --git a/docs/speck/notebooks/nmnist_quick_start.ipynb b/docs/speck/notebooks/nmnist_quick_start.ipynb index bcf975e64..5eb9edf64 100644 --- a/docs/speck/notebooks/nmnist_quick_start.ipynb +++ b/docs/speck/notebooks/nmnist_quick_start.ipynb @@ -97,7 +97,6 @@ "source": [ "from torch import nn\n", "\n", - "\n", "# define a CNN model\n", "cnn = nn.Sequential(\n", " # [2, 34, 34] -> [8, 17, 17]\n", @@ -1181,7 +1180,6 @@ "source": [ "from sinabs.backend.dynapcnn.dynapcnn_visualizer import DynapcnnVisualizer\n", "\n", - "\n", "visualizer = DynapcnnVisualizer(\n", " window_scale=(4, 8),\n", " dvs_shape=(34, 34),\n", diff --git a/docs/speck/notebooks/play_with_speck_dvs.ipynb b/docs/speck/notebooks/play_with_speck_dvs.ipynb index aaf055559..0aa3ac613 100644 --- a/docs/speck/notebooks/play_with_speck_dvs.ipynb +++ b/docs/speck/notebooks/play_with_speck_dvs.ipynb @@ -103,7 +103,6 @@ "from 
sinabs.backend.dynapcnn.dynapcnn_visualizer import DynapcnnVisualizer\n", "from sinabs.backend.dynapcnn import DynapcnnNetwork\n", "\n", - "\n", "# create a dummy snn for DynapcnnNetwork initialization\n", "snn = nn.Sequential(\n", " nn.Conv2d(1, 1, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),\n", diff --git a/docs/tutorials/nmnist.ipynb b/docs/tutorials/nmnist.ipynb index 555f92a5c..31d8b876a 100644 --- a/docs/tutorials/nmnist.ipynb +++ b/docs/tutorials/nmnist.ipynb @@ -292,7 +292,6 @@ "source": [ "from tqdm.notebook import tqdm\n", "\n", - "\n", "n_epochs = 1\n", "optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n", "crit = nn.functional.cross_entropy\n", diff --git a/sinabs/backend/dynapcnn/chips/dynapcnn.py b/sinabs/backend/dynapcnn/chips/dynapcnn.py index e1da34af2..77b5987f5 100644 --- a/sinabs/backend/dynapcnn/chips/dynapcnn.py +++ b/sinabs/backend/dynapcnn/chips/dynapcnn.py @@ -101,7 +101,7 @@ def get_dynapcnn_layer_config_dict( dimensions["input_shape"]["feature_count"] = channel_count # dimensions["output_feature_count"] already done in conv2d_to_dict - (f, h, w) = layer.get_neuron_shape() + f, h, w = layer.get_neuron_shape() dimensions["output_shape"]["size"] = {} dimensions["output_shape"]["feature_count"] = f dimensions["output_shape"]["size"]["x"] = w @@ -121,7 +121,7 @@ def get_dynapcnn_layer_config_dict( config_dict["dimensions"] = dimensions # Update parameters from convolution if layer.conv_layer.bias is not None: - (weights, biases) = layer.conv_layer.parameters() + weights, biases = layer.conv_layer.parameters() else: (weights,) = layer.conv_layer.parameters() biases = torch.zeros(layer.conv_layer.out_channels) diff --git a/sinabs/backend/dynapcnn/dynapcnn_visualizer.py b/sinabs/backend/dynapcnn/dynapcnn_visualizer.py index f19210bc9..cb6abe220 100644 --- a/sinabs/backend/dynapcnn/dynapcnn_visualizer.py +++ b/sinabs/backend/dynapcnn/dynapcnn_visualizer.py @@ -455,7 +455,7 @@ def connect( # Streamer graph # Dvs node - 
(_, dvs_member_filter, _, streamer_node) = self.streamer_graph.sequential( + _, dvs_member_filter, _, streamer_node = self.streamer_graph.sequential( [ dynapcnn_network.samna_device.get_model_source_node(), samna.graph.JitMemberSelect(), @@ -526,7 +526,7 @@ def connect( ## Readout node if "r" in self.gui_type: if self.readout_node == "JitMajorityReadout": - (_, majority_readout_node, _) = self.streamer_graph.sequential( + _, majority_readout_node, _ = self.streamer_graph.sequential( [ spike_collection_node, samna.graph.JitMajorityReadout(samna.ui.Event), @@ -544,7 +544,7 @@ def connect( self.readout_default_threshold_high ) else: - (_, majority_readout_node, _) = self.streamer_graph.sequential( + _, majority_readout_node, _ = self.streamer_graph.sequential( [ spike_collection_node, self.readout_node, diff --git a/sinabs/backend/dynapcnn/nir_graph_extractor.py b/sinabs/backend/dynapcnn/nir_graph_extractor.py index 30f5ef5bf..beecd50af 100644 --- a/sinabs/backend/dynapcnn/nir_graph_extractor.py +++ b/sinabs/backend/dynapcnn/nir_graph_extractor.py @@ -504,7 +504,7 @@ def _add_dvs_node(self, dvs_input_shape: Tuple[int, int, int]) -> DVSLayer: A handler to the newly added `DVSLayer` instance. """ - (features, height, width) = dvs_input_shape + features, height, width = dvs_input_shape if features > 2: raise ValueError( f"A DVSLayer istance can have a max feature dimension of 2 but {features} was given." @@ -578,7 +578,7 @@ def _validate_dvs_setup(self, dvs_input_shape: Tuple[int, int, int]) -> None: f"A DVSLayer node exists and there are {nb_entries} entry nodes in the graph: the DVSLayer should be the only entry node." 
) - (features, _, _) = dvs_input_shape + features, _, _ = dvs_input_shape if features > 2: raise ValueError( diff --git a/sinabs/layers/pool2d.py b/sinabs/layers/pool2d.py index c9aeb37e1..3958086e0 100644 --- a/sinabs/layers/pool2d.py +++ b/sinabs/layers/pool2d.py @@ -80,7 +80,7 @@ def get_output_shape(self, input_shape: Tuple) -> Tuple: Returns: (channelsOut, height_out, width_out) """ - (channels, height, width) = input_shape + channels, height, width = input_shape height_out = conv_output_size( height + sum(self.padding[2:]), self.pool_size[0], self.strides[0] diff --git a/tests/test_dynapcnn/test_visualizer.py b/tests/test_dynapcnn/test_visualizer.py index bcc1c6219..8ac42747f 100644 --- a/tests/test_dynapcnn/test_visualizer.py +++ b/tests/test_dynapcnn/test_visualizer.py @@ -89,7 +89,7 @@ def test_jit_compilation(): streamer_graph = samna.graph.EventFilterGraph() # Streamer graph # Dvs node - (_, dvs_member_filter, _, streamer_node) = streamer_graph.sequential( + _, dvs_member_filter, _, streamer_node = streamer_graph.sequential( [ # samna.graph.JitSource(samna.speck2e.event.OutputEvent), dynapcnn_network.samna_device.get_model_source_node(), From b7d549b17dcc596c36acbc4ca0e6cc41192d67e2 Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Mon, 2 Feb 2026 10:58:34 +0100 Subject: [PATCH 02/11] Remove two independent networks from documentation We need to add more tests --- docs/speck/faqs/available_network_arch.md | 5 - tests/test_dynapcnnnetwork/model_dummy_5.py | 199 ++++++++++++++++++++ 2 files changed, 199 insertions(+), 5 deletions(-) create mode 100644 tests/test_dynapcnnnetwork/model_dummy_5.py diff --git a/docs/speck/faqs/available_network_arch.md b/docs/speck/faqs/available_network_arch.md index f25cb78ac..654120be9 100644 --- a/docs/speck/faqs/available_network_arch.md +++ b/docs/speck/faqs/available_network_arch.md @@ -45,11 +45,6 @@ use a `Sequential` like network.
As of `v3.1.0`, we released a network graph extraction feature that helps users deploy their networks with more complex architectures into the devkit. Our `Speck` chip, in fact, supports branched architectures. With the graph extraction feature, we support a range of network structures, as shown below: - -Two independent networks: - -![Two independent networks](imgs/two-independent-networks.png) - Two networks with merging outputs: ![Two networks with merging outputs](imgs/two-networks-merging-output.png) diff --git a/tests/test_dynapcnnnetwork/model_dummy_5.py b/tests/test_dynapcnnnetwork/model_dummy_5.py new file mode 100644 index 000000000..a49736865 --- /dev/null +++ b/tests/test_dynapcnnnetwork/model_dummy_5.py @@ -0,0 +1,199 @@ +""" +implementing "two independendt networks" in https://github.com/synsense/sinabs/issues/181 +TODO: This is not working yet +""" + +import torch +import torch.nn as nn + +from sinabs.activation.surrogate_gradient_fn import PeriodicExponential +from sinabs.layers import IAFSqueeze, Merge, SumPool2d + + +class SNN(nn.Module): + def __init__(self, batch_size) -> None: + super().__init__() + + self.conv_A = nn.Conv2d(2, 4, 2, 1, bias=False) + self.iaf_A = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.conv_B = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_B = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_B = SumPool2d(2, 2) + + self.conv_C = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_C = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_C = SumPool2d(2, 2) + + self.conv_D = nn.Conv2d(2, 4, 2, 1, bias=False) + self.iaf_D = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.conv_E = nn.Conv2d(4, 
4, 2, 1, bias=False) + self.iaf_E = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_E = SumPool2d(2, 2) + + self.conv_F = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_F = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + self.pool_F = SumPool2d(2, 2) + + self.flat_brach1 = nn.Flatten() + self.flat_brach2 = nn.Flatten() + + self.fc1 = nn.Linear(196, 100, bias=False) + self.iaf1_fc = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.fc2 = nn.Linear(100, 100, bias=False) + self.iaf2_fc = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + self.fc3 = nn.Linear(100, 10, bias=False) + self.iaf3_fc = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + + def forward(self, x): + # conv 1 - A + conv_A_out = self.conv_A(x) + iaf_A_out = self.iaf_A(conv_A_out) + # conv 2 - B + conv_B_out = self.conv_B(iaf_A_out) + iaf_B_out = self.iaf_B(conv_B_out) + pool_B_out = self.pool_B(iaf_B_out) + # conv 3 - C + conv_C_out = self.conv_C(pool_B_out) + iaf_C_out = self.iaf_C(conv_C_out) + pool_C_out = self.pool_C(iaf_C_out) + + # --- + + # conv 4 - D + conv_D_out = self.conv_D(x) + iaf_D_out = self.iaf_D(conv_D_out) + # conv 5 - E + conv_E_out = self.conv_E(iaf_D_out) + iaf_E_out = self.iaf_E(conv_E_out) + pool_E_out = self.pool_E(iaf_E_out) + # conv 6 - F + conv_F_out = self.conv_F(pool_E_out) + iaf_F_out = self.iaf_F(conv_F_out) + pool_F_out = self.pool_F(iaf_F_out) + + # --- + + flat_brach1_out = self.flat_brach1(pool_C_out) + flat_brach2_out = self.flat_brach2(pool_F_out) + + # FC 7 - G + fc1_out = self.fc1(flat_brach1_out) + iaf1_fc_out = self.iaf1_fc(fc1_out) + + # FC 8 - H + fc2_out1 = 
self.fc2(iaf1_fc_out) + iaf2_fc_out1 = self.iaf2_fc(fc2_out1) + + fc3_out1 = self.fc3(iaf2_fc_out1) + iaf3_fc3_out1 = self.iaf3_fc(fc3_out1) + + + # FC 8 - H + fc1_out2 = self.fc1(flat_brach2_out) + iaf2_fc_out2 = self.iaf1_fc(fc1_out2) + + fc2_out2 = self.fc2(iaf2_fc_out2) + iaf3_fc3_out2 = self.iaf2_fc(fc2_out2) + + fc3_out_2 = self.fc3(iaf3_fc3_out2) + iaf3_fc3_out2 = self.iaf3_fc(fc3_out_2) + + return iaf3_fc3_out1, iaf3_fc3_out2 + + +channels = 2 +height = 34 +width = 34 +batch_size = 2 +input_shape = (channels, height, width) + +snn = SNN(batch_size) + +expected_output = { + "dcnnl_edges": { + (0, 2), + (2, 4), + (4, 6), + (6, 7), + (1, 3), + (3, 5), + (5, 6), + (7, 8), + ("input", 0), + ("input", 1), + }, + "node_source_map": { + 0: {"input"}, + 2: {0}, + 4: {2}, + 6: {4, 5}, + 1: {"input"}, + 3: {1}, + 5: {3}, + 7: {6}, + 8: {7}, + }, + "destination_map": { + 0: {2}, + 2: {4}, + 4: {6}, + 6: {7}, + 1: {3}, + 3: {5}, + 5: {6}, + 7: {8}, + 8: {-1}, + }, + "sorted_nodes": [0, 1, 2, 3, 4, 5, 6, 7, 8], + "output_shape": torch.Size([2, 10, 1, 1]), + "entry_points": {0, 1}, +} From ed6c90a8948ca333714d215e4dfcada5636c115b Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 08:47:57 +0100 Subject: [PATCH 03/11] Add infrastructure for test Two parallel network implementation still need some tweak --- .../conftest_dynapcnnnetwork.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py b/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py index df12d90e9..f5005e968 100644 --- a/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py +++ b/tests/test_dynapcnnnetwork/conftest_dynapcnnnetwork.py @@ -14,6 +14,11 @@ from .model_dummy_4 import expected_output as expected_output_4 from .model_dummy_4 import input_shape as input_shape_4 from .model_dummy_4 import snn as snn_4 +from .model_dummy_5 import batch_size as batch_size_5 +from .model_dummy_5 import expected_output as 
expected_output_5 +from .model_dummy_5 import input_shape as input_shape_5 +from .model_dummy_5 import snn as snn_5 + from .model_dummy_seq import ( expected_seq_1, expected_seq_2, @@ -30,3 +35,13 @@ (seq_1, input_shape_seq, 1, expected_seq_1), (seq_2, input_shape_seq, 1, expected_seq_2), ] + +args_DynapcnnNetworkMappingTest = [ + (snn_1, input_shape_1, batch_size_1), + (snn_2, input_shape_2, batch_size_2), + (snn_3, input_shape_3, batch_size_3), + (snn_4, input_shape_4, batch_size_4), + (seq_1, input_shape_seq, 1), + (seq_2, input_shape_seq, 1), + # (snn_5, input_shape_5, batch_size_5), #TODO: forward method needs implementation +] From 4eda0861752efa5430db6d45e547e4a7b4574e26 Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 08:51:23 +0100 Subject: [PATCH 04/11] Update dummy models to meet hardware requirements --- tests/test_dynapcnnnetwork/model_dummy_1.py | 27 ++++++++++----------- tests/test_dynapcnnnetwork/model_dummy_4.py | 10 +++++--- tests/test_dynapcnnnetwork/model_dummy_5.py | 8 +++--- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/tests/test_dynapcnnnetwork/model_dummy_1.py b/tests/test_dynapcnnnetwork/model_dummy_1.py index c24aabe15..6b7ef3edc 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_1.py +++ b/tests/test_dynapcnnnetwork/model_dummy_1.py @@ -13,49 +13,49 @@ class SNN(nn.Module): def __init__(self, batch_size) -> None: super().__init__() - self.conv1 = nn.Conv2d(2, 10, 2, 1, bias=False) # node 0 + self.conv1 = nn.Conv2d(2, 10, 2, 1, bias=False) self.iaf1 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 1 - self.pool1 = nn.AvgPool2d(3, 3) # node 2 - self.pool1a = nn.AvgPool2d(4, 4) # node 3 + ) + self.pool1 = nn.AvgPool2d(2, 2) + self.pool1a = nn.AvgPool2d(2, 2) - self.conv2 = nn.Conv2d(10, 10, 4, 1, bias=False) # node 4 + self.conv2 = nn.Conv2d(10, 10, 1, 1, bias=False) self.iaf2 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, 
spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 6 + ) - self.conv3 = nn.Conv2d(10, 1, 2, 1, bias=False) # node 8 + self.conv3 = nn.Conv2d(10, 1, 2, 1, bias=False) self.iaf3 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 9 + ) self.flat = nn.Flatten() - self.fc1 = nn.Linear(49, 500, bias=False) # node 10 + self.fc1 = nn.Linear(225, 200, bias=False) self.iaf4 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 11 + ) - self.fc2 = nn.Linear(500, 10, bias=False) # node 12 + self.fc2 = nn.Linear(200, 10, bias=False) self.iaf5 = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), - ) # node 13 + ) self.adder = Merge() @@ -67,14 +67,13 @@ def forward(self, x): conv2_out = self.conv2(pool1_out) iaf2_out = self.iaf2(conv2_out) - conv3_out = self.conv3(self.adder(pool1a_out, iaf2_out)) iaf3_out = self.iaf3(conv3_out) - flat_out = self.flat(iaf3_out) fc1_out = self.fc1(flat_out) iaf4_out = self.iaf4(fc1_out) + fc2_out = self.fc2(iaf4_out) iaf5_out = self.iaf5(fc2_out) diff --git a/tests/test_dynapcnnnetwork/model_dummy_4.py b/tests/test_dynapcnnnetwork/model_dummy_4.py index 92fd4417f..9092cf7d0 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_4.py +++ b/tests/test_dynapcnnnetwork/model_dummy_4.py @@ -38,7 +38,7 @@ def __init__(self, batch_size) -> None: surrogate_grad_fn=PeriodicExponential(), ) self.pool3 = SumPool2d(2, 2) - self.pool3a = SumPool2d(5, 5) + self.pool3a = SumPool2d(2, 2) self.conv4 = nn.Conv2d(1, 1, 2, 1, bias=False) self.iaf4 = IAFSqueeze( @@ -47,7 +47,7 @@ def __init__(self, batch_size) -> None: spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool4 = SumPool2d(3, 3) + self.pool4 = SumPool2d(2, 2) self.flat1 = nn.Flatten() self.flat2 = nn.Flatten() @@ -59,8 +59,9 @@ def __init__(self, batch_size) 
-> None: spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) + self.pool5 = SumPool2d(2, 2) - self.fc2 = nn.Linear(25, 10, bias=False) + self.fc2 = nn.Linear(49, 10, bias=False) self.iaf2_fc = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, @@ -98,7 +99,8 @@ def forward(self, x): # conv 5 - E/4 conv5_out = self.conv5(pool3a_out) iaf5_out = self.iaf5(conv5_out) - flat2_out = self.flat2(iaf5_out) + pool5_out = self.pool5(iaf5_out) + flat2_out = self.flat2(pool5_out) # fc 2 - F/5 merge2_out = self.merge2(flat2_out, flat1_out) diff --git a/tests/test_dynapcnnnetwork/model_dummy_5.py b/tests/test_dynapcnnnetwork/model_dummy_5.py index a49736865..82354d2b6 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_5.py +++ b/tests/test_dynapcnnnetwork/model_dummy_5.py @@ -1,5 +1,5 @@ """ -implementing "two independendt networks" in https://github.com/synsense/sinabs/issues/181 +implementing "two independent networks" in https://github.com/synsense/sinabs/issues/181 TODO: This is not working yet """ @@ -15,6 +15,7 @@ def __init__(self, batch_size) -> None: super().__init__() self.conv_A = nn.Conv2d(2, 4, 2, 1, bias=False) + self.iaf_A = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, @@ -136,15 +137,14 @@ def forward(self, x): fc3_out1 = self.fc3(iaf2_fc_out1) iaf3_fc3_out1 = self.iaf3_fc(fc3_out1) - # FC 8 - H fc1_out2 = self.fc1(flat_brach2_out) iaf2_fc_out2 = self.iaf1_fc(fc1_out2) fc2_out2 = self.fc2(iaf2_fc_out2) - iaf3_fc3_out2 = self.iaf2_fc(fc2_out2) + iaf2_fc3_out2 = self.iaf2_fc(fc2_out2) - fc3_out_2 = self.fc3(iaf3_fc3_out2) + fc3_out_2 = self.fc3(iaf2_fc3_out2) iaf3_fc3_out2 = self.iaf3_fc(fc3_out_2) return iaf3_fc3_out1, iaf3_fc3_out2 From a25bb278df3e2a31eba63588d5d6f5abc2558c9a Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 09:10:15 +0100 Subject: [PATCH 05/11] Add tests for mapping network on hardware --- .../test_dynapcnnnetwork.py | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git 
a/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py b/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py index de694ace6..1da670828 100644 --- a/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py +++ b/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py @@ -2,8 +2,12 @@ import torch from sinabs.backend.dynapcnn.dynapcnn_network import DynapcnnNetwork +from .hw_utils import find_open_devices, supported_device_types_for_testing -from .conftest_dynapcnnnetwork import args_DynapcnnNetworkTest +from .conftest_dynapcnnnetwork import ( + args_DynapcnnNetworkTest, + args_DynapcnnNetworkMappingTest, +) @pytest.mark.parametrize( @@ -58,3 +62,22 @@ def test_DynapcnnNetwork(snn, input_shape, batch_size, expected_output): assert ( expected_output["output_shape"] == output.shape ), "wrong model output tensor shape." + + +@pytest.mark.parametrize( + "snn, input_shape, batch_size", args_DynapcnnNetworkMappingTest +) +def test_DynapcnnNetwork_movement(snn, input_shape, batch_size): + """Tests if the network can be mapped to the hardware""" + + dcnnnet = DynapcnnNetwork(snn, input_shape, batch_size) + + devices = find_open_devices() + + if len(devices) == 0: + pytest.skip("A connected Speck device is required to run this test") + + # deploy to devkit + for device in devices: + if device in supported_device_types_for_testing: + dcnnnet.to(device=device, monitor_layers=["dvs", -1]) From 5ec7f7173ad20e8e6de5af3cbac2476bac904320 Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 09:12:30 +0100 Subject: [PATCH 06/11] Fix typo and add TODO I don't get why multiple inputs have a return different from function description.
--- sinabs/backend/dynapcnn/nir_graph_extractor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sinabs/backend/dynapcnn/nir_graph_extractor.py b/sinabs/backend/dynapcnn/nir_graph_extractor.py index beecd50af..c14c41b81 100644 --- a/sinabs/backend/dynapcnn/nir_graph_extractor.py +++ b/sinabs/backend/dynapcnn/nir_graph_extractor.py @@ -752,7 +752,7 @@ def _get_nodes_io_shapes( # different input sources to a core to have the same output shapes. if any(inp.shape != inputs[0].shape for inp in inputs): raise ValueError( - f"Layer `sinabs.layers.merge.Merge` (node {node}) requires input tensors with the same shape" + f"Layer `sinabs.layers.merge.Merge` (node {node}) requires input tensors with the same shape." ) # forward input through the node. @@ -828,6 +828,7 @@ def _find_source_of_input_to(self, node: int) -> int: if len(sources) == 0: return -1 if len(sources) > 1: + # return -1 #TODO: why throw a runtime error when the documentation explicitly says -1 in case of more than one input node?
raise RuntimeError(f"Node {node} has more than 1 input") return sources.pop() From 7a3a8e021e847f5d3bb90aa74fb13048353f0cfa Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 09:36:59 +0100 Subject: [PATCH 07/11] Move hw_utils to test folder as it can be used for multiple tests Update all the references to it --- tests/{test_dynapcnn => }/hw_utils.py | 0 tests/test_dynapcnn/__init__.py | 0 tests/test_dynapcnn/test_device_movement.py | 2 +- tests/test_dynapcnn/test_discover_device.py | 2 +- tests/test_dynapcnn/test_large_net.py | 2 +- tests/test_dynapcnn/test_neuron_leak.py | 7 +------ tests/test_dynapcnn/test_single_neuron_hardware.py | 3 +-- tests/test_dynapcnn/test_visualizer.py | 5 ++--- tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py | 2 +- 9 files changed, 8 insertions(+), 15 deletions(-) rename tests/{test_dynapcnn => }/hw_utils.py (100%) create mode 100644 tests/test_dynapcnn/__init__.py diff --git a/tests/test_dynapcnn/hw_utils.py b/tests/hw_utils.py similarity index 100% rename from tests/test_dynapcnn/hw_utils.py rename to tests/hw_utils.py diff --git a/tests/test_dynapcnn/__init__.py b/tests/test_dynapcnn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_dynapcnn/test_device_movement.py b/tests/test_dynapcnn/test_device_movement.py index 34d84c5ec..f8864c24b 100644 --- a/tests/test_dynapcnn/test_device_movement.py +++ b/tests/test_dynapcnn/test_device_movement.py @@ -3,7 +3,7 @@ from sinabs.backend.dynapcnn import DynapcnnNetwork from sinabs.from_torch import from_model -from hw_utils import find_open_devices +from tests.hw_utils import find_open_devices ann = nn.Sequential( nn.Conv2d(1, 20, 5, 1, bias=False), diff --git a/tests/test_dynapcnn/test_discover_device.py b/tests/test_dynapcnn/test_discover_device.py index caae4eef1..cecd55b3a 100644 --- a/tests/test_dynapcnn/test_discover_device.py +++ b/tests/test_dynapcnn/test_discover_device.py @@ -2,7 +2,7 @@ import samna from sinabs.backend.dynapcnn 
import io -from hw_utils import find_open_devices +from tests.hw_utils import find_open_devices def test_is_device_type(): diff --git a/tests/test_dynapcnn/test_large_net.py b/tests/test_dynapcnn/test_large_net.py index 4c7c9432a..27937b354 100644 --- a/tests/test_dynapcnn/test_large_net.py +++ b/tests/test_dynapcnn/test_large_net.py @@ -14,7 +14,7 @@ from sinabs.backend.dynapcnn.dynapcnn_network import DynapcnnNetwork from sinabs.from_torch import from_model from sinabs.layers import NeuromorphicReLU -from hw_utils import find_open_devices +from tests.hw_utils import find_open_devices class DynapCnnNetA(nn.Module): diff --git a/tests/test_dynapcnn/test_neuron_leak.py b/tests/test_dynapcnn/test_neuron_leak.py index 7bc0c63b5..769e366c5 100644 --- a/tests/test_dynapcnn/test_neuron_leak.py +++ b/tests/test_dynapcnn/test_neuron_leak.py @@ -3,12 +3,7 @@ import pytest import samna import torch -from hw_utils import ( - find_open_devices, - get_ones_network, - is_any_samna_device_connected, - is_device_connected, -) +from tests.hw_utils import find_open_devices, is_any_samna_device_connected from torch import nn from sinabs.backend.dynapcnn import DynapcnnNetwork diff --git a/tests/test_dynapcnn/test_single_neuron_hardware.py b/tests/test_dynapcnn/test_single_neuron_hardware.py index d7d730222..728c21ac4 100644 --- a/tests/test_dynapcnn/test_single_neuron_hardware.py +++ b/tests/test_dynapcnn/test_single_neuron_hardware.py @@ -1,9 +1,8 @@ import pytest -from hw_utils import ( +from tests.hw_utils import ( find_open_devices, get_ones_network, is_any_samna_device_connected, - is_device_connected, reset_all_connected_boards, ) diff --git a/tests/test_dynapcnn/test_visualizer.py b/tests/test_dynapcnn/test_visualizer.py index 8ac42747f..dbe1fe397 100644 --- a/tests/test_dynapcnn/test_visualizer.py +++ b/tests/test_dynapcnn/test_visualizer.py @@ -3,9 +3,8 @@ import pytest import samna -from custom_jit_filters import majority_readout_filter as custom_filter -from hw_utils 
import find_open_devices, is_any_samna_device_connected - +from .custom_jit_filters import majority_readout_filter as custom_filter +from tests.hw_utils import find_open_devices from sinabs.backend.dynapcnn.dynapcnn_visualizer import DynapcnnVisualizer diff --git a/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py b/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py index 1da670828..3706a98f4 100644 --- a/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py +++ b/tests/test_dynapcnnnetwork/test_dynapcnnnetwork.py @@ -2,7 +2,7 @@ import torch from sinabs.backend.dynapcnn.dynapcnn_network import DynapcnnNetwork -from .hw_utils import find_open_devices, supported_device_types_for_testing +from tests.hw_utils import find_open_devices, supported_device_types_for_testing from .conftest_dynapcnnnetwork import ( args_DynapcnnNetworkTest, From ef353c516e67ed5b65c4696d8df728f46ffdfe7a Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 10:16:03 +0100 Subject: [PATCH 08/11] Update documentation with the tested types of networks --- docs/speck/faqs/available_network_arch.md | 146 ++++++++++++---------- 1 file changed, 82 insertions(+), 64 deletions(-) diff --git a/docs/speck/faqs/available_network_arch.md b/docs/speck/faqs/available_network_arch.md index 654120be9..7ae38a3b3 100644 --- a/docs/speck/faqs/available_network_arch.md +++ b/docs/speck/faqs/available_network_arch.md @@ -39,23 +39,31 @@ dynapcnn.to(devcie="your device", chip_layers_ordering=[2, 5, 7, 1]) ## What network structure can I define? -`Sinabs` can parse a `torch.nn.Sequential` like architecture, so it is recommended to -use a `Sequential` like network. +`Sinabs` can parse a `torch.nn.Sequential` like architecture, so it is recommended to use a `Sequential` like network. As of `v3.1.0`, we released a network graph extraction feature that helps users deploy their networks with more complex architectures into the devkit. Our `Speck` chip, in fact, supports branched architectures. 
With the graph extraction feature, we support a range of network structures, as shown below: +A network with a merge and a split: + +![A network with a merge and a split](imgs/network-with-merge-and-split.png) + Two networks with merging outputs: ![Two networks with merging outputs](imgs/two-networks-merging-output.png) -A network with a merge and a split: +A network with residual connections: -![A network with a merge and a split](imgs/network-with-merge-and-split.png) +![A network with residual connections](imgs/network-with-residual-connection.png) +A more complex network: + +![A more complex network](imgs/complex-network.png) Note: with the graph extracture feature it is possible to implement recurrent neural networks. However, this is not recommended or supported as it can result in deadlock on the chip. +Note2: the use of two parallel network although supported by our chip was not fully considered in our sinabs implementation. + ## How to make use of the graph extraction feature? For general architectures, users need to define their classes, by defining at least the `__init__` method with all the layers, as well as an appropriate `forward` method. 
@@ -63,64 +71,70 @@ For general architectures, users need to define their classes, by defining at le Here is an example to define a network with a merge and a split: ```python +import torch import torch.nn as nn from sinabs.activation.surrogate_gradient_fn import PeriodicExponential from sinabs.layers import IAFSqueeze, Merge, SumPool2d + class SNN(nn.Module): def __init__(self, batch_size) -> None: super().__init__() - - self.conv1 = nn.Conv2d(2, 1, 2, 1, bias=False) - self.iaf1 = IAFSqueeze( + # -- graph node A -- + self.conv_A = nn.Conv2d(2, 4, 2, 1, bias=False) + self.iaf_A = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - - self.conv2 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf2 = IAFSqueeze( + # -- graph node B -- + self.conv_B = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf2_B = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool2 = SumPool2d(2, 2) - - self.conv3 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf3 = IAFSqueeze( + self.pool_B = SumPool2d(2, 2) + # -- graph node C -- + self.conv_C = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_C = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool3 = SumPool2d(2, 2) - self.pool3a = SumPool2d(5, 5) - - self.conv4 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf4 = IAFSqueeze( + self.pool_C = SumPool2d(2, 2) + # -- graph node D -- + self.conv_D = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_D = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - self.pool4 = SumPool2d(3, 3) - - self.flat1 = nn.Flatten() - self.flat2 = nn.Flatten() - - self.conv5 = nn.Conv2d(1, 1, 2, 1, bias=False) - self.iaf5 = IAFSqueeze( + # -- graph node E -- + self.conv_E = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf3_E = IAFSqueeze( batch_size=batch_size, 
min_v_mem=-1.0, spike_threshold=1.0, surrogate_grad_fn=PeriodicExponential(), ) - - self.fc2 = nn.Linear(25, 10, bias=False) - self.iaf2_fc = IAFSqueeze( + self.pool_E = SumPool2d(2, 2) + # -- graph node F -- + self.conv_F = nn.Conv2d(4, 4, 2, 1, bias=False) + self.iaf_F = IAFSqueeze( + batch_size=batch_size, + min_v_mem=-1.0, + spike_threshold=1.0, + surrogate_grad_fn=PeriodicExponential(), + ) + # -- graph node G -- + self.fc3 = nn.Linear(144, 10, bias=False) + self.iaf3_fc = IAFSqueeze( batch_size=batch_size, min_v_mem=-1.0, spike_threshold=1.0, @@ -129,43 +143,47 @@ class SNN(nn.Module): # -- merges -- self.merge1 = Merge() - self.merge2 = Merge() + + # -- falts -- + self.flat_D = nn.Flatten() + self.flat_F = nn.Flatten() def forward(self, x): # conv 1 - A/0 - con1_out = self.conv1(x) - iaf1_out = self.iaf1(con1_out) + convA_out = self.conv_A(x) + iaf_A_out = self.iaf_A(convA_out) # conv 2 - B/1 - conv2_out = self.conv2(iaf1_out) - iaf2_out = self.iaf2(conv2_out) - pool2_out = self.pool2(iaf2_out) + conv_B_out = self.conv_B(iaf_A_out) + iaf_B_out = self.iaf2_B(conv_B_out) + pool_B_out = self.pool_B(iaf_B_out) # conv 3 - C/2 - conv3_out = self.conv3(iaf1_out) - iaf3_out = self.iaf3(conv3_out) - pool3_out = self.pool3(iaf3_out) - pool3a_out = self.pool3a(iaf3_out) - - # conv 4 - D/3 - merge1_out = self.merge1(pool2_out, pool3_out) - conv4_out = self.conv4(merge1_out) - iaf4_out = self.iaf4(conv4_out) - pool4_out = self.pool4(iaf4_out) - flat1_out = self.flat1(pool4_out) - - # conv 5 - E/4 - conv5_out = self.conv5(pool3a_out) - iaf5_out = self.iaf5(conv5_out) - flat2_out = self.flat2(iaf5_out) - - # fc 2 - F/5 - merge2_out = self.merge2(flat2_out, flat1_out) - - fc2_out = self.fc2(merge2_out) - iaf2_fc_out = self.iaf2_fc(fc2_out) - - return iaf2_fc_out + conv_C_out = self.conv_C(pool_B_out) + iaf_C_out = self.iaf_C(conv_C_out) + pool_C_out = self.pool_C(iaf_C_out) + + # conv 4 - D/4 + conv_D_out = self.conv_D(pool_C_out) + iaf_D_out = self.iaf_D(conv_D_out) + # fc 
1 - E/3 + conv_E_out = self.conv_E(pool_B_out) + iaf3_E_out = self.iaf3_E(conv_E_out) + pool_E_out = self.pool_E(iaf3_E_out) + + # fc 2 - F/6 + conv_F_out = self.conv_F(pool_E_out) + iaf_F_out = self.iaf_F(conv_F_out) + + # fc 2 - G/5 + flat_D_out = self.flat_D(iaf_D_out) + flat_F_out = self.flat_F(iaf_F_out) + + merge1_out = self.merge1(flat_D_out, flat_F_out) + fc3_out = self.fc3(merge1_out) + iaf3_fc_out = self.iaf3_fc(fc3_out) + + return iaf3_fc_out ``` ## Can I achieve a "Residual Connection" like ResNet does? @@ -175,9 +193,9 @@ change the `samna.speck2f.configuration.CNNLayerDestination.layer` to achieve th familiar with the `samna-configuration`. You can also make use of our network graph extraction feature, to implement residual networks. -## How to use "Residual Connection" manually? +## How can I define "Residual Connection" manually? -Alright! Here I will give an example of achieving the "Residual Connection" by manually modify the `samna-configuration`. +You can also achieve "Residual Connection" by manually modify the `samna-configuration`. Let's say you want an architecture like below: @@ -216,7 +234,7 @@ class ResidualBlock(nn.Module): ``` -Since currently Sinabs can only parse Sequential like network, we need to do some tedious work like below: +You can write it like: ```python # define a Sequential first @@ -259,8 +277,8 @@ devkit.get_model().apply_configuration(samna_cfg) ``` -I have to say it is not an elegant solution though, it should help you to achieve an initial Residual Block. We will -improve this part after Sinabs has the ability for extracting model's graph. +It is a lot of manual work but it will let you have your Residual Block. +We recommend to use our network graph extraction feature for residual connections. ## What execution order should I be aware of when I am implementing a sequential structure? You should be aware with the internal layer order. 
From 1374c2498a138f721dfbc0e4d83c2ff5b901e179 Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 10:16:35 +0100 Subject: [PATCH 09/11] Remove comment of node counting --- .../faqs/imgs/two-independent-networks.png | Bin 7090 -> 0 bytes tests/test_dynapcnnnetwork/model_dummy_2.py | 41 +++++++++--------- 2 files changed, 20 insertions(+), 21 deletions(-) delete mode 100644 docs/speck/faqs/imgs/two-independent-networks.png diff --git a/docs/speck/faqs/imgs/two-independent-networks.png b/docs/speck/faqs/imgs/two-independent-networks.png deleted file mode 100644 index 66483143b1477e4dff8f2ac33c1c23fb3aae56d6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7090 zcmcIp3pCW*{-1KvQ_ekglEF9Ag;revaoD%;4_n-v3#5-Q%ux?ppu5>{)B<-|x5gZ}0v6ejeX>W^Aa-b3ot# z1OnlKUAuAv0@+mwf$a9!#|?Te+FR>_j^CWLwT)rg+M;gmu8vO54iJdsi+~pz*J>{c zeYPaHS=Z_|h#woMId=aGwuD&(C|M(VEUuN@>I1lW3X!_7?w*~v6>7n6wdBJ(2 z`vM>Se$eo_%#Gpg=*ywUc_UbhblM0VDyncsU$N5eVXlRa>fiWE%FT*cKVJ$HeQ2w% z*U~j{BS%cVfvn;0UlqC}J&y^Bus-D~S0nLLLxPKWonW1OQe2x4?n2A;=*#|w>MmdR zJQFFGsbtrePrk2_uhkLPFcK0!77CU2Ixoe4X7T8QD<+Cfe8n&HuCc(~jI1cE1>~9`J6z9x-~MCl2Qi6FT_z^5fC9kK03D zct>0JPX-4*|5_7@4hq{nPvkDiinU$DglpRyqy;1gS=uDaT z{H}Plnwl6H{^*nIP6rAe=MKM`}Hqc3- z5|OtzHz5Nd?(LC@o!#JAxb5_GuRyjrf3K^uV?fV-w`&#{2t?*2=d;T@Q_B-{?#03k zuI`;WAiPId%4e{jb00tUsySBM6^(Xq!9uj%9c-}$S?6Z@?#9edO}BBAV{5&}zI zy(xjeh4d?19mQ0bJF1~3GJUV0E31w^*M(6`Zi{k1fA~m_tIk1YhJC4+SMV_`Paf4e z!{BRSuh|EzX%fh{iLcW1ip-_CAx-8Ve}FuN?$GS^LUw8odw1>FL_HxpwjIX?onY+D z!j2#2`&aYx%eSdBJSM+H9H(iVG@P@4x|%$P2AxXC%9}!g{>(7-R?4wT;_Ff=eD>D+ z+uB&S!U9_A$tPaQzm*{3kgSD~6m$_w>cUE|Fu!UU{)Fn=6sjeL9B-pgr*?iVfv$2t z8GZ=bT>wR|x5wY&e*^6=;kq~r`E&bG4YFgaI}F*e6>&lSr9IS2zkXz4dxJfp^$NGm zP7`b7BYNxUL~6E6#wKT{`l^$yFx|KnqzhrOM?oP>HnrD;E+9Dg<%p4b#RQW%7!=uS z(oun(ioO(l7)>ti?pEYJqNWC+v8V zJW2grBONcdt1hD0Z7{$~0w%7XkZ?IIYH`HKC{kFX#FvrF9#`IW5d{yUEz1-f7KcM( z#3oYJRA3#YD6_T;wRvi<)z^0ayi|hQRA_CjbZ0M~WNtK-Vn`{;BHL?weOB6WV@u6^ 
z8D(WjMUtm0J?C1+;)B*6l2bf-_PkPM)Pl7&QIZP#+fgFQORYEe=w3){}Xs*X^iB^m^RDFpNlg!N_kJ3AgRpcp zeR*@0`4oC8J1a|xNn>HOMLkmn-dwFDp3gLv98E-NmXRjB=>0Fm*5_Po-VII#zwnquXU%>vxQ%VZ2lMBr1xvoEV)tIcK`5#=KEzx>EIHMCG6d%k>CzA1!Aibcp zWzsoPu9?&g6O*Ee>`cPkiPJ~rY(Ld$ad$BLcV#wE6H?e_1N$Wi=K?mmpbHx%Z55*1 zY|>y$v4atEd7MmTAcwZ`DQaG$#mzF#V6qwh3Sqefx9KikQkBN8eQWMO}|px|fFv5YrwhM@uT?eOX)o)MFu z>ca(bdxgFGew_W5`aMJc1OPjxKIq&r^+D&3**|OI^vKJ#6=PX4UY`Ht^($+0EDHp;MGL+!65?H>vJXEgXn&yIiqI(GyF(D^Tb@W=PR zKDPJXild>Q-|(wud(8AWbY#Hhs;l3;`Nc?cJ>%Qtf|7G@)UzWJWvq>+rY32N;yO2L zib=#_mC>l1H*bEJ>6@OOw!*ew)kzmynI9w$AS-io&va%RDJS-tv_0Vw>?xQ@OiGfs zuRj!QA}1gqkc_U27#MGP6(fVF5L$RO-<4x3?>2N1Ty!L*4iuDVI5ZtaTFy#(Nu*$J zJUPT)Jczn&X({hTx3V|6VW_@XcL=K5MbVk zp$qlsvKB$1(Ts@4z3mpQIISR({1Dg={Zw_80M@GFAqTIrZ2ecmnjgUBaA6JA+7Mo& zVpd@NJByOA%NEBK?{}rLI?kRwTRs}C7uM0Z_P}=j(?6-Hd0JLcNy%}n>19q~A!+Di zaPxE#RF>eUlib(v5uH62Dp)d1$#+=xPE1TJw)_0NSeV>8Rz-N#*hFp~ zsslfNNo732R)saod$%QlDG(ODR1_H?M$#e@5agb&u5p@Qkc&c5`a2sM-vJc>=1g|A zxwK<*Ts>j6Z{wW}Q7KY=>5dFxfq0kRA1N3b-C5~1xH3zbd03=9D~T~P>jn@`Wv+YM zj|i^!;9VL4n2+jeRh-Pm1sX=EPG1N%xshc=6zj}FI6rOM%Aox)JI}vp1`iP53&pxc zk5Z60xdWn}vEOz9P`y)ABUU*``{75mCPNGcW9>V|ghijwj*#7F9Nk|sf>u;i97ox( z+!GT8I=`N;hu__8r#saLApV$iLM6F@!^ssMC{qiISFjk(SQU(>qa>gS1R9NA?=B2n zkHh8V=}$FSk*bI&oX~h~zLqWlgwM(N}XjT@}AP*SBaVr^YOU zLWijii6uZiXG8h4l0mr3iEMm5#hsp^ps1LYlkxnwX!ld- zSPsZASh0PBr#-|@cb)Ry7Gdo(mMOM)Oq{cRdm?8;%D+89b@uecZEWn~S9T$o0H#}+ z?JJcR*_@;)FHrmXI(pZ1lwAB3VIA2<2u&8{TxM==-<^lOCcb@reIUv)egW@{B{MAX zbd}ZKZz_v*rtTm2bRJ$C%;1n++wTuXmTS0-1RoV+I}PyszixZ)cHO-*XzqlV06PCV zXaY3$HqG2mt5PJPTN5YB1RfQCSfev^CchM#pK%uT4B8X?q${p_#0i{4u|u<*_rg#Y znauKYuTDWXU5>XaGFRYo?IZYLfC!3ue2~v^V`Yjv%^94`SeSgTbnFA%bA1CV1slx`t7}KZJ!4M zJ_|=RPcS@BH=pSxO1j<2wjDheF5b88L=U zQ75!l{R~hj)OcG8O8R=!Gl|rMH0=;o>QiVX!0>JKyz5Oc-EiHroV+~wqohfY``|gY+9h(!@>XO{Ss9#?IwzL1B4jwb)zL=iEy;;+8SBs~VjW3Qcer@p zca87EQFZ?YDHs8U*P>H)R&P5>w|^M7KgmMi=Kf6?*5FJs_g{-1YsNKd^PX)2s{L+f z78!ujDjC%>~Sb$bz&I5h1m(zan@Ys~Q z`=woFeUv%TWlFm4kQsE~b42lKh?a9RkFr&gM5;HI)ZojcCk1Z$I&x<4UzfM2X1`)D 
zgbX%z!)4xV8pw>m^Epzz6M74PW;P|t!X!?L(h7ECxQij%3a`rwoLoRGR7r5xU zzQrWIBn7VO@heOk*8+unk6e%fM5T+h_cwe_K|y27tN8I`jAm3tMIbC1xD#nah1ejA zMqW-DWcw9VZ!}qX4Q#gn^b6MwS&iIYTkj9rt~!16K>5%kr&sFhZ-jg&zxinnc6B8* z2(v6)D(CVns(j6k$*^75r{VM0=4k=p@T~rlnY!vN#`a4->oxR!ROMK`pld6bnZ6$p z4YUx4VU+vbQ1y{o8X!73=I0t_S!Sf&R%2r_xO{udFGdW<9-VL>dx`6l!ooW8;PX~) zA3yjr@&@(53?;xzl{0B-;S(>RV~)UDV7^;GnW~j^m5oVTS3fkl0D=_8LY@}dI!6N>C zg}Cr_oTBo46DQ&sGd7KJXAM-=hPJpfY7-S_mOw;&kHSs*w&oBcQe(A{Rn-6%HSE|% zP-qa*I>Qkz{S(K=dn}33j2SY0-QUvEGSzo_u?43k!I2}JBEq=5jlO|F{^I2O2(^L} zBI{oWVvU@PnJKW08r-BS`&I23>?cT()R%*nm_b@vHfx``%R|9hZJf#gKoLrXho0~1Yzh#3Z7iCXqqzQsL8IGg8HDZ43CFItM~d&Y zYgklZVLk6GUV6+}8=}$pm3gi6+9AS897xJec6S6!!6WX8G{RN1 zB>OmlE$upE5+`E|ipNj?8DfqI=x54?fG|`U&Sx9ORQ}S()nO z*&W*;Wpm`euFmbWmOtM7-@fd)kpE==cN%!dO{&2aaEg?PX_(u17lgP$Vi$cz>dcv? z(HJ9v!;&g5#U}~>^u8ITrHqHYc7wr{Io8#B!0eyawR@MHuFrAcZZ(&9)Z;E<$8^f@GBT zo^|ae9FANXZJeUw3vI!m{$YQ3QtJVZC*2*p=P${Hw3x+_PqfVO@g-mNfeMD9c5Onj zzcVP1(rX^|E*hrzEQ+4Qx=F<=J%?iL{)b$znms;_04%r2(qx@J+EL{fDTMv{k>d^c zwYFBm1ijorY158VZ$pF#`+5UY87&6Li$lOY?|fv5+nWag|ECH)sQc^X(H=u?qn940c$yEWP*b*?d7nU?MAZZtls6Uj z0sDzlrg;aD3Rx>T^56fBx3S)AfO{)WQA*-3f!W#YVZQA+F@BAWE8o4@ckVw&gH|$$ z+g8EAt@%(0>kO-)O+`-iR<=P1XKJX@DS5GRu|(EKk5fdg<<*L!x)K!&tIAGj_^md5 zdc3Xi_|b$eSivJuST6C~A_t36iZf*em8%ou;^M>_U+4;_pbD#GxwsE090Tu|rHHcb zpJD#|^UqVA*)km11JyyP>=aMWGiS~m;)H!Bikz#yP}BJYqPj=<&d%oMFT0O_6v1{h zb9W-IA0hbh4pjH-gV@2VD?CdtqqOr|u Wd=c0C5%ALu1a{T%O2OqjkNyrZyGFAB diff --git a/tests/test_dynapcnnnetwork/model_dummy_2.py b/tests/test_dynapcnnnetwork/model_dummy_2.py index cff57e8d4..2fc3af5ce 100644 --- a/tests/test_dynapcnnnetwork/model_dummy_2.py +++ b/tests/test_dynapcnnnetwork/model_dummy_2.py @@ -81,39 +81,38 @@ def __init__(self, batch_size) -> None: def forward(self, x): # conv 1 - A/0 - convA_out = self.conv_A(x) # node 0 - iaf_A_out = self.iaf_A(convA_out) # node 1 + convA_out = self.conv_A(x) + iaf_A_out = self.iaf_A(convA_out) # conv 2 - B/1 - conv_B_out = self.conv_B(iaf_A_out) # node 2 - iaf_B_out = 
self.iaf2_B(conv_B_out) # node 3 - pool_B_out = self.pool_B(iaf_B_out) # node 4 + conv_B_out = self.conv_B(iaf_A_out) + iaf_B_out = self.iaf2_B(conv_B_out) + pool_B_out = self.pool_B(iaf_B_out) # conv 3 - C/2 - conv_C_out = self.conv_C(pool_B_out) # node 5 - iaf_C_out = self.iaf_C(conv_C_out) # node 7 - pool_C_out = self.pool_C(iaf_C_out) # node 8 + conv_C_out = self.conv_C(pool_B_out) + iaf_C_out = self.iaf_C(conv_C_out) + pool_C_out = self.pool_C(iaf_C_out) # conv 4 - D/4 - conv_D_out = self.conv_D(pool_C_out) # node 9 - iaf_D_out = self.iaf_D(conv_D_out) # node 10 - + conv_D_out = self.conv_D(pool_C_out) + iaf_D_out = self.iaf_D(conv_D_out) # fc 1 - E/3 - conv_E_out = self.conv_E(pool_B_out) # node 6 - iaf3_E_out = self.iaf3_E(conv_E_out) # node 12 - pool_E_out = self.pool_E(iaf3_E_out) # node 13 + conv_E_out = self.conv_E(pool_B_out) + iaf3_E_out = self.iaf3_E(conv_E_out) + pool_E_out = self.pool_E(iaf3_E_out) # fc 2 - F/6 - conv_F_out = self.conv_F(pool_E_out) # node 14 - iaf_F_out = self.iaf_F(conv_F_out) # node 15 + conv_F_out = self.conv_F(pool_E_out) + iaf_F_out = self.iaf_F(conv_F_out) # fc 2 - G/5 - flat_D_out = self.flat_D(iaf_D_out) # node 11 - flat_F_out = self.flat_F(iaf_F_out) # node 16 + flat_D_out = self.flat_D(iaf_D_out) + flat_F_out = self.flat_F(iaf_F_out) - merge1_out = self.merge1(flat_D_out, flat_F_out) # node 19 - fc3_out = self.fc3(merge1_out) # node 17 - iaf3_fc_out = self.iaf3_fc(fc3_out) # node 18 + merge1_out = self.merge1(flat_D_out, flat_F_out) + fc3_out = self.fc3(merge1_out) + iaf3_fc_out = self.iaf3_fc(fc3_out) return iaf3_fc_out From 68faf1c8639cf6b5cb778e5b9b24250d1afef025 Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 10:17:00 +0100 Subject: [PATCH 10/11] Add images for supported network types --- docs/speck/faqs/imgs/complex-network.png | Bin 0 -> 10989 bytes .../imgs/network-with-residual-connection.png | Bin 0 -> 8641 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 
docs/speck/faqs/imgs/complex-network.png create mode 100644 docs/speck/faqs/imgs/network-with-residual-connection.png diff --git a/docs/speck/faqs/imgs/complex-network.png b/docs/speck/faqs/imgs/complex-network.png new file mode 100644 index 0000000000000000000000000000000000000000..18c55b6198fdedc38dbff84f9cab503c5568f9ad GIT binary patch literal 10989 zcmb_?bySsGyYB)73j|bPff54ZQV?0Bl%RBXsFaKDkdjs;B&9?`Kmkb!5$P@g5$O;_ zTDn;@=UMyi{f&M0_`Wm7z4r{@pUL~av!3}pznCwS6{Sed(40Xa5F|3v;;IM)ULgX3 zPjre1T86QOpdvgYYUCP3FE{D;$OfxlUSybf`Bq@4 zj$XRufghv#sp5M%=U8Hop0V7N2?|dA^}uem1=Cm8bYJVF)?Km#ya{>+#UOUo32fxO zU?geS{_672$z@uGPucQZ1gAxNZZbv529+ITBR%|l5cdKNRZUDh6(hS>FY9Ah*S6T3 ziz+)0`H6a;wIY(G6Ahdi7=JP(jeeR>#J*t4Wy@)tZM;CF`*J5@A*kIpQSRJXAufy& z*G3~brm(j`mbB>OTk=jaDGh}Tk1Mku+nxj$X<8S26W%@BKT-=ZyPpmPhXY#*5y{8NlgGYeqw!61*ZHm|p4ii~+BYOV zwT!>?-rAe~hx7i4RS^O$w*h@iyfvln1l^l%Cx~qbs0MTuh6vbQe8w}%t-gd!Wn8}) zT)R*0sYZt^C)^tt`6;k}+Y|AzS7VRp_U#kRa?w@GYs<>Ew#P|xn^=PO5QnxI+OXfS zFGLtQDRIOx?kA-xBMMqh+ezPlj6kq6;r{Sk(?y)2@uZ`Sg2c&r{1d16sYsScKSL9> zqlC7jn60(7v5g}_%)$7Pqp=Z#i@Bp2gOrSdvewgcR0sqELPq?qy6f=L=o6h@!inQG zEOCq76GTv8$@Qf4thCoz=}*$Ejmydj*-d`Ff1hK#RC1oHiRl)%W{-H5{10wEo?W+1 z4LMm>t{5RPa#eBWsXHcK-kZU?TD87-Ccm~kgsQ_=+E!0inojT}?|LAKDTA9B5s1Z? 
z5vLIdabg655kilg06~UEAOey9YGS}3{%Z0G#6yTS!a)CQf`|WV`rG@srhorFu9yGt zKJJr$_WzIfMPFQwLwsguO%+wKO*ReW{9fQ!v3D+~fs6qodcUoeJ7-q^09hZzejJ%~ zkcjUOGiVcYvb|O|;D|ZJDoI@6l6c|2&FSw0fMNbMfqx7D_mnts?VB$hTwM5nz4C7d zIG8yya+!ptSIu+Zx%*mjt|=LOa@+9uXteE%bZnQIy5nXI@&p6M=WwvlFgP(Wu`g%9 zP{8{r5o73$xd_jGzJ*5Xi5zZL2{@fmAFK9`WDaYFwHWMFdCe?*U?);+?OtOske7O^ zW+V%`YQ_sh8dRJ)bLNxf0B>1^d#2?89Ve1_zHb3-Y+_;t{Su!bL)SWc?l0k`r!nWa zW2L62rr2cMHoKliro9m?;Ut6k{Mc%uN|uRq!_q156z*+Z%904?6>!@W)Gd$xA`NS3 zr6eo+@f_`*Y*NzC_4Rd448~*p*y;E8`;@^FU0s($@r;`yPWx0)(H`ESxOnmRaB&XJ zor5e!ky-C!>|;X0(8#VYk0oJpyhgL%K5){6#tUqb#L2VsjUMgJCjb5tAtvB3^-{p` z_g4d4XANDcW7lC7bjuxU#ytkT9bB;ABbWjh5(v-=ymXlQb{MfN$ zpZPw^%>0@o)q(sBG>vGZPmn2ZLb3O;&F(Iba!Z(Q(4mlTUCL8}WEDNVyzS+~XX6Og z$!YOlHc5xnp?d>0)#V>LX}N#jC!yKQWxmeW`PCI6^*;sm;_IRd$E|pOM)_ay|Bvv& z9i0DkB`$zPfzdP861)Jf7D&`^m;k zYkPaJN-o3Is}fNx3f+A72dGIyPGp;;%1+NYcJZ|cgDyzMh?|R%RoDsvSb)JJ2 zcD8%IFQ@OF#96=R&wu}_^9{idYeZN|%ggs}&h223)klRwDkKHhl+S? ze+o(BNnvW=v~_g!39eM8b|mmw9_*~q@LC~Lql+2>NG<1kv*rg2Q)j-tVKp^1mBb?_ zu(^sxt3+K>(tPkBp7Q2{G|dkW(6WU5-ba;^p)~0NPV*=sAsv{e>BT`3Jw@@dA0@WQ z{^w|K(9_dfkCaCEXcWM6=7)P*ZC}28FL=}t9Ua|>`Xx=gV{1}$zx~zCo|E(*DfW}i z*O{50t5`|9xs|ghCT2c=exhoBxkBd|DI?Lr_;9ghyns{o#N;FeCFT2bv;rJKB)B<* zP;)u{{(j}AUghik)4bua7gT1#=tc70iUnk+q=;VPwfc23mE^IrbN@5Kvzvp4;pQWy z_V6vL)Y|uJpVd%mFxG*uaa=sEs|Hn@-Et0gb_N$OnQ|aE+gS56y$<)>w)(W3=DM#E z5)z_`1CiP1iiUDwi_I7MKg9D`zS`TIYd|@(pit7))zy3ZW5;SE`m2fO?GTtRQz+s%icd!hfSxw%$U49ZCE680jy`)k+lNUi%&h+TX$Z*JpB@_u)Wk#(2fl~bQK;{@1>aeH zRJHLobIVAD(`gtPG2`W6hD&Vo)8%3< zHfB14LPCtl$k2n~U%$$S((uThK7AUR)P0Fakm*7GY;0`u#VN#AnkB+xf|3*MacF364{@&-+@-%E|S7iC{A3rrG-G zzCz9+9eH#xE3%O9eVFrJKJFbvFXt&Z*IWj-Dp#L{NTxC}GER+;*P&3{UI(ti2dfP< zd^U;fdX--oP<)k%fe+i-+Pcr)s!XY@6#Af7^`I|X!|>urqZO~^`c!MV>)JbBt3if8 zj)xQ`bBA6Y4zmixEqwJ9GU)gbhWiH0Wp{TsUeJZz*C+7p#LWkvq)j`M`r2Yxr>Ccz z7*G*8IXN?($@$hlK3^9Qh`%e^sBFKz^dmz_3nJ&Bypu>=jkOw!tTbpcQ<>AqYh={0+Wx0J`Y}ak6c!1mS zw=%weg}bvA2X4d5J$66d+563ei;&%+Qv1~Io*o{zO|#}&-Hpx7-rrrR#U>pz=@}VZ 
zuu^S$va&(-4Gk1DG&dmU5#7xa+UiwvaB&%c(?d-lPHb%3pL>6PAU}EdvvoTX#dGc2 zwa`m^iu7JPZ}ar3+87uxC_#7ZavzM0$9_V**{5%FWlZ18%j-e01%)9j$s*hRkK&MU zW^bk&jM8&Trn1PtPU*a}FCP~dM~u%%v>@pC`(HYARIp;zT*KKtzrF&?}qDl%)31Ons3k- zs%oWrKg?xTv}|o_O9MJ|nrJ-T499rxaJ7N79Q)Iq8h`pdWRuFYQ;h!I?O(t4#&Ma# z?v<@$v9r>|;>77L+l%-4Z{PmX(h`7Hwwdq$(5~-61RqF$|Go{X%oXLlY>k}eF)9Lb zUnfS*A`=CF(Xwp4Dgmu)&jR0W=IK_vr{S^SIMH?}9TNC7@F^2|(7N%fNzuoTACUy< zGha?nzyyt8>S^_CKtSc^~-Arb>mk_4I6vb!LW2 z@>uqV)Y(bl5NkcLh8QHMn1qCf6SKX!_th$#moRMF8U_X_P|aiy5|@6IJWgj@M&7!n zSr{18XvDcDg;rtF(b18utb!#x0TaA>>9$=;WmQ#h)#$*07L#O%0ZLof&aNQw&70t= zDx1;rkQ0plky}u@1~+tVY-}L4p7tK1HlE1JNV>5t-QLrq)+ur<#Pl?9yg$iv`+i@R z+U)!A1bLQ@ME{!hMB?QgQO{6Kr5}YNq8z5#+vUV@d`4dwV}u(Nt7aTM(AZ z%ge;iIk_Mez(HT|4=<~zfGitmFWP9g|IF8SdVE}E$Ci|o6mqKnBjxv%m6g5kB!Vxo zue>UO**7=^~IwR|+ z@BH~=$k_VQ#DYiVF_!?X`&}N3-tQ?QhfOxsHdq=VqorN@S*zUlbxMThlg**!87h`M zd1KF7=>E^E^xJwpS`YI%#i@6C%tUysTFDJnzFn3Xpo)k^z6&{VZd0>GFNwG=Lc32n zBO(VsIk$mK^OK>m5YtF=M1AU&E$aF<%x4xfK6#FtP7eB}>j<84=vl#kaSR-6`ro1* z6kj7wXNO%8+e7j2O#6q!I<)_&_zX}p_lnrPsfC3WWz>bWHT%8I9@4GJ@BpYB%u-TP zec98gif_v2*IJTQM$5OTPeYVEc5>>8QaX3@z(rEBTro)~Z|R2!K$@kliWM(r+m&Hr z!kppiLp8sEfL1Hs9OL%S^m$ItID z{HNOj#Rt7ZrQMw!P8yp_vyBWgXNVWC$Ux39A=VCsQrZ?1Yy|jWCgH5@nb*vK9dr#_ zjuaFUc(v};6G{?DVM-sz-a5V^V=-88+OXE*Tas|rSt>6T(OTbitgY_i?AF{y>E7m( z#5xE*)9#-9g~3Al&7=KxjT$c|;l1CYl27{*1)T0hIvyXAau5X~Wp6=z0pls3o5aJ@ z<*^=?FDotOJfF+LvlY*5Uq}{+%x&oFQ|J`BpuV#*GNN%Wt7^S%k+Jh3 z!ext~c@0aCCL(wUIq=NLsPM&$+}VMqU$4jCUdxZ?->`K%KJ0LCax!F#j)!HDQN>J1 zKx+LpFD@Nv(ZAW;b3UNSVkZLFpk65_YS-TSkh$x$UP z)*bQ1+B}wSXe5M5Y)nj6=q0AJ$JQLksc{trfL(pHPdhUomPjE_YYqT?v>5vM;m+~F z1y8JQ$hpgdOsQQc{V}&Qr>QgW2$D2mvdA;y#7+Ad6Xje0%Y~ zO3nX%Y+YI+PUq&Q)V@2`b?=&~Q3P$l_iiNdhj z)>uo|h6!*K1+@y1hBrY!1nms)Q`Q3m1HS`C{bW#wPk8d=kI{iK;6P2y&23h^k4>L3 zp;NLs5<1)_L-{s4_{M$%{?!5?xwO7-)|VZ#zOg|;MU?^E%Ap1K=*RYN-}nm3E&_6ml0={$6g(DDYV?TQ-G*PrfZ6CWb z0ML`iW5=#)d)U%>r^4W>)Z%Qi_l#n*^?3ccutsG4x7>#%iaDAex)b>yZ+;ge)hJ)- z92?U&H#aYLm<|F>p@a4uBN3w}_3I83C|znNN$GI-esK>!EOKXex3#ZNBSR^f1yg!e 
zPcgU6<<6~Z!=am-jyS|sSXk)nO*pY+HuUl8Y?EkXZG^V*%a<=x9yR#OME2+DKD?eM zT1mmaj395g2~Z+4CB^586$l_L7cXDV4WkvbXbPeL5S4R&%%#Yr!*S*-H66%GsHCW< zC@AB|dPxZ>Da+nW6*tfeSVt)WOB_0du_$)-u-FQFIh{e3CZ<;j3GHOOC`QJ60EE=E zwYAeL@zC!*}9ajV2Oblic>$DY$p@ARjq~TvtLN(p1D+-zFCrWo8Up?wk1;eXw&=T2Z2jYvO3<(A>8ma-E^kDIK;I;25ko8x;)@gWL8!&U z2m#?!rxL@b6xA{m`vS?CHn)G+ws%FSb(OStb}H2z?XilbXdeuy`}Jk2h(+Qsa>QNg zCkr1+J{h-DRvm8tnD5CT^&NRz?zC_oZO82Y?vOkWuyAw1M__POI}^bGoJ)Xk3FB)9 zF%>Q@TEQ!}A$VbptNNV=A(D`xLj&*Mhru#85$UVS^8R3L-^*5)=YudQjz7Y|)?F ziqeE7PKRvIc>+%4%;aS1)Rd7wDjm12!1QT^TnlD860YHxS|O%URaF&pM>|YnBYsdas}9!N_r?S0KY>^T zrR0T58wzt!&jkgdabeI{ZYj#0m*V?BJk0PXxx^_;ou!iM53xXv>rb0PtH#Sy zx6DBTWP}j=1a_U$6d)&X8vlrZ7&PqnPYoh*VhPK^g14aV;D{))WE`>z2s5uAl-PWL zm9!^hga8qf;Ea=pgUN^~xgsF&z_2-_>*Wu=VIsw`D-?=q6Hy1DT-}gfCm}AL>9#c= zf?)#LkGw$vCX9m8Us_svqo(QTaGwX$c#4FY^Lb6Ze51k1om&?H3IKF^>$N*gRSX27 zZJCUMBAtR&^Y@36pWRI1LiGd1EG^uR0Of?LClU<=NrKc*lLh| zSRp^s(a{N=vU=_7OMu(SH5zU+2H?$aU}3wx^C9P}OG^h5k<#S?4Rj!UN6UePIY>;@ z=v@RtI55|fK|w~A0!;ThE2|txXJ|A!X2yM_@<|aTyH4i!@5*_ipT<+C-X+P?3b`u7 z$tfQF8kh z(oflTOd%8#)at=Y|5}VQUw)q-4uOvf| zb;=AFjUkbvHBum4RPGh2si}}?Oti+-jEsyPz1&z|?@g5=4Mn2wwYIiCC^8B4@boO3 zT&e{PQ5}*d#umG=p#~dDeMUU@#s+X~s6B-%#LH7t%pxKp6}w)XoSdM|=Ox^6-rF>X z)!`yId5&%SbU;Ev0xsJFwNQm+x`;%gw6q2oZHulY@Y%AjuP98<&Ta&-E#U^M4pl1Q z4jphj6+nbXOOPi@=K_Ff1n9mYEz)L#!>L=i(wLl-l$Rvz?qK>O0!nM!k01N~;lL&3 z2#AOcE|Z}TpvQ!tp4icPz6UX0HOP06kDWJ!Jz#B0JZ!5VaV}d4Zv0LS|GCO(2l$5n z3?PvX;O=Rl^H5di4wl**0<0hG7@()1aO$ZDeJ#)O#LX=mXfz*Yn9%*W?#*B}0cbW{3IU75AlFjrD%0RzF8fJN&AVtmGHHvY2&+JNkJ{8q2gI)@aCsLE&S{ft-aZyq@wOCRm-Z$k+J0GQU;uiC zhex8G>v-HlucV5KfIEkqazsoR(KR~Pl!WE77X8TRsHhM6-U{Dc#+q=J4`8_{J$LIo zi{8vu<$`j5k^~gPuR4q^U!43OS_ZfWYKQwGPa0Yv6MnMjJ8wO9lyy(+`2FsNR-4&W zjJdWJ&?ArpjQmAk?6mp{&dki@$wcXCV3(_$76-`zKl`cxMIVfP)X*$_o}2(gC~rt4 zY(Rqro~++CL}ZeIMwsR5KG2_~b}c$N`IeLvoniq5r*pJxw1}r0q+7R;w}D7IEWkdH zhM7U%YxdC?ou6+6Ld1DK5V@Q*WYr(T8aq8V*Rc{@H(x41UgG;0Q)<}~c9;?p;t28a z0*xEPfXd+h%B@)Dtz}`Ky9o7DdWwy-a!NIM62T^OjW1H?yHdl4OGphaf`RvWHYa}@ z1a{m2& 
z05k(jnaD1)Xds`X?kk7$11mDZJHHxqzCY!>Ku@2svr~FrBts!#TRbEXkM9!4FLM&) zDz{k52#?u!Z9x}0lT#ym^XL+dZ^e=Ho{_{Ebi*o^1)oPLJB)iX_ zw&<0~l^+#l4~!QEB-ktdW$G5(C-S{ZM3BGX+d>zJTzm;BUs3TKtOo$4#70EiZG;l;#?H)G-JYF3^42B!O)~o1lcGBqvcW3qU%Y4aC(clnU>0;57ViP$)0b z(tb!wV}=9nvEF(OXOp}}8zON6KKRkg7Qkcxv$h~n#UUZYA9Tu8L4#EWshau(Jkf5z zVYah6K3wp~7a(gb11iS2{Z;GesBVKli6P7>IvkIHC==kg+u_!LaiKQygv)gBt?KU? z$zIt2VK{N4#G0{jbE{udO6mn%3=b@V+XiqH%1u=7Td|XWm=QQU*BnNhv$|>rDm&*X zn};|LP$?-F@ZDV5Y?2|_$nFL6v-x;+XMoh;1li@~JbrOv&>XZS__IeL%47=Cf&>*%_g-rsHn>sxx zI51Edq$_}MUy`+-_JQry_PnB^0=yTewFwFMf&z{-sQP`-t6pi)i z_N@j|k>As?`mctpOrmcz!&+geM}XUN&yiNZQF>#xQ_H}BAr+**JDy;T$8$V-=C-{k^CjB{sGdUNZIu@p0xmE7id5~qMI-ao>f?*4jW98yfQ&CaDiTg1P1f)VK zgl8$ufT+9eFO^WIofmLkd<`3@0jLQ#p*wf({Na>Ng0rHot^NAW{vub^?o5KtqRNX! zI6PoPsslp95wEhcGW40Y%d?Hjk3?4Wo=j5+uhtTrIeT^z`(<$ijMieuIn_`);Rj&w z;o~f~bwRI#Qk)}t2E=CJ}d-3)jkIjfSSZ!^stttT6!7TZTa{jfD zU(^5ntr)m_=|FwRIDpz_FX0SeSWcXu0z4hwd#z&i5^^8x9S&CEpe^LLNB|m0GBj`j zDI8A+%M5M|cr_I5*Vc;i3kb|lhKsyPPNp~1@BuJu4hDTtaBvSq>sBoRDaV-`mv3FG zHvvF+6AZPdPoL88JEU!|jNOvK?Ktdn^qV&yU_>11)bOTS1ic8X3)u77P;;yY3!eIb zE0n^_s+9+vHwWNA8kp1d;T7fOfMj%mYlav}z)ceX&b`y^vA_n$FWKz>@+JLb*2Q?Z z+v(j8e2Dt&-Cp2)No%|#p~<5`uh5wATWg(U48Q83C>pZf_SM9kCSHsYLEj*54Y1o%vP_MF4R&| zP;tn}Dk%-X&4%7`Crkg2&%kOL+mw=f?BHMow@<+RhJkRLx1xlVnK`h(ijg6ISmAtA z?EYOSBe=NT+}vag|9%P(yxni_8VwZTud>T9|N%p!M-|sZsh%HK>m=`E1*1z$FEzZU9Qi0^ecEL zegZBWnU@RbKm#^1bjl}rn{)HqpKgh1-|gS>u)cpj19AyeR4qL{JxDLg z0q1Cesqg?=$Zp(-1SLs%9Ik*c(~BHAAQq{poDmwYD=Gw;{>M93{}WjL6XyK=eP|Lj zp7?S?bdH$1TPiE-`cM((>}T!WJS5clQcsTI$nq@yZ&m6aA7t zn~C@bq*orYe{|A-u(efY{zr*gVM)zeGd=+2sJFP=S9DZVT40K}7MASq^&2wd?rF6) zq{cUgCx0Uo<)2LaZ#Tz$+QQBX`l#;V1a|}o9)H{_``aD3zYFF6xC8fpg#s6YAP|$% zGG@^DFvk50^a*)8gO2_P+H7AD4`XBWYEKzA_BA{u$AL(ctg@UfLt>#<_;)`EcnGAtRwEo_EjS*?$3O$sHH~ literal 0 HcmV?d00001 diff --git a/docs/speck/faqs/imgs/network-with-residual-connection.png b/docs/speck/faqs/imgs/network-with-residual-connection.png new file mode 100644 index 
0000000000000000000000000000000000000000..d1456353ba4306ade99f096fe7bfb1134921bec9 GIT binary patch literal 8641 zcmbVx2|ShE+CEAmiloeACCU`B&B{g+GDHcx%#vA1*zrc&oT+3^$xI~kRIw>VLMl^i zBGb0b^ZxF~d+Pt5-+90D|IR7<_j~qw)?RBp_qx}0U)S}7T+q>Apkt?_prByTJd4(+ zprDMWprGp9y$f0fO453uA;?};^@66VDzA&1vyJ@?YYK{E(SFhOT5nW1n=qZ{!VEd5 zc!%#lmizjO=WMe{5WnH>muK?!9(}kKcvM6)H0;TD6PMX$i|$uVmyKzRPcv>%4)gHp zgo^49FUOn-W8D|IK07l&nBn3r%{?!6eKOIB`YGmxv;#W zHQepp53a~@*q2=vS{i5yGV(vYHY*ldXkE@;cBh~a6+k|e-kHjt&`9Hlf2ICkGX%>eK*bMVZ+(ofp=O|gm~2(=;#hS78g{`=`ob{VklS|vv;4* zEp#w*EHHC0=2}boydx!0&c5Jr_h(hgJY4o}{-J~&ns+$;u@(B!hYufq%{Aw}ab!8d zba}JNxzJW=`JsxwHvFSdkvuC+Ns&QyC!2y-82Q7|{rT<=n$oJ3d;aBlpJey8ieo}Tkr^5NdWNO)>fx-+>r2xSm@T)U=yYzO zqj&z$6*_h56sgM~3U8pR=+m-!+(>~dRuJ2CfmirK{lvtC_tq)~P2R$U9xtj{y1#B* zXv*ELv~G1{phm)HNulE^bSsw`@IFPiM>*t?f3=BK*6qBN z?O509{L%9j$I2Xg*`4ca@pc#X@L*4)kpP!iQ{UUXwYu6Vpp310)u8#0J^h<4M7HXx2`U_&RR067WvSJkFE1U8tC}QZ3O>Oxp3=NRX-_ zZ->I_NGxe0uLvJ=M9ZX+Fx{D@d&0Cr+^J$HIJ>ZLiHcwN{xNB%KGyO<|J!|Tts){5 zuI-N`1wVCm5&~$rXZhVsUA2P_PETUUKFJ=RG)Xepc^Q$J;zI`y;@>}#BxoqF=`Bq5 zw1=oPp43nDmw9mF@_tQC zO_8Nr?-L{hzP)rfv`=Xz{Sb>@faPdm^-O4|uCgLNif3>*tyQGFZ$I3;dZVwRV^3Ii zL&wrLhoUAzlrneNK@mC8RNW|UMT?==L8K4P{*tD{Xv6Qvs>xgHmK_;t9rai;;fL&p zR7H7y{;hdsa_63K7@)-C=ci+$qLG#EW8B8=$AyH1oT{hGR)+RrSz%L_&j<+5s`Zvw zYi%rKDMwK6Ifynf`R2O3p`$=*OgQCW)aSR{Z+-(!U1vuo@sOERgXu$PnU8Xox4XUy zq%QVZobaCiAXEDtN9U`D1Xfppgoa3sRrsgc+uI|Nsn}fV^&`?{P+{V1Q*RxG^TzxK z%|4}>w8<3`;fX<(4r%tyzS~1gEOt&TEG(q=lI@MuhfF_SPo0N79NtFBOt&Kl1VvBD}k#9!!ao{#_5rMi&9ic;}E~iIEMoQm}DPy-bF1@^dG5<<= zTN0&Sz%y@R?XpQf^;&!DyLX`w8B_tU@JGXTh57U6lTi}8_Hus7^-55@?O8M5CQoXU zCwd$33cEZtDm48nT$Jf+Zen8cK2@<7BOQ|65N%TJ=@|Fm0lp<#AcBeM5XRT_1g1_Q zG%PG4AX~+9b8ERi>P4H}aeaOLa3&@$IS-tKCB9K0O2L92P0c>(RljaV&msMuG0pYu zk%x^>qLi)!cC%Y9&-CkExWM>CBT5=?8_p?v5`J&doqG`v&-U2-_Vi2C{Ci0O8JQl| zG)bHH370C}GWi%;?6Ly`16d_3?($uiaO$gA=`QlOMuwZnd}tn8Y}w47aq!K(eNizn zG2W8}<>3L@4MR1HpRYtec<_0meyCV9CnpE6cieli+Zb8Bs*aE)K3oE=*d{lbf2rED z0H#1Cdky-@=*Uk?sdf=YVs)2Dhcc)bikk%bE3rQ_Uy47YfC{ED|&lr 
zDrCC1T%+E9Gh8ofVPRpszq-5=vPMVf!xJy!00|O-gG!H5&%Uarrshl4%{-Wekwx#c z;rBhua8HB0waLIIjKEN#S#^eGb7U|h3nYe9b0k+srMs=FluEf%zXb#=7^aiHy*v|< zo15G6{=Jm?j#B4ACHRt2Qu2uvM~BLk(uS==qYVuWS$F6Kw7V;2lJpG>)Ts3+1>=9n z7vShZjyCN?<&@IL9@P#7=s_n*J7zq4cJy6qt0f!?h||ID?ruGEbMDp$B4PRYCoK7d zQ8}obE@C6IZg>v{yLq3Ltm))bnC|j^btUZX1w+FF@a!^Z#b??{2hAXYn|W{I;u20; zhek89;Bd(4O-|;7NM?kIhQ9iPrpE_UWL|4A+!&&M#;`#%b4avuBt})|IGnhSEL|RK z6GH}7#wQe{MI^HhYAC(;Te&ljF~;wuD2#(T?) zt}(K>=*yO5)3Zw*L)zy?#jszG9j3=VWfhF~soAWKx!dP3br}=kNX$|r*KIz@JtoG- zV@o{!D7vFCLZ%#`=A~_Ws$FlH{7KbDX_#^ z^|Gn!y{Hi=^8sk`c;4V6S=ZsKgN9!&bYDD0YDpooWyHH_2U)hM`9Q8pA31Vl%ZCZB zxJO4vX>Tb~GqRYZhlbKYE(H7tJ9qthE}sen0h-b9iPRA7`Xl>JEFG`#;UAh!K`?}i zJ|!CV4jn{@+p`EKIA1Vc+Hs?r11`^-X4B{e*btsl0Z!mgK|=k%&IwWV+vB2E_Mn-+ zKmBy_gvmnpm*>~|19$|F98t5esUGifzIr}MS`r}5`7oLqojD}uHN);cwv~p%W#{BX za>$1!iPBIzA6!UdBMWUANcX!<->53ktsTPD)vio zOY0<4kC|6z0MEk*4;rg(IyaTq`)M3FkRQ(Bv~ut4xpOBEWKeOM_m=&Xuxj~A(=ToF zer0?RdlhQ0jK#wEyv5WP+Th_YegSq=cB_DioJ_7CWi$e_DKthRWXvOQI8igVh*uJ$ z1TnkQ=d`qjAK9j&W(jk1B@jxpeA7w=cup2VGsX@AW0!!04CZeR;IXx`QU@*s1ykmk zZoR@u$zC>EJ<7erx^-@8D%$o_f5S)rnVkF!clp%-#0BLB1a58f$J+u@l3@AKK0iL3e8M03&nG7(J&$sn=*n?j?)PfGL%E~o0*1%#AP45zL@x6>oTXV1FplAVBVhPq{+s)~jaP)h{P4Onh%P z#z7v2t`}kfa0vEeWl`>8Gs6*wg&WeWZk~@QesP;MBlWmX;Q&of%Y8W;#0Sung(#>-JPZd?Z(|MQ>RFK+c_KY>I0& zIGp#~TaAegS@Py!RP%5XmQ78qnjsig#i8WuLC@{$qp-E2TI@DDr#ge(Tr>u@R?lT` z@A$-TeeW(p0!btE@e$v}hsm5+^0HUs2N?{9*T8DTrLw7F6e0XtG}JUuCr8cw7Do{% zJ#pFXUNIxf_u08QpGL2``Ku?cEMZ{q(3RD8p2)<+#PJTo-AffNt|McZ{f@oQIG_Zu z9~BUg6s`3zH}(0cV0k|w>pJs)b|U|x!9i~~Flk9pb`h7`og`l~oz>Dl2{@K^%Eiv; zwNe=kw>R%U>ht0mKH>Z0J`625^BpJmFOkgr=ARUMe^Hb0b`caiNc-kL1gCUOH+T1l z|D#{wuS4Y|OPrz^P!6K}bc%ljhUq*0#3Ia3vu7)!7#EZ^V8S&_ijw z3|Jbz+Zf}>35_+EU)ZRbo0~%&6S3Y{A?XO==v+p!;w9Tqga)C}cgKsQ=xE<8=csT= z*IwZ!)?QA$A2prNJl0fapc}fix!$Z5MLgTm{N?Bop|;wJFqxQa3Tz1}We#=ig3)Wm zLLIYe^{p;l@@`jcsu?4*i_DJu=c4(9UBXkO%VL?>e|{i;y>{2=-G8`i2x@NwclzjA z3ChEDfr0U^lLb9I9#^V8FGoM@kqq9aEC>n1mLhjk0&1v~ssGjjtN7Im=q|Lqy(S1p 
z9zt=m|H0M=o5sEb4)Oujdy(S%q2GHj0-6&f2>Yt0LHC6MDoBH3BMz?*V%TrpQlxK^ z05M$ZqUe*%DAT{73q12+l)@bIAAkd$3zIV9I!dcuE7Q)HlVEsA>0a`j`f`wkWpw*c zYwGGo;c&ILj{~Wv&^7>HBPKRhK)$+G0!USPZmzI^RL8fM&UjkEg^&@e`T`~vlgfAR z&Iw4pI$pFPhgc&UO&H&=2usf4tYqN$+@!eN#|_X2<-msRuH`!!<>9BF0&BL`j_aA2 z>}O)Kw$21)DN26J>TH3#Z696PRKM<8Kdj8ipG}jh<2b^A;TV4CDiZ zFoQ}XdYdrbKomQi4vC$dH2UXN_>&ER=`$~pru#~I!ggi^#-8h(9yI0>+pHIXiUs$c zIQEtSx+U2&NCN4gnjS30HrR?0?(6|E+)tFcq<87W}$y;Z&wNC+ZXYhmQ z@x3N@bK-!!=ae{F0AveD_(YIL*&@R7UNchY9h*O+F!DLP!o0%TWuSFHG^N*NudL*e z^hk&<%lKrKnVA_K6N3!Z#lnIJ1R$>&?qh8rmKrqG3=v+e$qR#p>!irBQRxMI18RY; z_MEnu{9J&yy94Sa47A$f&;DvJ-`Q6*!FG2z(_+Gg~Pay(9lsk4I>Z_{a*(Y%x-`Ue6KYpmI zs|Q1(Lcc5JP7%Yy7M5LvH~W;s0Rcc72E377?sK;U^&6bsoOsG%xXVBn3D7-B(5fq7 zmssL_0Ph-Shu$zCl9rxFsZTSLn^;fwpBku@2dJU91pSWkTU!W8PEJN_jR?K4i+M&( zK$Z|E3r0S)$TL7E7U+BngjM+n>_Dh_9k;pWm;+h)iaOOSu~ zmGEZn+b7f`Ie!v2&KRakoRpMIF;dC`zb@5#I0S)FcqF@gy(j8T;8fb;$cwM{h@q{m zt;4zGKf^(j`V3laI6`Is*PUm~A;9ksL=SKM(Ci=>Imn8N5p4%9N<>zge^s|LVh4bs zinu*K$m2Yno1Z6qwM~lv>AVew#X5K^FR*KR32C;H16%nM$0QD;F3F6nzAZ`X2bYCV?*Wbk-xBQTJ zs@P@dj5Dy?L{+FT;Qi)wZ<2#^X`2y*+3KQ&0NhYn_aBW>{tHjQ^;2?68w+OO7#>q5-$Fb;a}Vfr_JwVG zh zOgNY>JIW*2PYHt)(_xYNMs_Tjd3AM_#@`oQ7SNEA!*K>WB&yDg)!U`^u26u7t0oJ> zoc$)R=z&21)tZT^*m8WJ8kWMB~cTw`KEeQQ%DZ?H<=xMXe8OGo|bD8$$#@Q_hQ&nH}%92}?y zHF4K}3cEqz*|+NT<3&k{#rK8)*CrO@#(uBCY3gTc^I7$Fu2r7X?BxSK4jW75^%1}+ zu~7^h(wr-T_gVpu*NTfU^;->T`j8i9U^X=7Uw&cHmMm+23CYMI4Hk_B^5V3sdY^x1 zJzh(!_JCN;?3o*1OO6OHKjkJBc}d;qzr_7hv$7+P_?=b3+r<>7kB*D;XpEA5>FDP1 zeo=-k78L`Tef@nyap_cmh~7%az)`Xsn7sFVOXF}$#6cc7lkk(SQcPBcoalP5In)L^ z-vOa9&+ypbG8weMMvoasxXNqS$K1c=AE?oPk$=A${?3&*d3=DmAx-2@FT$)=dCZx- zM8-FU7=%*;*7a1n_asP;6;0G&XTNtw0Q@n;#>aDk-_Jm)(Xh>M?lckzVk^mDc!5)| zF^vg2QXaB2Eze2|4xmlvvwfDI^EF=%^wi!iT6wFRD!v~e=sZ|wgd*}Ka%hJ?urZhfV`eDx~tDimG zw($GC+`oJA)*Kb9BlmXqulI0&C(L<;k5c^ITDN!F1h+WZ!vTiW|MJLoIl$SmMy95| zEsjtGX!=uQV=a$mt0M0fBiOzh0ALi~oct_D)CDSsA+PlW2g(60QX+iP5{vMh{?>g9 zJ6Pwpz44OH12w?BH42t*j(_9>1hhE}mFBxMZe#>G>Q=V!x(NfU8$nies0G1SYP?-N 
zX8KmsdjWhb))s!^!D7Rw83A4~J$m$r#z}tYF7xnl%Ze2Du> zR!wPwR8f))BW;scS1jW@tvm~F|DL<%Nm6O86&ct#S z{hwMdzlENNMkwv~b~-vEd_lQ6hS**?U4{)|62P+pw`BhT#Wy5YXHCCgd2w34K?OWQ z7vSZH8_z+-{`Qwd_>TIKNY|C|^y#rOXR-XnI-Qg%v|CA0Gt}dNtwjvExNZG`s2M_x zrIGXV{{5qQLYzSxh@|}d8#|!1zF}Y$Z;?U0W}1B^{$~!kaJp+#D-n|2@lA81^^)K( z|3k?BS8t0O^d4GJ?Qg0w_Cj(kvGDfb!k602*NhSO`;NZdeK`3k$Z2*-ijOBSQwbFR rH5Ne@|F4Ps7wm$=0DmZ>EoFt*k@8YjK6CIQpF&ej2VHpPYT*9>qCiBa literal 0 HcmV?d00001 From 6c362a3b8501f989363eabc302e1906a759e3b3e Mon Sep 17 00:00:00 2001 From: Vanessa Leite Date: Wed, 4 Feb 2026 10:29:29 +0100 Subject: [PATCH 11/11] Update release notes --- docs/about/release_notes.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/about/release_notes.md b/docs/about/release_notes.md index 1f5a15691..556181ce3 100644 --- a/docs/about/release_notes.md +++ b/docs/about/release_notes.md @@ -1,5 +1,11 @@ # Release notes +## v3.1.3 (04/02/2026) + +* Add tests for mapping non-sequential networks +* Fix networks used for tests, they didnt meet the hardware constraints +* Update documentation with the non-sequential networks types that have a test + ## v3.1.2 (12/12/2025) * Update use of visualizer in Sinabs tutorials, previous tutorials were using a deprecated visualizer version.