Skip to content

Commit d89d6d9

Browse files
authored
Merge pull request #486 from neuromatch/staging
Staging
2 parents c208310 + c4aaf12 commit d89d6d9

5 files changed

Lines changed: 132 additions & 35 deletions

File tree

.github/actions/setup-ci-tools/action.yml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,3 +88,10 @@ runs:
8888
- name: Ignore ci directory
8989
shell: bash
9090
run: echo "ci/" >> .gitignore
91+
92+
- name: Stub ipywidgets for headless kernel execution
93+
shell: bash
94+
run: |
95+
mkdir -p ~/.ipython/profile_default/startup
96+
cp ${{ github.action_path }}/stub_widgets.py ~/.ipython/profile_default/startup/00-stub-widgets.py
97+
echo "Installed ipywidgets stub to IPython startup"
Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
# Stub ipywidgets for headless/CI execution.
2+
# Replaces blocking widget calls with no-ops so notebooks execute without hanging.
3+
# In Colab/Jupyter with a real frontend, the real ipywidgets is used instead.
4+
#
5+
# Installed into ~/.ipython/profile_default/startup/ by the setup-ci-tools action
6+
# so it runs automatically before any notebook cell when nbconvert spawns a kernel.
7+
import sys
8+
import types
9+
import inspect
10+
11+
12+
class _NoOpWidget:
13+
"""A no-op stand-in for any ipywidgets widget class."""
14+
15+
children = []
16+
17+
def __init__(self, *args, **kwargs):
18+
# Preserve value/options so _Interact can extract call defaults
19+
object.__setattr__(self, "value", kwargs.get("value", None))
20+
object.__setattr__(self, "options", kwargs.get("options", []))
21+
22+
def __enter__(self):
23+
return self
24+
25+
def __exit__(self, *args):
26+
pass
27+
28+
def __setattr__(self, name, value):
29+
object.__setattr__(self, name, value)
30+
31+
def __getattr__(self, name):
32+
# Return a no-op callable for any unknown method/attribute
33+
return lambda *args, **kwargs: None
34+
35+
36+
class _Interact:
37+
"""Stub for widgets.interact / widgets.interactive.
38+
39+
Calls the wrapped function once with default values extracted from
40+
widget stubs so that matplotlib outputs are captured by nbconvert.
41+
"""
42+
43+
def __call__(self, *args, **kwargs):
44+
if len(args) == 1 and callable(args[0]) and not kwargs:
45+
# Bare @widgets.interact — extract defaults from widget params
46+
return self._call_with_defaults(args[0])
47+
# @widgets.interact(param=slider) — return decorator
48+
widget_kwargs = kwargs
49+
50+
def decorator(f):
51+
return self._call_with_defaults(f, widget_kwargs)
52+
53+
return decorator
54+
55+
def _call_with_defaults(self, f, widget_kwargs=None):
56+
sig = inspect.signature(f)
57+
call_kwargs = {}
58+
for name, param in sig.parameters.items():
59+
widget = (widget_kwargs or {}).get(name)
60+
if widget is None and param.default is not inspect.Parameter.empty:
61+
widget = param.default
62+
if isinstance(widget, _NoOpWidget) and widget.value is not None:
63+
call_kwargs[name] = widget.value
64+
elif widget is not None and not isinstance(widget, _NoOpWidget):
65+
call_kwargs[name] = widget
66+
try:
67+
f(**call_kwargs)
68+
except Exception as e:
69+
print(f"[stub] interact call skipped: {e}")
70+
return f
71+
72+
73+
class _StubModule(types.ModuleType):
    """Drop-in replacement for the ipywidgets module.

    ``interact``/``interactive`` resolve to the _Interact stub; every other
    attribute lookup yields _NoOpWidget, so any
    ``from ipywidgets import AnythingAtAll`` succeeds.
    """

    interact = _Interact()
    interactive = _Interact()

    def __getattr__(self, name):
        # Non-dunder names all look like widget classes.
        if not name.startswith("__"):
            return _NoOpWidget
        # Keep dunder lookups honest (copy/pickle/repr machinery probes).
        raise AttributeError(name)
87+
88+
89+
# Install the stub so that any `import ipywidgets` inside a notebook cell
# resolves to the no-op module instead of the real (absent) package.
stub = _StubModule("ipywidgets")
# Expose the module as its own `widgets` attribute, so the idiom
# `from ipywidgets import widgets` also resolves to the stub.
stub.widgets = stub
for _modname in ("ipywidgets", "ipywidgets.widgets"):
    sys.modules[_modname] = stub

print("ipywidgets stubbed for headless CI execution")

tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,14 @@
22
"cells": [
33
{
44
"cell_type": "markdown",
5+
"id": "b96e93dc",
56
"metadata": {
67
"colab_type": "text",
78
"execution": {},
89
"id": "view-in-github"
910
},
1011
"source": [
11-
"<a href=\"https://colab.research.google.com/github/neuromatch/NeuroAI_Course/blob/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>"
12+
"<a href=\"https://colab.research.google.com/github/neuromatch/NeuroAI_Course/blob/main/tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/main/tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>"
1213
]
1314
},
1415
{
@@ -77,8 +78,7 @@
7778
},
7879
"source": [
7980
"---\n",
80-
"# Setup\n",
81-
"\n"
81+
"# Setup"
8282
]
8383
},
8484
{
@@ -160,7 +160,7 @@
160160
"logging.getLogger('matplotlib.font_manager').disabled = True\n",
161161
"\n",
162162
"%matplotlib inline\n",
163-
"%config InlineBackend.figure_format = 'retina' # perfrom high definition rendering for images and plots\n",
163+
"%config InlineBackend.figure_format = 'retina' # perform high definition rendering for images and plots\n",
164164
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle\")"
165165
]
166166
},
@@ -444,7 +444,7 @@
444444
"\n",
445445
" def __len__(self):\n",
446446
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
447-
" thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
447+
" thus we enforced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
448448
"\n",
449449
" return self.num_epochs * self.num_tasks\n",
450450
"\n",
@@ -758,7 +758,7 @@
758758
"\n",
759759
" def __len__(self):\n",
760760
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
761-
" thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
761+
" thus we enforced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
762762
"\n",
763763
" return self.num_epochs * self.num_tasks\n",
764764
"\n",
@@ -903,7 +903,7 @@
903903
"source": [
904904
"### Coding Exercise 1 Discussion\n",
905905
"\n",
906-
"1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?\n"
906+
"1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?"
907907
]
908908
},
909909
{
@@ -1416,7 +1416,7 @@
14161416
"execution": {}
14171417
},
14181418
"source": [
1419-
"*Estimated timing to here from start of tutorial: 35 minutes*\n"
1419+
"*Estimated timing to here from start of tutorial: 35 minutes*"
14201420
]
14211421
},
14221422
{
@@ -1753,7 +1753,7 @@
17531753
"name": "python",
17541754
"nbconvert_exporter": "python",
17551755
"pygments_lexer": "ipython3",
1756-
"version": "3.9.22"
1756+
"version": "3.10.19"
17571757
}
17581758
},
17591759
"nbformat": 4,

tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,14 @@
22
"cells": [
33
{
44
"cell_type": "markdown",
5+
"id": "d6cf2c1d",
56
"metadata": {
67
"colab_type": "text",
78
"execution": {},
89
"id": "view-in-github"
910
},
1011
"source": [
11-
"<a href=\"https://colab.research.google.com/github/neuromatch/NeuroAI_Course/blob/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>"
12+
"<a href=\"https://colab.research.google.com/github/neuromatch/NeuroAI_Course/blob/main/tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/main/tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>"
1213
]
1314
},
1415
{
@@ -77,8 +78,7 @@
7778
},
7879
"source": [
7980
"---\n",
80-
"# Setup\n",
81-
"\n"
81+
"# Setup"
8282
]
8383
},
8484
{
@@ -160,7 +160,7 @@
160160
"logging.getLogger('matplotlib.font_manager').disabled = True\n",
161161
"\n",
162162
"%matplotlib inline\n",
163-
"%config InlineBackend.figure_format = 'retina' # perfrom high definition rendering for images and plots\n",
163+
"%config InlineBackend.figure_format = 'retina' # perform high definition rendering for images and plots\n",
164164
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle\")"
165165
]
166166
},
@@ -444,7 +444,7 @@
444444
"\n",
445445
" def __len__(self):\n",
446446
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
447-
" thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
447+
" thus we enforced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
448448
"\n",
449449
" return self.num_epochs * self.num_tasks\n",
450450
"\n",
@@ -758,7 +758,7 @@
758758
"\n",
759759
" def __len__(self):\n",
760760
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
761-
" thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
761+
" thus we enforced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
762762
"\n",
763763
" return self.num_epochs * self.num_tasks\n",
764764
"\n",
@@ -905,7 +905,7 @@
905905
"source": [
906906
"### Coding Exercise 1 Discussion\n",
907907
"\n",
908-
"1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?\n"
908+
"1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?"
909909
]
910910
},
911911
{
@@ -1420,7 +1420,7 @@
14201420
"execution": {}
14211421
},
14221422
"source": [
1423-
"*Estimated timing to here from start of tutorial: 35 minutes*\n"
1423+
"*Estimated timing to here from start of tutorial: 35 minutes*"
14241424
]
14251425
},
14261426
{
@@ -1759,7 +1759,7 @@
17591759
"name": "python",
17601760
"nbconvert_exporter": "python",
17611761
"pygments_lexer": "ipython3",
1762-
"version": "3.9.22"
1762+
"version": "3.10.19"
17631763
}
17641764
},
17651765
"nbformat": 4,

tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb

Lines changed: 13 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,14 @@
22
"cells": [
33
{
44
"cell_type": "markdown",
5+
"id": "252fd2fe",
56
"metadata": {
67
"colab_type": "text",
78
"execution": {},
89
"id": "view-in-github"
910
},
1011
"source": [
11-
"<a href=\"https://colab.research.google.com/github/neuromatch/NeuroAI_Course/blob/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>"
12+
"<a href=\"https://colab.research.google.com/github/neuromatch/NeuroAI_Course/blob/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> &nbsp; <a href=\"https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/main/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open in Kaggle\"/></a>"
1213
]
1314
},
1415
{
@@ -77,8 +78,7 @@
7778
},
7879
"source": [
7980
"---\n",
80-
"# Setup\n",
81-
"\n"
81+
"# Setup"
8282
]
8383
},
8484
{
@@ -160,7 +160,7 @@
160160
"logging.getLogger('matplotlib.font_manager').disabled = True\n",
161161
"\n",
162162
"%matplotlib inline\n",
163-
"%config InlineBackend.figure_format = 'retina' # perfrom high definition rendering for images and plots\n",
163+
"%config InlineBackend.figure_format = 'retina' # perform high definition rendering for images and plots\n",
164164
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle\")"
165165
]
166166
},
@@ -444,7 +444,7 @@
444444
"\n",
445445
" def __len__(self):\n",
446446
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
447-
" thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
447+
" thus we enforced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
448448
"\n",
449449
" return self.num_epochs * self.num_tasks\n",
450450
"\n",
@@ -758,7 +758,7 @@
758758
"\n",
759759
" def __len__(self):\n",
760760
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
761-
" thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
761+
" thus we enforced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
762762
"\n",
763763
" return self.num_epochs * self.num_tasks\n",
764764
"\n",
@@ -801,8 +801,7 @@
801801
"execution": {}
802802
},
803803
"source": [
804-
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_08b01bcf.py)\n",
805-
"\n"
804+
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_08b01bcf.py)"
806805
]
807806
},
808807
{
@@ -842,7 +841,7 @@
842841
"source": [
843842
"### Coding Exercise 1 Discussion\n",
844843
"\n",
845-
"1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?\n"
844+
"1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?"
846845
]
847846
},
848847
{
@@ -852,8 +851,7 @@
852851
"execution": {}
853852
},
854853
"source": [
855-
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_576c8d87.py)\n",
856-
"\n"
854+
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_576c8d87.py)"
857855
]
858856
},
859857
{
@@ -1015,8 +1013,7 @@
10151013
"execution": {}
10161014
},
10171015
"source": [
1018-
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_593cdcd4.py)\n",
1019-
"\n"
1016+
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_593cdcd4.py)"
10201017
]
10211018
},
10221019
{
@@ -1277,7 +1274,7 @@
12771274
"execution": {}
12781275
},
12791276
"source": [
1280-
"*Estimated timing to here from start of tutorial: 35 minutes*\n"
1277+
"*Estimated timing to here from start of tutorial: 35 minutes*"
12811278
]
12821279
},
12831280
{
@@ -1344,8 +1341,7 @@
13441341
"execution": {}
13451342
},
13461343
"source": [
1347-
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_2753b5eb.py)\n",
1348-
"\n"
1344+
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_2753b5eb.py)"
13491345
]
13501346
},
13511347
{
@@ -1589,7 +1585,7 @@
15891585
"name": "python",
15901586
"nbconvert_exporter": "python",
15911587
"pygments_lexer": "ipython3",
1592-
"version": "3.9.22"
1588+
"version": "3.10.19"
15931589
}
15941590
},
15951591
"nbformat": 4,

0 commit comments

Comments
 (0)