
Commit c1eeb73

Added notebook example to load activation maps.
1 parent e317457 commit c1eeb73

File tree

7 files changed: +528 −19 lines

docs/source/conf.py

Lines changed: 18 additions & 0 deletions
@@ -34,9 +34,27 @@
     'sphinx.ext.doctest',
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
+    'sphinx.ext.mathjax',
     'myst_nb'
 ]
 
+myst_enable_extensions = [
+    "amsmath",
+    # "attrs_inline",
+    # "colon_fence",
+    # "deflist",
+    "dollarmath",
+    # "fieldlist",
+    # "html_admonition",
+    # "html_image",
+    # "linkify",
+    # "replacements",
+    # "smartquotes",
+    # "strikethrough",
+    # "substitution",
+    # "tasklist",
+]
+
 templates_path = ['_templates']
 exclude_patterns = []
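The two uncommented MyST extensions are what the new notebook needs for mathematical notation: `dollarmath` enables `$...$` and `$$...$$` math, and `amsmath` allows LaTeX amsmath environments directly in markdown cells. A minimal illustration of the syntax this unlocks (the equations are placeholders, not taken from the notebook):

Inline math via dollarmath: $a(x) = \mathrm{ReLU}(Wx + b)$

Display math via dollarmath:

$$
\mathcal{L} = -\sum_i y_i \log \hat{y}_i
$$

An amsmath environment:

\begin{align}
z &= Wx + b \\
\hat{y} &= \mathrm{softmax}(z)
\end{align}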

docs/source/examples.rst

Lines changed: 1 addition & 0 deletions
@@ -5,5 +5,6 @@ In the following notebooks, we show different examples of how to use :code:`osculari`
 
 .. toctree::
    notebooks/quick_start
+   notebooks/activation_maps
    notebooks/odd_one_out
    :maxdepth: 1

docs/source/notebooks/activation_maps.ipynb

Lines changed: 492 additions & 0 deletions
Large diffs are not rendered by default.
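Since the 492-line notebook diff is not rendered, here is a minimal sketch of what "loading activation maps" generally looks like, using plain PyTorch forward hooks; the backbone and layer names are illustrative, not necessarily the API the notebook uses:

import torch
import torchvision.models as tv_models

# Load a pretrained backbone (illustrative choice).
net = tv_models.resnet50(weights=tv_models.ResNet50_Weights.DEFAULT).eval()

# Capture intermediate activations with a forward hook.
activations = {}

def save_activation(name):
    def hook(_module, _inputs, output):
        activations[name] = output.detach()
    return hook

net.layer1.register_forward_hook(save_activation('layer1'))

with torch.no_grad():
    net(torch.rand(1, 3, 224, 224))  # a dummy input image

# Each activation map is a (batch, channels, height, width) tensor.
print(activations['layer1'].shape)  # torch.Size([1, 256, 56, 56])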

docs/source/notebooks/odd_one_out.ipynb

Lines changed: 2 additions & 2 deletions
@@ -53,7 +53,7 @@
     "id": "2b21aabc-b485-4f2f-9ed2-3340ef923db1",
     "metadata": {},
     "source": [
-    "## Prtrained features\n",
+    "## Pretrained features\n",
     "\n",
     "Let's create a linear classifier on top of the extracted features from a pretrained network to \n",
     "perform a **4AFC odd-one-out (OOO)** task (i.e., which image out of four options is the \"odd\" one). \n",
@@ -69,7 +69,7 @@
     "metadata": {},
     "outputs": [],
     "source": [
-    "architecture = 'vit_b_32' # networks' architecture\n",
+    "architecture = 'vit_b_32' # network's architecture\n",
     "weights = 'vit_b_32' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block7' # the readout layer\n",

docs/source/notebooks/quick_start.ipynb

Lines changed: 2 additions & 2 deletions
@@ -57,7 +57,7 @@
     "id": "2d0848b0-b0b8-4def-8bac-684e060b8623",
     "metadata": {},
     "source": [
-    "## Prtrained features\n",
+    "## Pretrained features\n",
     "\n",
     "Let's create a linear classifier on top of the extracted features from a pretrained network to \n",
     "perform a binary classification task (i.e., 2AFC – two-alternative-force-choice). This is easily \n",
@@ -71,7 +71,7 @@
     "metadata": {},
     "outputs": [],
     "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
     "weights = 'resnet50' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block0' # the readout layer\n",

docs/source/notebooks/usage.ipynb

Lines changed: 13 additions & 14 deletions
@@ -7,12 +7,12 @@
     "source": [
     "# Usage\n",
     "\n",
-    "This notebook shows how to use the `osculari` package.\n",
+    "This notebook demonstrates how to use the `osculari` package.\n",
     "\n",
-    "The `osculari` package consists of three main `modules`:\n",
-    "* `models`: to readout pretrained networks and add linear layers on top of them.\n",
-    "* `datasets`: to create datasets and dataloaders to train and test linear probes.\n",
-    "* `paradigms`: to implement psychophysical paradigms to experiment with deep networks."
+    "The `osculari` package is organized into three main `modules`:\n",
+    "* `models`: Used for reading pretrained networks and adding linear layers on top of them.\n",
+    "* `datasets`: Used to create datasets and dataloaders for training and testing linear probes.\n",
+    "* `paradigms`: Used to implement psychophysical paradigms for experimenting with deep networks."
     ]
     },
     {
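In code, the three entry points described above are pulled in as submodules (import path assumed from the notebook's later cells):

# Assumed top-level layout of the package.
from osculari import models, datasets, paradigms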
@@ -77,7 +77,7 @@
     },
     {
     "cell_type": "code",
-    "execution_count": 73,
+    "execution_count": 2,
     "id": "e74f3e20-bb57-4511-baf7-d18da5cb38ed",
     "metadata": {},
     "outputs": [
@@ -168,8 +168,7 @@
     " 'deeplabv3_resnet101',\n",
     " 'deeplabv3_resnet50',\n",
     " 'fcn_resnet101',\n",
-    " 'fcn_resnet50',\n",
-    " 'lraspp_mobilenet_v3_large'],\n",
+    " 'fcn_resnet50'],\n",
     " 'taskonomy': ['taskonomy_autoencoding',\n",
     " 'taskonomy_class_object',\n",
     " 'taskonomy_class_scene',\n",
@@ -206,7 +205,7 @@
     " 'clip_ViT-L/14@336px']}"
     ]
     },
-    "execution_count": 73,
+    "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
     }
@@ -242,7 +241,7 @@
     " - Downloadable URL of the pretrained weights.\n",
     " - A string corresponding to the available weight, for instance, [PyTorch resnet50](https://pytorch.org/vision/stable/models/generated/torchvision.models.resnet50.html) supports one\n",
     "of the following strings: \["*DEFAULT*", "*IMAGENET1K_V1*", "*IMAGENET1K_V2*"\].\n",
-    " - The same name as `architecture` which loads the network's default weights.\n",
+    " - The same name as `architecture`, which loads the network's default weights.\n",
     "* `layers` determines the read-out (cut-off) layer(s). Which layers are available for each network\n",
     "can be obtained by calling the `models.available_layers()` function.\n",
     "\n",
@@ -272,7 +271,7 @@
     }
     ],
     "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
     "weights = 'resnet50' # the pretrained weights\n",
     "layer = 'block0' # the readout layer\n",
     "readout_kwargs = { # parameters for extracting features from the pretrained network\n",
@@ -431,7 +430,7 @@
     }
     ],
     "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
     "weights = 'resnet50' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block0' # the readout layer\n",
@@ -540,7 +539,7 @@
     }
     ],
     "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
     "weights = 'resnet50' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block0' # the readout layer\n",
@@ -629,7 +628,7 @@
     }
     ],
     "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
     "weights = 'resnet50' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block0' # the readout layer\n",

tests/models/readout_test.py

Lines changed: 0 additions & 1 deletion
@@ -125,7 +125,6 @@ def test_odd_one_out_net_loss_function():
 def test_preprocess_transform():
     # Test the preprocess_transform of BackboneNet
     net = readout.BackboneNet(architecture='taskonomy_autoencoding', weights=None)
-    mean, std = net.normalise_mean_std
 
     # Create a dummy input signal (replace this with your actual input)
     input_signal = np.random.uniform(size=(224, 224, 3))
