Commit a57dd32

Merge pull request #19 from ArashAkbarinia/development
Merging from Development
2 parents 847245e + c1eeb73 commit a57dd32

File tree

11 files changed: +653 -21 lines changed


docs/source/conf.py

Lines changed: 18 additions & 0 deletions
@@ -34,9 +34,27 @@
     'sphinx.ext.doctest',
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
+    'sphinx.ext.mathjax',
     'myst_nb'
 ]

+myst_enable_extensions = [
+    "amsmath",
+    # "attrs_inline",
+    # "colon_fence",
+    # "deflist",
+    "dollarmath",
+    # "fieldlist",
+    # "html_admonition",
+    # "html_image",
+    # "linkify",
+    # "replacements",
+    # "smartquotes",
+    # "strikethrough",
+    # "substitution",
+    # "tasklist",
+]
+
 templates_path = ['_templates']
 exclude_patterns = []
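These additions wire math rendering into the documentation: sphinx.ext.mathjax handles the rendering, while the MyST dollarmath and amsmath extensions let notebook Markdown cells use dollar-delimited math and amsmath environments. An illustrative Markdown-cell fragment (not part of this commit) that these settings would support:

    The linear probe maps features $z$ to logits $y = Wz + b$, and for a 4AFC trial

    $$p_i = \mathrm{softmax}(y)_i$$

    \begin{align}
    \hat{\imath} &= \arg\max_i \, p_i
    \end{align}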

docs/source/examples.rst

Lines changed: 1 addition & 0 deletions
@@ -5,5 +5,6 @@ In the following notebooks, we show different examples of how to use :code:`oscu

 .. toctree::
    notebooks/quick_start
+   notebooks/activation_maps
    notebooks/odd_one_out
    :maxdepth: 1
docs/source/notebooks/activation_maps.ipynb

Lines changed: 492 additions & 0 deletions
Large diffs are not rendered by default.

docs/source/notebooks/odd_one_out.ipynb

Lines changed: 2 additions & 2 deletions
@@ -53,7 +53,7 @@
    "id": "2b21aabc-b485-4f2f-9ed2-3340ef923db1",
    "metadata": {},
    "source": [
-    "## Prtrained features\n",
+    "## Pretrained features\n",
     "\n",
     "Let's create a linear classifier on top of the extracted features from a pretrained network to \n",
     "perform a **4AFC odd-one-out (OOO)** task (i.e., which image out of four options is the \"odd\" one). \n",
@@ -69,7 +69,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "architecture = 'vit_b_32' # networks' architecture\n",
+    "architecture = 'vit_b_32' # network's architecture\n",
     "weights = 'vit_b_32' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block7' # the readout layer\n",

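The variables set in this notebook cell configure the odd-one-out probe. A minimal sketch of how they might be passed to OddOneOutNet; the keyword names are assumed from the constructor calls in tests/models/readout_test.py further down this diff, not taken from the notebook itself:

    from osculari.models import OddOneOutNet

    # Hypothetical sketch: a 4AFC odd-one-out probe on top of a pretrained ViT.
    net = OddOneOutNet(input_nodes=4,            # four images per trial
                       merge_paradigm='cat',     # concatenate the four feature vectors
                       architecture='vit_b_32',  # network's architecture
                       weights='vit_b_32',       # the pretrained weights
                       layers=['block7'],        # the readout layer(s)
                       img_size=224)             # network's input size
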
docs/source/notebooks/quick_start.ipynb

Lines changed: 2 additions & 2 deletions
@@ -57,7 +57,7 @@
    "id": "2d0848b0-b0b8-4def-8bac-684e060b8623",
    "metadata": {},
    "source": [
-    "## Prtrained features\n",
+    "## Pretrained features\n",
     "\n",
     "Let's create a linear classifier on top of the extracted features from a pretrained network to \n",
     "perform a binary classification task (i.e., 2AFC – two-alternative-force-choice). This is easily \n",
@@ -71,7 +71,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
     "weights = 'resnet50' # the pretrained weights\n",
     "img_size = 224 # network's input size\n",
     "layer = 'block0' # the readout layer\n",

docs/source/notebooks/usage.ipynb

Lines changed: 13 additions & 14 deletions
@@ -7,12 +7,12 @@
    "source": [
     "# Usage\n",
     "\n",
-    "This notebook shows how to use the `osculari` package.\n",
+    "This notebook demonstrates how to use the `osculari` package.\n",
     "\n",
-    "The `osculari` package consists of three main `modules`:\n",
-    "* `models`: to readout pretrained networks and add linear layers on top of them.\n",
-    "* `datasets`: to create datasets and dataloaders to train and test linear probes.\n",
-    "* `paradigms`: to implement psychophysical paradigms to experiment with deep networks."
+    "The `osculari` package is organized into three main `modules`:\n",
+    "* `models`: Used for reading pretrained networks and adding linear layers on top of them.\n",
+    "* `datasets`: Used to create datasets and dataloaders for training and testing linear probes.\n",
+    "* `paradigms`: Used to implement psychophysical paradigms for experimenting with deep networks."
    ]
   },
   {
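For orientation, a minimal sketch of the three modules described above (illustrative, not part of this commit; it assumes the submodules import as named in the bullet list):

    from osculari import models     # readout pretrained networks and add linear layers on top of them
    from osculari import datasets   # datasets and dataloaders to train and test linear probes
    from osculari import paradigms  # psychophysical paradigms to experiment with deep networks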
@@ -77,7 +77,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 73,
+   "execution_count": 2,
    "id": "e74f3e20-bb57-4511-baf7-d18da5cb38ed",
    "metadata": {},
    "outputs": [
@@ -168,8 +168,7 @@
     " 'deeplabv3_resnet101',\n",
     " 'deeplabv3_resnet50',\n",
     " 'fcn_resnet101',\n",
-    " 'fcn_resnet50',\n",
-    " 'lraspp_mobilenet_v3_large'],\n",
+    " 'fcn_resnet50'],\n",
     " 'taskonomy': ['taskonomy_autoencoding',\n",
     " 'taskonomy_class_object',\n",
     " 'taskonomy_class_scene',\n",
@@ -206,7 +205,7 @@
     " 'clip_ViT-L/14@336px']}"
    ]
   },
-   "execution_count": 73,
+   "execution_count": 2,
   "metadata": {},
   "output_type": "execute_result"
  }
@@ -242,7 +241,7 @@
    " - Downloadable URL of the pretrained weights.\n",
    " - A string corresponding to the available weight, for instance, [PyTorch resnet50](https://pytorch.org/vision/stable/models/generated/torchvision.models.resnet50.html) supports one\n",
    "of the following strings: \[\"*DEFAULT*\", \"*IMAGENET1K_V1*\", \"*IMAGENET1K_V2*\"\].\n",
-    " - The same name as `architecture` which loads the network's default weights.\n",
+    " - The same name as `architecture`, which loads the network's default weights.\n",
    "* `layers` determines the read-out (cut-off) layer(s). Which layers are available for each network\n",
    "can be obtained by calling the `models.available_layers()` function.\n",
    "\n",
@@ -272,7 +271,7 @@
   }
  ],
  "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
    "weights = 'resnet50' # the pretrained weights\n",
    "layer = 'block0' # the readout layer\n",
    "readout_kwargs = { # parameters for extracting features from the pretrained network\n",
@@ -431,7 +430,7 @@
   }
  ],
  "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
    "weights = 'resnet50' # the pretrained weights\n",
    "img_size = 224 # network's input size\n",
    "layer = 'block0' # the readout layer\n",
@@ -540,7 +539,7 @@
   }
  ],
  "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
    "weights = 'resnet50' # the pretrained weights\n",
    "img_size = 224 # network's input size\n",
    "layer = 'block0' # the readout layer\n",
@@ -629,7 +628,7 @@
   }
  ],
  "source": [
-    "architecture = 'resnet50' # networks' architecture\n",
+    "architecture = 'resnet50' # network's architecture\n",
    "weights = 'resnet50' # the pretrained weights\n",
    "img_size = 224 # network's input size\n",
    "layer = 'block0' # the readout layer\n",

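The usage notebook above points to the `models.available_layers()` helper for discovering valid read-out layers. A short sketch of that call (illustrative; the architecture argument and the 'block0'/'fc' style of names are taken from the notebook cells and tests in this diff):

    from osculari import models

    # List the valid read-out (cut-off) layers for a given architecture.
    layers = models.available_layers('resnet50')
    print(layers)  # expected to include names such as 'block0' and 'fc'
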
osculari/models/pretrained_layers.py

Lines changed: 1 addition & 1 deletion
@@ -177,7 +177,7 @@ def _available_segmentation_layers(architecture: str) -> List[str]:
     elif 'mobilenet' in architecture:
         return _available_mobilenet_layers(architecture)
     else:
-        RuntimeError('Unsupported segmentation network: %s' % architecture)
+        raise RuntimeError('Unsupported segmentation network: %s' % architecture)


 def _available_imagenet_layers(architecture: str) -> List[str]:

tests/models/model_utils_test.py

Lines changed: 5 additions & 0 deletions
@@ -96,3 +96,8 @@ def test_generic_features_size_fc():
     img_size = 128
     output_size = model_utils.generic_features_size(model, img_size)
     assert output_size == torch.Size([1000])
+
+
+def test_register_model_hooks_invalid_layers():
+    with pytest.raises(RuntimeError):
+        _ = model_utils.register_model_hooks(None, 'resnet50', ['invalid_layer'])
Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+"""
+Unit tests for pretrained_layers.py
+"""
+
+import pytest
+
+from osculari.models import pretrained_layers
+
+
+def test_available_segmentation_layers_invalid_network():
+    with pytest.raises(RuntimeError):
+        _ = pretrained_layers._available_segmentation_layers('invalid_architecture')
+
+
+def test_available_imagenet_layers_invalid_network():
+    with pytest.raises(RuntimeError):
+        _ = pretrained_layers._available_imagenet_layers('invalid_architecture')
+
+
+def test_available_layers_invalid_network():
+    with pytest.raises(RuntimeError):
+        _ = pretrained_layers.available_layers('invalid_architecture')
+
+
+def test_resnet_layer_invalid_layer():
+    with pytest.raises(RuntimeError):
+        _ = pretrained_layers.resnet_layer('invalid_layer')

tests/models/pretrained_models_test.py

Lines changed: 35 additions & 0 deletions
@@ -84,3 +84,38 @@ def test_model_features_invalid_layer():
     network = pretrained_models.get_pretrained_model('resnet18', 'none')
     with pytest.raises(RuntimeError):
         _ = pretrained_models.model_features(network, 'resnet18', 'invalid_layer')
+
+
+def test_vit_layers_invalid_blocks():
+    network = pretrained_models.get_pretrained_model('vit_b_32', 'none')
+    with pytest.raises(RuntimeError):
+        _ = pretrained_models.ViTLayers(network, 'block18')
+
+
+def test_vit_clip_layers_invalid_blocks():
+    network = pretrained_models.get_pretrained_model('clip_ViT-B/32', 'none')
+    with pytest.raises(RuntimeError):
+        _ = pretrained_models.ViTClipLayers(network.visual, 'block18')
+
+
+def test_regnet_layers_invalid_layer():
+    network = pretrained_models.get_pretrained_model('regnet_x_16gf', 'none')
+    with pytest.raises(RuntimeError):
+        _ = pretrained_models._regnet_features(network, 'invalid_layer')
+
+
+def test_vgg_layers_invalid_layer():
+    network = pretrained_models.get_pretrained_model('vgg11', 'none')
+    with pytest.raises(RuntimeError):
+        _ = pretrained_models._vgg_features(network, 'invalid_layer')
+
+
+def test_vgg_layers_classifier_layer():
+    network = pretrained_models.get_pretrained_model('vgg11', 'none')
+    features = pretrained_models._vgg_features(network, 'classifier0')
+    assert isinstance(list(features.children())[-1], torch.nn.Linear)
+
+
+def test_model_features_invalid_network():
+    with pytest.raises(RuntimeError):
+        _ = pretrained_models.model_features(None, 'invalid_architecture', 'fc')

tests/models/readout_test.py

Lines changed: 57 additions & 2 deletions
@@ -3,10 +3,12 @@
 """

 import pytest
+import numpy as np
 import torch
 from torch.testing import assert_close

-from osculari.models import OddOneOutNet, load_paradigm_ooo, load_paradigm_2afc
+from osculari.models import OddOneOutNet, load_paradigm_ooo
+from osculari.models import readout


 def test_odd_one_out_net_few_inputs():
@@ -38,7 +40,7 @@ def test_odd_one_out_net_init_diff():

 @pytest.mark.parametrize("merge_paradigm,expected", [("cat", 4), ("diff", 4)])
 def test_odd_one_out_net_forward_cat(merge_paradigm, expected):
-    # Test the forward pass of OddOneOutNet with merge_paradigm='cat'
+    # Test the forward pass of OddOneOutNet with merge_paradigm
     input_nodes = 4
     img_size = 224
     net = OddOneOutNet(input_nodes=input_nodes, merge_paradigm=merge_paradigm,
@@ -54,6 +56,41 @@ def test_odd_one_out_net_forward_cat(merge_paradigm, expected):
     assert output.shape == (2, input_nodes)


+def test_readout_mix_features_no_pooling():
+    # Test the readout with mix features with no pooling
+    input_nodes = 4
+    img_size = 224
+    with pytest.raises(RuntimeError):
+        _ = OddOneOutNet(input_nodes=input_nodes, merge_paradigm='cat', architecture='resnet50',
+                         weights=None, layers=['block0', 'fc'], img_size=img_size)
+
+
+def test_readout_mix_features_invalid_pooling():
+    # Test the readout with mix features with no pooling
+    input_nodes = 4
+    img_size = 224
+    with pytest.raises(RuntimeError):
+        _ = OddOneOutNet(input_nodes=input_nodes, merge_paradigm='cat', architecture='resnet50',
+                         weights=None, layers=['block0', 'fc'], img_size=img_size,
+                         pooling='invalid')
+
+
+def test_readout_mix_features_():
+    # Test the readout with mix features
+    input_nodes = 4
+    img_size = 224
+    net = OddOneOutNet(input_nodes=input_nodes, merge_paradigm='cat', architecture='resnet50',
+                       weights=None, layers=['block0', 'fc'], img_size=img_size, pooling='avg_2_2')
+
+    x1 = torch.randn(2, 3, img_size, img_size)
+    x2 = torch.randn(2, 3, img_size, img_size)
+    x3 = torch.randn(2, 3, img_size, img_size)
+    x4 = torch.randn(2, 3, img_size, img_size)
+
+    output = net(x1, x2, x3, x4)
+    assert output.shape == (2, input_nodes)
+
+
 def test_odd_one_out_net_serialization():
     # Test the serialization of OddOneOutNet
     input_nodes = 4
@@ -83,3 +120,21 @@ def test_odd_one_out_net_loss_function():

     loss = net.loss_function(output, target)
     assert loss.item() >= 0
+
+
+def test_preprocess_transform():
+    # Test the preprocess_transform of BackboneNet
+    net = readout.BackboneNet(architecture='taskonomy_autoencoding', weights=None)
+
+    # Create a dummy input signal (replace this with your actual input)
+    input_signal = np.random.uniform(size=(224, 224, 3))
+
+    # Apply the transformations
+    transform = net.preprocess_transform()
+    output_signal = transform(input_signal)
+
+    # Check if the output has the correct shape
+    assert output_signal.shape == (3, 224, 224)
+
+    # Check if the output has the correct normalization
+    assert -1 <= torch.all(output_signal) <= 1
