Skip to content

Commit 55614f2

Browse files
Merge pull request #488 from yoshitomo-matsubara/dev
Force use of PyTorch Hub when repo_or_dir is given & fix typos
2 parents 7dc549c + b1fc3f7 commit 55614f2

File tree

5 files changed

+16
-15
lines changed

5 files changed

+16
-15
lines changed

configs/legacy/sample/ilsvrc2012/single_stage/ghnd/custom_inception_v3_from_inception_v3.yaml

+2-2
Original file line number | Diff line number | Diff line change
@@ -11,7 +11,7 @@ datasets:
1111
transform_params:
1212
- type: 'RandomResizedCrop'
1313
params:
14-
size: &input_size [224, 224]
14+
size: &input_size [299, 299]
1515
- type: 'RandomHorizontalFlip'
1616
params:
1717
p: 0.5
@@ -30,7 +30,7 @@ datasets:
3030
transform_params:
3131
- type: 'Resize'
3232
params:
33-
size: 256
33+
size: 327
3434
- type: 'CenterCrop'
3535
params:
3636
size: *input_size

configs/legacy/sample/ilsvrc2012/single_stage/hnd/custom_inception_v3_from_inception_v3.yaml

+2-2
Original file line number | Diff line number | Diff line change
@@ -11,7 +11,7 @@ datasets:
1111
transform_params:
1212
- type: 'RandomResizedCrop'
1313
params:
14-
size: &input_size [224, 224]
14+
size: &input_size [299, 299]
1515
- type: 'RandomHorizontalFlip'
1616
params:
1717
p: 0.5
@@ -30,7 +30,7 @@ datasets:
3030
transform_params:
3131
- type: 'Resize'
3232
params:
33-
size: 256
33+
size: 327
3434
- type: 'CenterCrop'
3535
params:
3636
size: *input_size

configs/sample/ilsvrc2012/ghnd/custom_inception_v3_from_inception_v3.yaml

+5-5
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,7 @@ datasets:
1515
key: 'torchvision.transforms.RandomResizedCrop'
1616
init:
1717
kwargs:
18-
size: &input_size [224, 224]
18+
size: &input_size [299, 299]
1919
- !import_call
2020
key: 'torchvision.transforms.RandomHorizontalFlip'
2121
init:
@@ -44,7 +44,7 @@ datasets:
4444
key: 'torchvision.transforms.Resize'
4545
init:
4646
kwargs:
47-
size: 256
47+
size: 327
4848
- !import_call
4949
key: 'torchvision.transforms.CenterCrop'
5050
init:
@@ -131,11 +131,11 @@ train:
131131
student:
132132
forward_proc: 'forward_batch_only'
133133
adaptations:
134-
sequential: ['bottleneck', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
134+
sequential: ['bottleneck_layer', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
135135
frozen_modules: ['Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
136136
forward_hook:
137137
input: []
138-
output: ['bottleneck', 'Mixed_5d', 'Mixed_6e', 'Mixed_7c']
138+
output: ['bottleneck_layer', 'Mixed_5d', 'Mixed_6e', 'Mixed_7c']
139139
wrapper: 'DistributedDataParallel'
140140
requires_grad: True
141141
optimizer:
@@ -161,7 +161,7 @@ train:
161161
kwargs:
162162
input:
163163
is_from_teacher: False
164-
module_path: 'bottleneck'
164+
module_path: 'bottleneck_layer'
165165
io: 'output'
166166
target:
167167
is_from_teacher: True

configs/sample/ilsvrc2012/hnd/custom_inception_v3_from_inception_v3.yaml

+6-5
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,7 @@ datasets:
1515
key: 'torchvision.transforms.RandomResizedCrop'
1616
init:
1717
kwargs:
18-
size: &input_size [224, 224]
18+
size: &input_size [299, 299]
1919
- !import_call
2020
key: 'torchvision.transforms.RandomHorizontalFlip'
2121
init:
@@ -44,7 +44,7 @@ datasets:
4444
key: 'torchvision.transforms.Resize'
4545
init:
4646
kwargs:
47-
size: 256
47+
size: 327
4848
- !import_call
4949
key: 'torchvision.transforms.CenterCrop'
5050
init:
@@ -68,6 +68,7 @@ models:
6868
weights: !getattr [*teacher_weights, 'IMAGENET1K_V1']
6969
src_ckpt:
7070
student_model:
71+
repo_or_dir: 'yoshitomo-matsubara/sc2-benchmark'
7172
key: &student_model_key 'custom_inception_v3'
7273
_weights: &student_weights !import_get
7374
key: 'torchvision.models.inception.Inception_V3_Weights'
@@ -131,11 +132,11 @@ train:
131132
student:
132133
forward_proc: 'forward_batch_only'
133134
adaptations:
134-
sequential: ['bottleneck']
135+
sequential: ['bottleneck_layer']
135136
frozen_modules: []
136137
forward_hook:
137138
input: []
138-
output: ['bottleneck']
139+
output: ['bottleneck_layer']
139140
wrapper: 'DistributedDataParallel'
140141
requires_grad: True
141142
optimizer:
@@ -161,7 +162,7 @@ train:
161162
kwargs:
162163
input:
163164
is_from_teacher: False
164-
module_path: 'bottleneck'
165+
module_path: 'bottleneck_layer'
165166
io: 'output'
166167
target:
167168
is_from_teacher: True

torchdistill/models/registry.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -148,7 +148,7 @@ def get_model(key, repo_or_dir=None, *args, **kwargs):
148148
:return: model.
149149
:rtype: nn.Module
150150
"""
151-
if key in MODEL_DICT:
151+
if key in MODEL_DICT and repo_or_dir is None:
152152
return MODEL_DICT[key](*args, **kwargs)
153153
elif repo_or_dir is not None:
154154
return torch.hub.load(repo_or_dir, key, *args, **kwargs)

0 commit comments

Comments (0)