[Refactor] default runtime config (#447)
* init

* Update default_runtime.py
zytx121 authored Aug 11, 2022
1 parent 0cb51bd commit 45224a3
Showing 5 changed files with 125 additions and 68 deletions.
37 changes: 21 additions & 16 deletions configs/_base_/default_runtime.py
@@ -1,19 +1,24 @@
-# yapf:disable
-log_config = dict(
-    interval=50,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        # dict(type='TensorboardLoggerHook')
-    ])
-# yapf:enable
+default_scope = 'mmrotate'
+
+default_hooks = dict(
+    timer=dict(type='IterTimerHook'),
+    logger=dict(type='LoggerHook', interval=50),
+    param_scheduler=dict(type='ParamSchedulerHook'),
+    checkpoint=dict(type='CheckpointHook', interval=1),
+    sampler_seed=dict(type='DistSamplerSeedHook'))
+
+env_cfg = dict(
+    cudnn_benchmark=False,
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+    dist_cfg=dict(backend='nccl'),
+)
+
+# TODO: Visualizer is not ready.
+# vis_backends = [dict(type='LocalVisBackend')]
+# visualizer = dict(
+#     type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
+log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
 
-dist_params = dict(backend='nccl')
 log_level = 'INFO'
 load_from = None
-resume_from = None
-workflow = [('train', 1)]
-
-# disable opencv multithreading to avoid system being overloaded
-opencv_num_threads = 0
-# set multi-process start method as `fork` to speed up the training
-mp_start_method = 'fork'
+resume = False
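
The refactor replaces keys read by the old MMCV runner (log_config, dist_params, workflow, resume_from) with keys read by MMEngine's Runner (default_scope, default_hooks, env_cfg, log_processor, resume). A minimal sketch of how a config inheriting this base is consumed — the config path is hypothetical, and this assumes mmrotate 1.x with MMEngine installed:

# Minimal sketch, assuming mmrotate 1.x + MMEngine; the config path is hypothetical.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('configs/my_model/my_model-le90_r50_fpn_1x_dota.py')
cfg.work_dir = './work_dirs/demo'  # Runner requires an explicit work_dir

# Runner picks up default_scope, default_hooks, env_cfg and log_processor
# from the merged config; resume=False means start fresh unless load_from is set.
runner = Runner.from_cfg(cfg)
runner.train()
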
39 changes: 26 additions & 13 deletions configs/_base_/schedules/schedule_1x.py
@@ -1,14 +1,27 @@
-# evaluation
-evaluation = dict(interval=1, metric='mAP')
+# training schedule for 1x
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+# learning rate
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=1.0 / 3,
+        by_epoch=False,
+        begin=0,
+        end=500),
+    dict(
+        type='MultiStepLR',
+        begin=0,
+        end=12,
+        by_epoch=True,
+        milestones=[8, 11],
+        gamma=0.1)
+]
+
 # optimizer
-optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-# learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=1.0 / 3,
-    step=[8, 11])
-runner = dict(type='EpochBasedRunner', max_epochs=12)
-checkpoint_config = dict(interval=1)
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
+    clip_grad=dict(max_norm=35, norm_type=2))
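
The new param_scheduler expresses the same policy as the old lr_config: a linear warmup from 1/3 of the base LR over the first 500 iterations (by_epoch=False), then a step decay by gamma=0.1 at epochs 8 and 11 (by_epoch=True). A self-contained sketch of the resulting LR multiplier; iters_per_epoch is an illustrative assumption, not something fixed by the config:

def lr_factor(it, iters_per_epoch=1000, warmup_iters=500,
              start_factor=1.0 / 3, milestones=(8, 11), gamma=0.1):
    # LinearLR: ramp from start_factor up to 1.0 across the warmup iterations
    if it < warmup_iters:
        return start_factor + (1.0 - start_factor) * it / warmup_iters
    # MultiStepLR: multiply by gamma once per milestone epoch already passed
    epoch = it // iters_per_epoch
    return gamma ** sum(epoch >= m for m in milestones)

print(lr_factor(0))      # ~0.333, warmup start (old warmup_ratio=1.0 / 3)
print(lr_factor(500))    # 1.0, warmup finished (old warmup_iters=500)
print(lr_factor(8500))   # 0.1, past the epoch-8 milestone
print(lr_factor(11500))  # 0.01, past the epoch-11 milestone
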
39 changes: 26 additions & 13 deletions configs/_base_/schedules/schedule_3x.py
@@ -1,14 +1,27 @@
-# evaluation
-evaluation = dict(interval=1, metric='mAP')
+# training schedule for 3x
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=36, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+# learning rate
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=1.0 / 3,
+        by_epoch=False,
+        begin=0,
+        end=500),
+    dict(
+        type='MultiStepLR',
+        begin=0,
+        end=36,
+        by_epoch=True,
+        milestones=[24, 33],
+        gamma=0.1)
+]
+
 # optimizer
-optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-# learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=1.0 / 3,
-    step=[24, 33])
-runner = dict(type='EpochBasedRunner', max_epochs=36)
-checkpoint_config = dict(interval=1)
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
+    clip_grad=dict(max_norm=35, norm_type=2))
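
Downstream configs select one of these schedule files through _base_ inheritance and override only the keys that differ. A hypothetical example (file name and override values are illustrative, not part of this commit):

# Hypothetical downstream config: reuse the 3x schedule but stop at 24 epochs.
_base_ = [
    '../_base_/schedules/schedule_3x.py',
    '../_base_/default_runtime.py',
]

# Dict values merge with the base, so type='EpochBasedTrainLoop' is inherited.
train_cfg = dict(max_epochs=24)
# Lists replace rather than merge, so restate the full scheduler here.
param_scheduler = [
    dict(type='LinearLR', start_factor=1.0 / 3, by_epoch=False, begin=0, end=500),
    dict(type='MultiStepLR', begin=0, end=24, by_epoch=True,
         milestones=[16, 22], gamma=0.1),
]
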
39 changes: 26 additions & 13 deletions configs/_base_/schedules/schedule_40e.py
@@ -1,14 +1,27 @@
-# evaluation
-evaluation = dict(interval=1, metric='mAP')
+# training schedule for 40e
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=40, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+# learning rate
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=1.0 / 3,
+        by_epoch=False,
+        begin=0,
+        end=500),
+    dict(
+        type='MultiStepLR',
+        begin=0,
+        end=40,
+        by_epoch=True,
+        milestones=[24, 32, 38],
+        gamma=0.1)
+]
+
 # optimizer
-optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-# learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=1.0 / 3,
-    step=[24, 32, 38])
-runner = dict(type='EpochBasedRunner', max_epochs=40)
-checkpoint_config = dict(interval=1)
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
+    clip_grad=dict(max_norm=35, norm_type=2))
39 changes: 26 additions & 13 deletions configs/_base_/schedules/schedule_6x.py
@@ -1,14 +1,27 @@
-# evaluation
-evaluation = dict(interval=1, metric='mAP')
+# training schedule for 6x
+train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=72, val_interval=1)
+val_cfg = dict(type='ValLoop')
+test_cfg = dict(type='TestLoop')
+
+# learning rate
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=1.0 / 3,
+        by_epoch=False,
+        begin=0,
+        end=500),
+    dict(
+        type='MultiStepLR',
+        begin=0,
+        end=72,
+        by_epoch=True,
+        milestones=[48, 66],
+        gamma=0.1)
+]
+
 # optimizer
-optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-# learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=1.0 / 3,
-    step=[48, 66])
-runner = dict(type='EpochBasedRunner', max_epochs=72)
-checkpoint_config = dict(interval=1)
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001),
+    clip_grad=dict(max_norm=35, norm_type=2))
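
Across all five files, the commit applies one fixed key-for-key migration from the 0.x (MMCV-style) config surface to the 1.x (MMEngine-style) one. Summarized as a plain mapping for reference, derived only from the diffs above:

# Old (mmcv-style) key  ->  new (mmengine-style) location, per the diffs above
OLD_TO_NEW = {
    'log_config': 'default_hooks.logger + log_processor',
    'dist_params': 'env_cfg.dist_cfg',
    'mp_start_method': 'env_cfg.mp_cfg.mp_start_method',
    'opencv_num_threads': 'env_cfg.mp_cfg.opencv_num_threads',
    'resume_from': 'load_from + resume',
    'workflow': 'train_cfg / val_cfg / test_cfg loops',
    'evaluation': 'train_cfg.val_interval (interval only)',
    'runner': 'train_cfg (loop type + max_epochs)',
    'checkpoint_config': 'default_hooks.checkpoint',
    'optimizer': 'optim_wrapper.optimizer',
    'optimizer_config': 'optim_wrapper.clip_grad',
    'lr_config': 'param_scheduler (LinearLR + MultiStepLR)',
}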
