Swin Transformer for Object Detection - Inference with the COCO Dataset: 3. Cascade Mask R-CNN (Swin Transformer)
1. Reference
2. Edit the code
configs/swin/cascade_mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py
_base_ = [
    '../_base_/models/cascade_mask_rcnn_swin_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'  # noqa
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=dict(in_channels=[96, 192, 384, 768]))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                               (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                               (736, 1333), (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True)
            ],
            [
                dict(
                    type='Resize',
                    img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    override=True,
                    keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
lr_config = dict(warmup_iters=1000, step=[27, 33])
runner = dict(max_epochs=36)
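Since the backbone here is swapped in with `_delete_=True` and the optimizer is rebuilt from scratch, it is easy to break the `_base_` inheritance without noticing. A quick sanity check is to load the merged config with mmcv and inspect it; this is just a sketch, assuming the edited file is saved exactly at the path named above:

```python
# Print the fully merged config (base files + the overrides above).
from mmcv import Config

cfg = Config.fromfile(
    'configs/swin/cascade_mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py')
print(cfg.model.backbone.type)  # expect 'SwinTransformer'
print(cfg.optimizer.type)       # expect 'AdamW'
print(cfg.pretty_text)          # full resolved config after inheritance
```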
configs/_base_/models/cascade_mask_rcnn_swin_fpn.py
# model settings
model = dict(
    type='CascadeRCNN',
    pretrained=None,
    backbone=dict(
        type='SwinTransformer',
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        use_checkpoint=False),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 384, 768],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_across_levels=False,
            nms_pre=2000,
            nms_post=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_across_levels=False,
            nms_pre=1000,
            nms_post=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
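With both files in place, the detector can be instantiated once in a Python shell before committing to a multi-day run. The sketch below uses mmdet's `build_detector` and assumes an MMDetection version that registers `SwinTransformer` as a backbone:

```python
# Smoke test: build Cascade Mask R-CNN with the Swin-T backbone and count parameters.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile(
    'configs/swin/cascade_mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py')
model = build_detector(
    cfg.model,
    train_cfg=cfg.get('train_cfg'),  # may be None if train_cfg sits inside cfg.model
    test_cfg=cfg.get('test_cfg'))
num_params = sum(p.numel() for p in model.parameters()) / 1e6
print(f'{model.__class__.__name__}: {num_params:.1f}M parameters')
```

If this fails with an unregistered-module error, the installed mmdet/mmcv versions most likely do not match the config style used above.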
3. Train
- 4x RTX 3090 GPUs
- about 3~4 days of training
cascade_mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
bash ./tools/dist_train.sh configs/swin/cascade_mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py 4
cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
bash ./tools/dist_train.sh configs/swin/cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py 4
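Because the run takes several days, it can save time to confirm up front that the ImageNet-1k weights referenced by `pretrained` in the config download and load cleanly. A small hedged check (the official Swin releases nest the state dict under a 'model' key, hence the `.get`):

```python
# Download the Swin-T ImageNet-1k checkpoint and peek at its contents.
import torch

url = ('https://github.com/SwinTransformer/storage/releases/download/'
       'v1.0.0/swin_tiny_patch4_window7_224.pth')
ckpt = torch.hub.load_state_dict_from_url(url, map_location='cpu')
state = ckpt.get('model', ckpt)  # fall back to the raw dict if there is no 'model' key
print(len(state), 'tensors; sample keys:', list(state)[:3])
```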
4. Test
cascade_mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
python tools/test.py configs/swin/cascade_mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py checkpoint/cascade_mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_swin_fpn.pth --eval bbox segm
cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
python tools/test.py configs/swin/cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py checkpoint/cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_swin_fpn.pth --eval bbox segm
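For spot-checking a trained checkpoint on a single image instead of the full COCO val set, mmdet's high-level inference API also works. This is a sketch; the demo image path is just the sample image shipped with the mmdetection repo:

```python
# Single-image inference with the trained Swin-T checkpoint.
from mmdet.apis import init_detector, inference_detector, show_result_pyplot

config_file = 'configs/swin/cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py'
checkpoint_file = 'checkpoint/cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_swin_fpn.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # per-class (bbox results, segm results)
show_result_pyplot(model, 'demo/demo.jpg', result, score_thr=0.3)
```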
5. Results
cascade_mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
Evaluating bbox...
Loading and preparing results...
DONE (t=0.28s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=15.67s).
Accumulating evaluation results...
DONE (t=3.08s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.509
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 0.700
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.557
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.347
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.549
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.661
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.632
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.632
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.632
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.470
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.670
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.783
Evaluating segm...
/opt/conda/lib/python3.7/site-packages/mmdet/datasets/coco.py:474: UserWarning: The key "bbox" is deleted for more accurate mask AP of small/medium/large instances since v2.12.0. This does not change the overall mAP calculation.
UserWarning)
Loading and preparing results...
DONE (t=0.62s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *segm*
DONE (t=17.42s).
Accumulating evaluation results...
/opt/conda/lib/python3.7/site-packages/pycocotools/cocoeval.py:378: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
DONE (t=3.20s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.443
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 0.675
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.479
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.257
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.478
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.632
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.557
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.557
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.557
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.392
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.597
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.720
OrderedDict([('bbox_mAP', 0.509), ('bbox_mAP_50', 0.7), ('bbox_mAP_75', 0.557), ('bbox_mAP_s', 0.347), ('bbox_mAP_m', 0.549), ('bbox_mAP_l', 0.661), ('bbox_mAP_copypaste', '0.509 0.700 0.557 0.347 0.549 0.661'), ('segm_mAP', 0.443), ('segm_mAP_50', 0.675), ('segm_mAP_75', 0.479), ('segm_mAP_s', 0.257), ('segm_mAP_m', 0.478), ('segm_mAP_l', 0.632), ('segm_mAP_copypaste', '0.443 0.675 0.479 0.257 0.478 0.632')])
cascade_mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
Evaluating bbox...
Loading and preparing results...
DONE (t=0.28s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=17.13s).
Accumulating evaluation results...
DONE (t=3.64s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.489
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 0.681
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.536
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.321
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.522
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.634
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.620
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.620
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.620
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.457
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.657
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.768
Evaluating segm...
/opt/conda/lib/python3.7/site-packages/mmdet/datasets/coco.py:474: UserWarning: The key "bbox" is deleted for more accurate mask AP of small/medium/large instances since v2.12.0. This does not change the overall mAP calculation.
UserWarning)
Loading and preparing results...
DONE (t=0.73s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *segm*
DONE (t=19.19s).
Accumulating evaluation results...
/opt/conda/lib/python3.7/site-packages/pycocotools/cocoeval.py:378: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
DONE (t=3.45s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.428
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=1000 ] = 0.655
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=1000 ] = 0.464
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.229
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.460
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.616
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.549
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=300 ] = 0.549
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=1000 ] = 0.549
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=1000 ] = 0.372
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=1000 ] = 0.590
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=1000 ] = 0.712
OrderedDict([('bbox_mAP', 0.489), ('bbox_mAP_50', 0.681), ('bbox_mAP_75', 0.536), ('bbox_mAP_s', 0.321), ('bbox_mAP_m', 0.522), ('bbox_mAP_l', 0.634), ('bbox_mAP_copypaste', '0.489 0.681 0.536 0.321 0.522 0.634'), ('segm_mAP', 0.428), ('segm_mAP_50', 0.655), ('segm_mAP_75', 0.464), ('segm_mAP_s', 0.229), ('segm_mAP_m', 0.46), ('segm_mAP_l', 0.616), ('segm_mAP_copypaste', '0.428 0.655 0.464 0.229 0.460 0.616')])
| Backbone | Head | Pretrained | Dataset | box AP | mask AP |
|---|---|---|---|---|---|
| Swin-T | Cascade Mask R-CNN | ImageNet-1k | COCO | 48.9 | 42.8 |
| Swin-S | Cascade Mask R-CNN | ImageNet-1k | COCO | 50.9 | 44.3 |