-
Notifications
You must be signed in to change notification settings - Fork 2.6k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat: add zero_mould v1 and v2 config files
- Loading branch information
1 parent
01e82a7
commit 837ee77
Showing 2 changed files with 574 additions and 0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,287 @@ | ||
# Base recipe reference:
# https://github.com/open-mmlab/mmsegmentation/blob/main/configs/unet/README.md
#
# MMSegmentation training config for the zero_mould_v1 dataset (5 classes).
#
# NOTE(review): the model defined below is ResNetV1c + PSPHead (PSPNet),
# but `load_from` points at a UNet/DeepLabV3 HRF checkpoint and the README
# link above is for UNet — confirm the intended architecture/checkpoint
# pairing before training.

# Training/inference crop size (H, W).
crop_size = (
    256,
    256,
)

# Input normalization: standard ImageNet mean/std; pads seg maps with the
# ignore index 255.
data_preprocessor = dict(
    bgr_to_rgb=True,
    mean=[
        123.675,
        116.28,
        103.53,
    ],
    pad_val=0,
    seg_pad_val=255,
    size=crop_size,
    std=[
        58.395,
        57.12,
        57.375,
    ],
    type='SegDataPreProcessor')
data_root = 'data/zero_mould_v1/'
dataset_type = 'ZeroMouldV1Dataset'

# Standard mmengine hooks; checkpoints every 4000 iters, logs every 50.
default_hooks = dict(
    checkpoint=dict(by_epoch=False, interval=4000, type='CheckpointHook'),
    logger=dict(interval=50, log_metric_by_epoch=False, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    timer=dict(type='IterTimerHook'),
    visualization=dict(type='SegVisualizationHook'))
default_scope = 'mmseg'
device = 'cuda'
env_cfg = dict(
    cudnn_benchmark=True,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
gpu_ids = range(0, 1)

# Multi-scale ratios used by the TTA pipeline below.
img_ratios = [
    0.5,
    0.75,
    1.0,
    1.25,
    1.5,
    1.75,
]

# Pretrained weights to resume feature extraction from (see NOTE above re:
# architecture mismatch with the PSPNet model defined here).
load_from = 'checkpoints/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_256x256_40k_hrf_20211210_202032-59daf7a4.pth'
log_level = 'INFO'
log_processor = dict(by_epoch=False)

# PSPNet: ResNetV1c backbone, PSPHead decode head, FCN auxiliary head.
model = dict(
    auxiliary_head=dict(
        align_corners=False,
        channels=256,
        concat_input=False,
        dropout_ratio=0.1,
        in_channels=1024,
        in_index=2,  # taps backbone stage 3 (C4) features
        loss_decode=dict(
            loss_weight=0.4, type='CrossEntropyLoss', use_sigmoid=False),
        norm_cfg=dict(requires_grad=True, type='BN'),
        num_classes=5,
        num_convs=1,
        type='FCNHead'),
    backbone=dict(
        contract_dilation=True,
        depth=50,
        # Dilated stages 3/4 keep output stride at 8 (strides 1,2,1,1).
        dilations=(
            1,
            1,
            2,
            4,
        ),
        norm_cfg=dict(requires_grad=True, type='BN'),
        norm_eval=False,
        num_stages=4,
        out_indices=(
            0,
            1,
            2,
            3,
        ),
        strides=(
            1,
            2,
            1,
            1,
        ),
        style='pytorch',
        type='ResNetV1c'),
    data_preprocessor=dict(
        bgr_to_rgb=True,
        mean=[
            123.675,
            116.28,
            103.53,
        ],
        pad_val=0,
        seg_pad_val=255,
        size=crop_size,
        std=[
            58.395,
            57.12,
            57.375,
        ],
        type='SegDataPreProcessor'),
    decode_head=dict(
        align_corners=False,
        channels=512,
        dropout_ratio=0.1,
        in_channels=2048,
        in_index=3,  # deepest backbone stage (C5)
        loss_decode=dict(
            loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False),
        norm_cfg=dict(requires_grad=True, type='BN'),
        num_classes=5,
        pool_scales=(
            1,
            2,
            3,
            6,
        ),
        type='PSPHead'),
    pretrained='open-mmlab://resnet50_v1c',
    test_cfg=dict(mode='whole'),
    train_cfg=dict(),
    type='EncoderDecoder')
norm_cfg = dict(requires_grad=True, type='BN')

# SGD with poly LR decay over the full schedule.
optim_wrapper = dict(
    clip_grad=None,
    optimizer=dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005),
    type='OptimWrapper')
optimizer = dict(lr=0.01, momentum=0.9, type='SGD', weight_decay=0.0005)
param_scheduler = [
    dict(
        begin=0,
        by_epoch=False,
        end=40000,
        eta_min=0.0001,
        power=0.9,
        type='PolyLR'),
]
resume = False
seed = 0

test_cfg = dict(type='TestLoop')
test_dataloader = dict(
    batch_size=1,
    dataset=dict(
        data_prefix=dict(
            img_path='img_dir', seg_map_path='ann_dir'),
        data_root=data_root,
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(
                2048,
                1024,
            ), type='Resize'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ],
        type=dataset_type),
    num_workers=4,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(
    iou_metrics=[
        'mIoU',
    ], type='IoUMetric')
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(keep_ratio=True, scale=(
        2048,
        1024,
    ), type='Resize'),
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs'),
]

# Short debug schedule; the commented-out line is the full 40k schedule.
train_cfg = dict(max_iters=1000, type='IterBasedTrainLoop', val_interval=100)
# train_cfg = dict(max_iters=40000, type='IterBasedTrainLoop', val_interval=4000)
train_dataloader = dict(
    batch_size=2,
    drop_last=False,
    dataset=dict(
        data_prefix=dict(
            img_path='img_dir', seg_map_path='ann_dir'),
        data_root=data_root,
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations'),
            dict(
                keep_ratio=True,
                ratio_range=(
                    0.5,
                    2.0,
                ),
                scale=(
                    2048,
                    1024,
                ),
                type='RandomResize'),
            dict(
                cat_max_ratio=0.75, crop_size=crop_size, type='RandomCrop'),
            dict(prob=0.5, type='RandomFlip'),
            dict(type='PhotoMetricDistortion'),
            dict(type='PackSegInputs'),
        ],
        type=dataset_type),
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=True, type='InfiniteSampler'))
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(
        keep_ratio=True,
        ratio_range=(
            0.5,
            2.0,
        ),
        scale=(
            2048,
            1024,
        ),
        type='RandomResize'),
    dict(cat_max_ratio=0.75, crop_size=crop_size, type='RandomCrop'),
    dict(prob=0.5, type='RandomFlip'),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackSegInputs'),
]

# Test-time augmentation: 6 scales x 2 horizontal flips (see img_ratios).
tta_model = dict(type='SegTTAModel')
tta_pipeline = [
    dict(backend_args=None, type='LoadImageFromFile'),
    dict(
        transforms=[
            [
                dict(keep_ratio=True, scale_factor=0.5, type='Resize'),
                dict(keep_ratio=True, scale_factor=0.75, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.0, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.25, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.5, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.75, type='Resize'),
            ],
            [
                dict(direction='horizontal', prob=0.0, type='RandomFlip'),
                dict(direction='horizontal', prob=1.0, type='RandomFlip'),
            ],
            [
                dict(type='LoadAnnotations'),
            ],
            [
                dict(type='PackSegInputs'),
            ],
        ],
        type='TestTimeAug'),
]

val_cfg = dict(type='ValLoop')
val_dataloader = dict(
    batch_size=1,
    dataset=dict(
        data_prefix=dict(
            img_path='img_dir', seg_map_path='ann_dir'),
        data_root=data_root,
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(
                2048,
                1024,
            ), type='Resize'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ],
        type=dataset_type),
    num_workers=4,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(
    iou_metrics=[
        'mIoU',
    ], type='IoUMetric')

vis_backends = [
    dict(type='LocalVisBackend'),
]
visualizer = dict(
    name='visualizer',
    type='SegLocalVisualizer',
    vis_backends=[
        dict(type='LocalVisBackend'),
    ])
work_dir = './work_dirs/zero_mould_v1'
Oops, something went wrong.