Upload 27 files
- configs/_base_/datasets/.ipynb_checkpoints/my_acdc-checkpoint.py +62 -0
- configs/_base_/datasets/ade20k.py +54 -0
- configs/_base_/datasets/ade20k_640x640.py +54 -0
- configs/_base_/datasets/cardiac.py +73 -0
- configs/_base_/datasets/chase_db1.py +60 -0
- configs/_base_/datasets/cityscapes.py +54 -0
- configs/_base_/datasets/cityscapes_1024x1024.py +35 -0
- configs/_base_/datasets/cityscapes_768x768.py +35 -0
- configs/_base_/datasets/cityscapes_769x769.py +35 -0
- configs/_base_/datasets/cityscapes_832x832.py +35 -0
- configs/_base_/datasets/coco-stuff10k.py +57 -0
- configs/_base_/datasets/coco-stuff164k.py +54 -0
- configs/_base_/datasets/drive.py +59 -0
- configs/_base_/datasets/hrf.py +59 -0
- configs/_base_/datasets/isaid.py +62 -0
- configs/_base_/datasets/laseg2013.py +69 -0
- configs/_base_/datasets/loveda.py +54 -0
- configs/_base_/datasets/mmwhs_mri.py +73 -0
- configs/_base_/datasets/my_acdc.py +68 -0
- configs/_base_/datasets/pascal_context.py +60 -0
- configs/_base_/datasets/pascal_context_59.py +60 -0
- configs/_base_/datasets/pascal_voc12.py +57 -0
- configs/_base_/datasets/pascal_voc12_aug.py +9 -0
- configs/_base_/datasets/potsdam.py +54 -0
- configs/_base_/datasets/stare.py +59 -0
- configs/_base_/datasets/task02heart.py +68 -0
- configs/_base_/datasets/vaihingen.py +54 -0
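The files listed above are dataset base configs; they are not run on their own but inherited by full mmsegmentation experiment configs through the `_base_` mechanism. A minimal sketch of such an experiment config is shown below; the model, runtime, and schedule base files it names are assumptions for illustration and are not part of this upload.

# configs/my_experiment/fcn_r50_my_acdc.py  (hypothetical example, not part of this upload)
_base_ = [
    '../_base_/models/fcn_r50-d8.py',       # assumed model base config
    '../_base_/datasets/my_acdc.py',        # dataset config from this upload
    '../_base_/default_runtime.py',         # assumed runtime base config
    '../_base_/schedules/schedule_40k.py',  # assumed schedule base config
]
# Keys defined in the dataset base config can be overridden here, e.g.
data = dict(samples_per_gpu=2, workers_per_gpu=2)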
configs/_base_/datasets/.ipynb_checkpoints/my_acdc-checkpoint.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'MyACDCDataset'
data_root = 'data/ACDC'
step = 's2'
# The mean and std here appear to be used for model initialization
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (256, 256)
crop_size = (224, 224)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=crop_size,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        # s1->530 s2->800
        times=400,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir=step + '/images/training',
            ann_dir=step + '/annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir=step + '/images/validation',
        ann_dir=step + '/annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir=step + '/images/validation',
        ann_dir=step + '/annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/ade20k.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/ade20k_640x640.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2560, 640),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/cardiac.py
ADDED
@@ -0,0 +1,73 @@
# dataset settings
dataset_type = 'CardiacDataset'
data_root = 'data/mr-cardiac'
# Changed occasionally
step = 's1'

# The mean and std here appear to be used for model initialization
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# Changed occasionally
img_scale = (300, 300)
crop_size = (224, 224)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=False),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0), keep_ratio=False),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=crop_size,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    shuffle=True,
    train=dict(
        type='RepeatDataset',
        # s1->530 s2->800
        times=40,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir=['train/image',
                     # 'aug_train/image'
                     ],
            ann_dir=['train/label',
                     # 'aug_train/label'
                     ],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='test/image',
        ann_dir='test/label',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='test/image',
        ann_dir='test/label',
        pipeline=test_pipeline))
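Several of these configs (cardiac.py, chase_db1.py, drive.py, hrf.py, and the ACDC and LA-seg files) wrap their training set in RepeatDataset so that a small dataset is iterated many times within one epoch. A minimal conceptual sketch of that wrapper is given below; it only illustrates the idea and is not mmsegmentation's actual implementation.

# Conceptual sketch of a repeat-style dataset wrapper (illustration only).
class RepeatDatasetSketch:
    """Make one pass over the wrapper equal `times` passes over `dataset`."""

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self._ori_len = len(dataset)

    def __getitem__(self, idx):
        # Indices simply wrap around the underlying dataset.
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        # e.g. times=40 on a 20-image training set -> 800 samples per "epoch".
        return self.times * self._ori_len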
configs/_base_/datasets/chase_db1.py
ADDED
@@ -0,0 +1,60 @@
# dataset settings
dataset_type = 'ChaseDB1Dataset'
data_root = 'data/CHASE_DB1'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (960, 999)
crop_size = (128, 128)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=1,
    train=dict(
        type='RepeatDataset',
        # The dataset is repeated 40000 times because the training set has only 20 images
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/cityscapes.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/train',
        ann_dir='gtFine/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline))
configs/_base_/datasets/cityscapes_1024x1024.py
ADDED
@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
configs/_base_/datasets/cityscapes_768x768.py
ADDED
@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (768, 768)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2049, 1025),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
configs/_base_/datasets/cityscapes_769x769.py
ADDED
@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (769, 769)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2049, 1025),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
configs/_base_/datasets/cityscapes_832x832.py
ADDED
@@ -0,0 +1,35 @@
_base_ = './cityscapes.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (832, 832)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
configs/_base_/datasets/coco-stuff10k.py
ADDED
@@ -0,0 +1,57 @@
# dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/coco_stuff10k'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=True,
        img_dir='images/train2014',
        ann_dir='annotations/train2014',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=True,
        img_dir='images/test2014',
        ann_dir='annotations/test2014',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        reduce_zero_label=True,
        img_dir='images/test2014',
        ann_dir='annotations/test2014',
        pipeline=test_pipeline))
configs/_base_/datasets/coco-stuff164k.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/coco_stuff164k'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/train2017',
        ann_dir='annotations/train2017',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/val2017',
        ann_dir='annotations/val2017',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/val2017',
        ann_dir='annotations/val2017',
        pipeline=test_pipeline))
configs/_base_/datasets/drive.py
ADDED
@@ -0,0 +1,59 @@
# dataset settings
dataset_type = 'DRIVEDataset'
data_root = 'data/DRIVE'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (584, 565)
crop_size = (64, 64)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/hrf.py
ADDED
@@ -0,0 +1,59 @@
# dataset settings
dataset_type = 'HRFDataset'
data_root = 'data/HRF'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (2336, 3504)
crop_size = (256, 256)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/isaid.py
ADDED
@@ -0,0 +1,62 @@
# dataset settings
dataset_type = 'iSAIDDataset'
data_root = 'data/iSAID'

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
"""
This crop_size setting is followed by the implementation of
`PointFlow: Flowing Semantics Through Points for Aerial Image
Segmentation <https://arxiv.org/pdf/2103.06564.pdf>`_.
"""

crop_size = (896, 896)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(896, 896), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(896, 896),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train',
        ann_dir='ann_dir/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline))
configs/_base_/datasets/laseg2013.py
ADDED
@@ -0,0 +1,69 @@
# dataset settings
dataset_type = 'MyLAseg2013Dataset'
data_root = 'data/LAseg2013'
# Changed occasionally
step = 's1'

# The mean and std here appear to be used for model initialization
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# Changed occasionally
img_scale = (320, 320)
crop_size = (256, 256)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=crop_size,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    shuffle=True,

    train=dict(
        type='RepeatDataset',
        # s1->530 s2->800
        times=1,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir=['mri_precise_test_2d/image', 'mri_precise_train_2d/image'],
            ann_dir=['mri_precise_test_2d/label', 'mri_precise_train_2d/label'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='mri_precise_test_2d/image',
        ann_dir='mri_precise_test_2d/label',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='mri_test_2d/image',
        ann_dir='mri_test_2d/label',
        pipeline=test_pipeline))
configs/_base_/datasets/loveda.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'LoveDADataset'
data_root = 'data/loveDA'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1024, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train',
        ann_dir='ann_dir/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline))
configs/_base_/datasets/mmwhs_mri.py
ADDED
@@ -0,0 +1,73 @@
# dataset settings
dataset_type = 'CardiacDataset'
data_root = 'data/mr-cardiac'
# Changed occasionally
step = 's1'

# The mean and std here appear to be used for model initialization
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# Changed occasionally
img_scale = (300, 300)
crop_size = (256, 256)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=False),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0),
         keep_ratio=False),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=crop_size,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    shuffle=True,
    train=dict(
        type='RepeatDataset',
        # s1->530 s2->800
        times=40,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            # img_dir=['mri_train_2d/image', 'mri_aug_2d/image', 'mri_test_2d/image'],
            # ann_dir=['mri_train_2d/label', 'mri_aug_2d/label', 'mri_test_2d/image'],
            img_dir=['mri_train_2d/image', 'mri_test_2d/image'],
            ann_dir=['mri_train_2d/label', 'mri_test_2d/label'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='mri_test_2d/image',
        ann_dir='mri_test_2d/label',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='mri_test_2d/image',
        ann_dir='mri_test_2d/label',
        pipeline=test_pipeline))
configs/_base_/datasets/my_acdc.py
ADDED
@@ -0,0 +1,68 @@
# dataset settings
dataset_type = 'MyACDCDataset'
data_root = 'data/ACDC'
# Changed occasionally
step = 's1'

# The mean and std here appear to be used for model initialization
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# Changed occasionally
img_scale = (300, 300)
crop_size = (256, 256)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=crop_size,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    shuffle=True,
    train=dict(
        type='RepeatDataset',
        # s1->530 s2->800
        times=100,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir=[step + '/images/training', step + '/images/validation'],
            ann_dir=[step + '/annotations/training', step + '/annotations/validation'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir=step + '/images/validation',
        ann_dir=step + '/annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir=step + '/images/validation',
        ann_dir=step + '/annotations/validation',
        pipeline=test_pipeline))
configs/_base_/datasets/pascal_context.py
ADDED
@@ -0,0 +1,60 @@
# dataset settings
dataset_type = 'PascalContextDataset'
data_root = 'data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

img_scale = (520, 520)
crop_size = (480, 480)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassContext',
        split='ImageSets/SegmentationContext/train.txt',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassContext',
        split='ImageSets/SegmentationContext/val.txt',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassContext',
        split='ImageSets/SegmentationContext/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/pascal_context_59.py
ADDED
@@ -0,0 +1,60 @@
# dataset settings
dataset_type = 'PascalContextDataset59'
data_root = 'data/VOCdevkit/VOC2010/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

img_scale = (520, 520)
crop_size = (480, 480)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassContext',
        split='ImageSets/SegmentationContext/train.txt',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassContext',
        split='ImageSets/SegmentationContext/val.txt',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassContext',
        split='ImageSets/SegmentationContext/val.txt',
        pipeline=test_pipeline))
configs/_base_/datasets/pascal_voc12.py
ADDED
@@ -0,0 +1,57 @@
# dataset settings
dataset_type = 'PascalVOCDataset'
data_root = 'data/VOCdevkit/VOC2012'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClass',
        split='ImageSets/Segmentation/train.txt',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClass',
        split='ImageSets/Segmentation/val.txt',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClass',
        split='ImageSets/Segmentation/val.txt',
        pipeline=test_pipeline))
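As a quick sanity check, a base file like this can also be loaded on its own and turned into a dataset object. A minimal sketch, assuming mmcv and mmsegmentation 0.x are installed and data/VOCdevkit/VOC2012 has been prepared as the config expects:

from mmcv import Config
from mmseg.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/pascal_voc12.py')
train_set = build_dataset(cfg.data.train)  # resolves img_dir/ann_dir/split
print(len(train_set))                      # number of ids listed in train.txt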
configs/_base_/datasets/pascal_voc12_aug.py
ADDED
@@ -0,0 +1,9 @@
_base_ = './pascal_voc12.py'
# dataset settings
data = dict(
    train=dict(
        ann_dir=['SegmentationClass', 'SegmentationClassAug'],
        split=[
            'ImageSets/Segmentation/train.txt',
            'ImageSets/Segmentation/aug.txt'
        ]))
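Because ann_dir and split are given as equal-length lists, mmsegmentation concatenates the standard VOC train split with the augmented (SBD-style) split once the values from pascal_voc12.py are merged in. Roughly, the resolved train dict looks like the sketch below (pipeline omitted for brevity):

train = dict(
    type='PascalVOCDataset',
    data_root='data/VOCdevkit/VOC2012',
    img_dir='JPEGImages',
    ann_dir=['SegmentationClass', 'SegmentationClassAug'],
    split=['ImageSets/Segmentation/train.txt',
           'ImageSets/Segmentation/aug.txt'],
    pipeline=[])  # train_pipeline from pascal_voc12.py in practice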
configs/_base_/datasets/potsdam.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'PotsdamDataset'
data_root = 'data/potsdam'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train',
        ann_dir='ann_dir/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline))
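The reduce_zero_label=True flag in LoadAnnotations is what sets this file apart from most of the others: label 0 is treated as an ignore class and every remaining class id is shifted down by one. A small standalone numpy sketch of that remapping (illustrative values, not mmseg code):

import numpy as np

gt = np.array([0, 1, 2, 6], dtype=np.uint8)  # raw Potsdam-style label ids
gt[gt == 0] = 255          # former class 0 becomes the ignore index
gt = gt - 1                # all other ids shift down by one
gt[gt == 254] = 255        # keep the ignore index at 255 after the shift
print(gt)                  # [255   0   1   5]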
configs/_base_/datasets/stare.py
ADDED
@@ -0,0 +1,59 @@
# dataset settings
dataset_type = 'STAREDataset'
data_root = 'data/STARE'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (605, 700)
crop_size = (128, 128)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
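The train split is wrapped in RepeatDataset with times=40000 because STARE provides only a handful of training images; repeating them gives the iteration-based runner a long virtual dataset without restarting epochs. A toy illustration of the wrapper's effect (plain Python sketch, not the actual mmseg class):

class RepeatSketch:
    """Index into a short dataset as if it were `times` copies long."""

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times

    def __len__(self):
        return self.times * len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % len(self.dataset)]

repeated = RepeatSketch(dataset=['im0', 'im1', 'im2'], times=40000)
print(len(repeated), repeated[40001])  # 120000 im2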
configs/_base_/datasets/task02heart.py
ADDED
@@ -0,0 +1,68 @@
# dataset settings
dataset_type = 'Mytask02heartDataset'
data_root = 'data/Task02_Heart'
# changed occasionally
step = 's1'

# the mean and std here appear to be used for model initialization
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# changed occasionally
img_scale = (320, 320)
crop_size = (256, 256)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=crop_size,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    shuffle=True,
    train=dict(
        type='RepeatDataset',
        # s1->530 s2->800
        times=1,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir=['mri_z_precise_train_2d/image'],
            ann_dir=['mri_z_precise_train_2d/label'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='mri_z_precise_train_2d/image',
        ann_dir='mri_z_precise_train_2d/label',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='mri_train_2d/image',
        ann_dir='mri_train_2d/label',
        pipeline=test_pipeline))
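The "# s1->530 s2->800" comment suggests the RepeatDataset times value is tuned per training stage. A back-of-the-envelope helper for that kind of tuning (plain Python; the slice count below is hypothetical), showing how the batch settings translate one pass over the repeated 2D slices into optimizer steps:

def steps_per_pass(num_slices, times, samples_per_gpu=4, num_gpus=1):
    # one pass over (num_slices * times) samples at the configured batch size
    return (num_slices * times) // (samples_per_gpu * num_gpus)

print(steps_per_pass(num_slices=1200, times=1))  # 300 (1200 slices / batch 4)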
configs/_base_/datasets/vaihingen.py
ADDED
@@ -0,0 +1,54 @@
# dataset settings
dataset_type = 'ISPRSDataset'
data_root = 'data/vaihingen'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(512, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train',
        ann_dir='ann_dir/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val',
        pipeline=test_pipeline))
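The commented img_ratios line that recurs in each test_pipeline hints at the usual way to enable multi-scale and flip test-time augmentation: a derived config re-declares test_pipeline with those options switched on. A sketch for the Vaihingen settings, mirroring the commented hint (illustrative, to be placed in a config that inherits this file):

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=True,  # adds horizontal-flip augmentation at test time
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))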