mytcl committed
Commit 1bae768 · verified · 1 Parent(s): 13c3d93

Upload 7 files
laseg_best/20241010_140609/20241010_140609.log ADDED
The diff for this file is too large to render. See raw diff
 
laseg_best/20241010_140609/iter_25000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87721cc6e55acd291f39888c6b9e7ddc0654b950528403d7378a462acf01d011
+ size 562100232
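
Note: the .pth entry above is a Git LFS pointer, so the diff records only the object id and size (about 562 MB), not the weights themselves. A minimal retrieval sketch, assuming the repository is hosted on the Hugging Face Hub (the repo id below is a placeholder, not taken from this commit):

import torch
from huggingface_hub import hf_hub_download

# Download the LFS-backed checkpoint and inspect its contents.
ckpt_path = hf_hub_download(
    repo_id='<user>/<repo>',  # hypothetical repo id, replace with the actual one
    filename='laseg_best/20241010_140609/iter_25000.pth',
)
ckpt = torch.load(ckpt_path, map_location='cpu')
print(ckpt.keys())  # mmengine checkpoints usually expose 'meta', 'state_dict', 'optimizer', ...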
laseg_best/20241010_140609/last_checkpoint ADDED
@@ -0,0 +1 @@
+ /home/zhangxingyu/mmlab/mmsegmentation/work_dirs/fcn_psvt_256x256_25k_laseg/iter_25000.pth
laseg_best/20241010_140609/vis_data/20241010_140609.json ADDED
The diff for this file is too large to render. See raw diff
 
laseg_best/20241010_140609/vis_data/config.py ADDED
@@ -0,0 +1,308 @@
+ checkpoint_file = './work_dirs/fcn_psvt_256x256_80k_cardiac/20240916_112410/iter_80000.pth'
+ crop_size = (
+     256,
+     256,
+ )
+ data_preprocessor = dict(
+     bgr_to_rgb=True,
+     mean=[
+         123.675,
+         116.28,
+         103.53,
+     ],
+     pad_val=0,
+     seg_pad_val=255,
+     size=(
+         256,
+         256,
+     ),
+     std=[
+         58.395,
+         57.12,
+         57.375,
+     ],
+     type='SegDataPreProcessor')
+ data_root = 'data/LAseg2013'
+ dataset_type = 'MyLAseg2013Dataset'
+ default_hooks = dict(
+     checkpoint=dict(by_epoch=False, interval=2000, type='CheckpointHook'),
+     logger=dict(interval=50, log_metric_by_epoch=False, type='LoggerHook'),
+     param_scheduler=dict(type='ParamSchedulerHook'),
+     sampler_seed=dict(type='DistSamplerSeedHook'),
+     timer=dict(type='IterTimerHook'),
+     visualization=dict(type='SegVisualizationHook'))
+ default_scope = 'mmseg'
+ env_cfg = dict(
+     cudnn_benchmark=True,
+     dist_cfg=dict(backend='nccl'),
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
+ find_unused_parameters = True
+ img_norm_cfg = dict(
+     mean=[
+         123.675,
+         116.28,
+         103.53,
+     ],
+     std=[
+         58.395,
+         57.12,
+         57.375,
+     ],
+     to_rgb=True)
+ img_scale = (
+     300,
+     300,
+ )
+ launcher = 'pytorch'
+ load_from = None
+ log_level = 'INFO'
+ log_processor = dict(by_epoch=False)
+ model = dict(
+     backbone=dict(
+         ape=False,
+         depths=[
+             2,
+             2,
+             6,
+             2,
+         ],
+         drop_path_rate=0.2,
+         img_size=256,
+         in_channels=3,
+         type='PSVT',
+         window_size=[
+             8,
+             8,
+             8,
+             8,
+         ]),
+     data_preprocessor=dict(
+         bgr_to_rgb=True,
+         mean=[
+             123.675,
+             116.28,
+             103.53,
+         ],
+         pad_val=0,
+         seg_pad_val=255,
+         size=(
+             256,
+             256,
+         ),
+         std=[
+             58.395,
+             57.12,
+             57.375,
+         ],
+         type='SegDataPreProcessor'),
+     decode_head=dict(
+         channels=96,
+         in_channels=96,
+         in_index=-1,
+         loss_decode=[
+             dict(
+                 loss_name='loss_ce', loss_weight=1.0, type='CrossEntropyLoss'),
+             dict(loss_name='loss_dice', loss_weight=2.0, type='DiceLoss'),
+         ],
+         norm_cfg=dict(requires_grad=True, type='SyncBN'),
+         num_classes=2,
+         type='FCNHead'),
+     test_cfg=dict(crop_size=(
+         512,
+         512,
+     ), mode='whole', stride=(
+         85,
+         85,
+     )),
+     train_cfg=dict(),
+     type='EncoderDecoder')
+ norm_cfg = dict(requires_grad=True, type='SyncBN')
+ optim_wrapper = dict(
+     clip_grad=None,
+     optimizer=dict(lr=0.01, type='AdamW', weight_decay=0.1),
+     type='OptimWrapper')
+ optimizer = dict(lr=0.01, type='AdamW', weight_decay=0.1)
+ param_scheduler = [
+     dict(
+         begin=0, by_epoch=False, end=12000, start_factor=0.03,
+         type='LinearLR'),
+     dict(
+         begin=12000,
+         by_epoch=False,
+         end=24000,
+         eta_min_ratio=0.03,
+         power=0.9,
+         type='PolyLRRatio'),
+     dict(begin=24000, by_epoch=False, end=25000, factor=1, type='ConstantLR'),
+ ]
+ resume = False
+ step = 's1'
+ test_cfg = dict(type='TestLoop')
+ test_dataloader = dict(
+     batch_size=16,
+     dataset=dict(
+         data_prefix=dict(
+             img_path='mri_test_2d/image', seg_map_path='mri_test_2d/label'),
+         data_root='data/LAseg2013',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(keep_ratio=False, scale=(
+                 256,
+                 256,
+             ), type='Resize'),
+             dict(type='LoadAnnotations'),
+             dict(type='PackSegInputs'),
+         ],
+         type='MyLAseg2013Dataset'),
+     drop_last=False,
+     num_workers=16,
+     persistent_workers=True,
+     sampler=dict(shuffle=False, type='DefaultSampler'))
+ test_evaluator = dict(
+     iou_metrics=[
+         'mIoU',
+     ], type='IoUMetric')
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         flip=False,
+         img_scale=(
+             256,
+             256,
+         ),
+         transforms=[
+             dict(keep_ratio=False, type='Resize'),
+             dict(type='RandomFlip'),
+             dict(
+                 mean=[
+                     123.675,
+                     116.28,
+                     103.53,
+                 ],
+                 std=[
+                     58.395,
+                     57.12,
+                     57.375,
+                 ],
+                 to_rgb=True,
+                 type='Normalize'),
+             dict(keys=[
+                 'img',
+             ], type='ImageToTensor'),
+             dict(keys=[
+                 'img',
+             ], type='Collect'),
+         ],
+         type='MultiScaleFlipAug'),
+ ]
+ train_cfg = dict(max_iters=25000, type='IterBasedTrainLoop', val_interval=1000)
+ train_dataloader = dict(
+     batch_size=16,
+     dataset=dict(
+         data_prefix=dict(
+             img_path='mri_train_2d/image', seg_map_path='mri_train_2d/label'),
+         data_root='data/LAseg2013',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(type='LoadAnnotations'),
+             dict(
+                 keep_ratio=True,
+                 ratio_range=(
+                     0.5,
+                     2.0,
+                 ),
+                 scale=(
+                     300,
+                     300,
+                 ),
+                 type='RandomResize'),
+             dict(
+                 cat_max_ratio=0.75, crop_size=(
+                     256,
+                     256,
+                 ), type='RandomCrop'),
+             dict(prob=0.5, type='RandomFlip'),
+             dict(type='PhotoMetricDistortion'),
+             dict(type='PackSegInputs'),
+         ],
+         type='MyLAseg2013Dataset'),
+     num_workers=16,
+     persistent_workers=True,
+     sampler=dict(shuffle=True, type='InfiniteSampler'))
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(img_scale=(
+         300,
+         300,
+     ), ratio_range=(
+         0.5,
+         2.0,
+     ), type='Resize'),
+     dict(cat_max_ratio=0.75, crop_size=(
+         256,
+         256,
+     ), type='RandomCrop'),
+     dict(prob=0.5, type='RandomFlip'),
+     dict(type='PhotoMetricDistortion'),
+     dict(
+         mean=[
+             123.675,
+             116.28,
+             103.53,
+         ],
+         std=[
+             58.395,
+             57.12,
+             57.375,
+         ],
+         to_rgb=True,
+         type='Normalize'),
+     dict(pad_val=0, seg_pad_val=255, size=(
+         256,
+         256,
+     ), type='Pad'),
+     dict(type='DefaultFormatBundle'),
+     dict(keys=[
+         'img',
+         'gt_semantic_seg',
+     ], type='Collect'),
+ ]
+ tta_model = dict(type='SegTTAModel')
+ val_cfg = dict(type='ValLoop')
+ val_dataloader = dict(
+     batch_size=16,
+     dataset=dict(
+         data_prefix=dict(
+             img_path='mri_test_2d/image', seg_map_path='mri_test_2d/label'),
+         data_root='data/LAseg2013',
+         pipeline=[
+             dict(type='LoadImageFromFile'),
+             dict(keep_ratio=False, scale=(
+                 256,
+                 256,
+             ), type='Resize'),
+             dict(type='LoadAnnotations'),
+             dict(type='PackSegInputs'),
+         ],
+         type='MyLAseg2013Dataset'),
+     drop_last=False,
+     num_workers=16,
+     persistent_workers=True,
+     sampler=dict(shuffle=False, type='DefaultSampler'))
+ val_evaluator = dict(
+     iou_metrics=[
+         'mIoU',
+     ], type='IoUMetric')
+ vis_backends = [
+     dict(type='LocalVisBackend'),
+     dict(type='TensorboardVisBackend'),
+ ]
+ visualizer = dict(
+     name='visualizer',
+     type='SegLocalVisualizer',
+     vis_backends=[
+         dict(type='LocalVisBackend'),
+         dict(type='TensorboardVisBackend'),
+     ])
+ work_dir = './work_dirs/fcn_psvt_256x256_25k_laseg'
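
Note: the dumped config above builds an mmsegmentation EncoderDecoder with a custom 'PSVT' backbone and an FCNHead (num_classes=2), trained for 25,000 iterations on 2D LAseg2013 slices. A minimal inference sketch for using it together with the uploaded iter_25000.pth, assuming the project's custom PSVT backbone and MyLAseg2013Dataset are installed and registered with mmseg (they are not part of stock mmsegmentation) and the image path is only a placeholder:

from mmengine.model import revert_sync_batchnorm
from mmseg.apis import init_model, inference_model

config_file = 'laseg_best/20241010_140609/vis_data/config.py'
checkpoint_file = 'laseg_best/20241010_140609/iter_25000.pth'

# Build the model from the dumped config and load the uploaded weights.
model = init_model(config_file, checkpoint_file, device='cuda:0')
model = revert_sync_batchnorm(model)  # the config uses SyncBN; convert for single-GPU inference
result = inference_model(model, 'example_mri_slice.png')  # placeholder 2D MRI slice
print(result.pred_sem_seg.data.shape)  # per-pixel class indices; num_classes=2 in this config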
laseg_best/20241010_140609/vis_data/events.out.tfevents.1728540372.localhost.localdomain.134804.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13ba645d16720d5d54989dacafb2ed66c8d05ce7ad20928f79790bcc4b618f59
+ size 227053
laseg_best/20241010_140609/vis_data/scalars.json ADDED
The diff for this file is too large to render. See raw diff