mytcl committed
Commit 1c40643 · verified · 1 parent: 104e3da

Upload 687 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. Task02_Heart/._dataset.json +0 -0
  2. Task02_Heart/._imagesTr +0 -0
  3. Task02_Heart/._imagesTs +0 -0
  4. Task02_Heart/._labelsTr +0 -0
  5. Task02_Heart/.idea/.gitignore +3 -0
  6. Task02_Heart/.idea/.name +1 -0
  7. Task02_Heart/.idea/Task02_Heart.iml +8 -0
  8. Task02_Heart/.idea/deployment.xml +14 -0
  9. Task02_Heart/.idea/inspectionProfiles/profiles_settings.xml +6 -0
  10. Task02_Heart/.idea/misc.xml +4 -0
  11. Task02_Heart/.idea/modules.xml +8 -0
  12. Task02_Heart/.idea/workspace.xml +56 -0
  13. Task02_Heart/__pycache__/half_vnet.cpython-311.pyc +0 -0
  14. Task02_Heart/__pycache__/half_vnet.cpython-38.pyc +0 -0
  15. Task02_Heart/__pycache__/vnet.cpython-311.pyc +0 -0
  16. Task02_Heart/__pycache__/vnet.cpython-38.pyc +0 -0
  17. Task02_Heart/data-augment.py +95 -0
  18. Task02_Heart/data-show.py +171 -0
  19. Task02_Heart/data_proc_for_3d.py +147 -0
  20. Task02_Heart/dataprocess.py +253 -0
  21. Task02_Heart/dataset.json +19 -0
  22. Task02_Heart/get_3d_data.py +0 -0
  23. Task02_Heart/get_precise_seg_part.py +111 -0
  24. Task02_Heart/half_vnet.py +207 -0
  25. Task02_Heart/imagesTr/la_003.nii.gz +3 -0
  26. Task02_Heart/imagesTr/la_004.nii.gz +3 -0
  27. Task02_Heart/imagesTr/la_005.nii.gz +3 -0
  28. Task02_Heart/imagesTr/la_007.nii.gz +3 -0
  29. Task02_Heart/imagesTr/la_009.nii.gz +3 -0
  30. Task02_Heart/imagesTr/la_010.nii.gz +3 -0
  31. Task02_Heart/imagesTr/la_011.nii.gz +3 -0
  32. Task02_Heart/imagesTr/la_014.nii.gz +3 -0
  33. Task02_Heart/imagesTr/la_016.nii.gz +3 -0
  34. Task02_Heart/imagesTr/la_017.nii.gz +3 -0
  35. Task02_Heart/imagesTr/la_018.nii.gz +3 -0
  36. Task02_Heart/imagesTr/la_019.nii.gz +3 -0
  37. Task02_Heart/imagesTr/la_020.nii.gz +3 -0
  38. Task02_Heart/imagesTr/la_021.nii.gz +3 -0
  39. Task02_Heart/imagesTr/la_022.nii.gz +3 -0
  40. Task02_Heart/imagesTr/la_023.nii.gz +3 -0
  41. Task02_Heart/imagesTr/la_024.nii.gz +3 -0
  42. Task02_Heart/imagesTr/la_026.nii.gz +3 -0
  43. Task02_Heart/imagesTr/la_029.nii.gz +3 -0
  44. Task02_Heart/imagesTr/la_030.nii.gz +3 -0
  45. Task02_Heart/imagesTr/pos_labels.npy +3 -0
  46. Task02_Heart/imagesTs/la_001.nii.gz +3 -0
  47. Task02_Heart/imagesTs/la_002.nii.gz +3 -0
  48. Task02_Heart/imagesTs/la_006.nii.gz +3 -0
  49. Task02_Heart/imagesTs/la_008.nii.gz +3 -0
  50. Task02_Heart/imagesTs/la_012.nii.gz +3 -0
Task02_Heart/._dataset.json ADDED
Binary file (176 Bytes).
 
Task02_Heart/._imagesTr ADDED
Binary file (120 Bytes).
 
Task02_Heart/._imagesTs ADDED
Binary file (120 Bytes).
 
Task02_Heart/._labelsTr ADDED
Binary file (120 Bytes).
 
Task02_Heart/.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bfdece3645ed8ed356030f22cc2004dc3f401ff060ac3d24de811c3bcd82e16
+ size 184
Task02_Heart/.idea/.name ADDED
@@ -0,0 +1 @@
+ show_image_and_label.py
Task02_Heart/.idea/Task02_Heart.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+ <component name="NewModuleRootManager">
+ <content url="file://$MODULE_DIR$" />
+ <orderEntry type="inheritedJdk" />
+ <orderEntry type="sourceFolder" forTests="false" />
+ </component>
+ </module>
Task02_Heart/.idea/deployment.xml ADDED
@@ -0,0 +1,14 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="PublishConfigData" remoteFilesAllowedToDisappearOnAutoupload="false">
+ <serverData>
+ <paths name="root@connect.yza1.seetacloud.com:49263 password">
+ <serverdata>
+ <mappings>
+ <mapping local="$PROJECT_DIR$" web="/" />
+ </mappings>
+ </serverdata>
+ </paths>
+ </serverData>
+ </component>
+ </project>
Task02_Heart/.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+ <settings>
+ <option name="USE_PROJECT_PROFILE" value="false" />
+ <version value="1.0" />
+ </settings>
+ </component>
Task02_Heart/.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
+ </project>
Task02_Heart/.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="ProjectModuleManager">
+ <modules>
+ <module fileurl="file://$PROJECT_DIR$/.idea/Task02_Heart.iml" filepath="$PROJECT_DIR$/.idea/Task02_Heart.iml" />
+ </modules>
+ </component>
+ </project>
Task02_Heart/.idea/workspace.xml ADDED
@@ -0,0 +1,56 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+ <component name="AutoImportSettings">
+ <option name="autoReloadType" value="SELECTIVE" />
+ </component>
+ <component name="ChangeListManager">
+ <list default="true" id="6367f652-10e6-41bd-8155-b4eab4e93752" name="Changes" comment="" />
+ <option name="SHOW_DIALOG" value="false" />
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+ <option name="LAST_RESOLUTION" value="IGNORE" />
+ </component>
+ <component name="ProjectColorInfo"><![CDATA[{
+ "customColor": "",
+ "associatedIndex": 4
+ }]]></component>
+ <component name="ProjectId" id="2mpPRTNPZxr7pyzWSgBrLAOiNJt" />
+ <component name="ProjectViewState">
+ <option name="hideEmptyMiddlePackages" value="true" />
+ <option name="showLibraryContents" value="true" />
+ </component>
+ <component name="PropertiesComponent"><![CDATA[{
+ "keyToString": {
+ "RunOnceActivity.OpenProjectViewOnStart": "true",
+ "RunOnceActivity.ShowReadmeOnStart": "true",
+ "node.js.detected.package.eslint": "true",
+ "node.js.detected.package.tslint": "true",
+ "node.js.selected.package.eslint": "(autodetect)",
+ "node.js.selected.package.tslint": "(autodetect)",
+ "nodejs_package_manager_path": "npm",
+ "vue.rearranger.settings.migration": "true"
+ }
+ }]]></component>
+ <component name="SharedIndexes">
+ <attachedChunks>
+ <set>
+ <option value="bundled-python-sdk-09665e90c3a7-b11f5e8da5ad-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-233.15026.15" />
+ </set>
+ </attachedChunks>
+ </component>
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
+ <component name="TaskManager">
+ <task active="true" id="Default" summary="Default task">
+ <changelist id="6367f652-10e6-41bd-8155-b4eab4e93752" name="Changes" comment="" />
+ <created>1727770539185</created>
+ <option name="number" value="Default" />
+ <option name="presentableId" value="Default" />
+ <updated>1727770539185</updated>
+ <workItem from="1727770542147" duration="660000" />
+ </task>
+ <servers />
+ </component>
+ <component name="TypeScriptGeneratedFilesManager">
+ <option name="version" value="3" />
+ </component>
+ </project>
Task02_Heart/__pycache__/half_vnet.cpython-311.pyc ADDED
Binary file (14 kB).
 
Task02_Heart/__pycache__/half_vnet.cpython-38.pyc ADDED
Binary file (6.89 kB).
 
Task02_Heart/__pycache__/vnet.cpython-311.pyc ADDED
Binary file (14.6 kB).
 
Task02_Heart/__pycache__/vnet.cpython-38.pyc ADDED
Binary file (7.07 kB).
 
Task02_Heart/data-augment.py ADDED
@@ -0,0 +1,95 @@
+ import glob
+ import random
+ import os
+ import cv2
+ import numpy as np
+ import tqdm
+ from matplotlib import pyplot as plt
+
+ import albumentations as A
+
+
+ def visualize(image, mask, original_image=None, original_mask=None):
+     fontsize = 18
+
+     if original_image is None and original_mask is None:
+         f, ax = plt.subplots(2, 1, figsize=(8, 8))
+
+         ax[0].imshow(image)
+         ax[1].imshow(mask)
+     else:
+         f, ax = plt.subplots(2, 2, figsize=(8, 8))
+
+         ax[0, 0].imshow(original_image)
+         ax[0, 0].set_title('Original image', fontsize=fontsize)
+
+         ax[1, 0].imshow(original_mask)
+         ax[1, 0].set_title('Original mask', fontsize=fontsize)
+
+         ax[0, 1].imshow(image)
+         ax[0, 1].set_title('Transformed image', fontsize=fontsize)
+
+         ax[1, 1].imshow(mask)
+         ax[1, 1].set_title('Transformed mask', fontsize=fontsize)
+     plt.show()
+ def augment_by_times():
+     image_path=r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\mri_train_2d\image'
+     label_path=r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\mri_train_2d\label'
+
+     image_paths=glob.glob(os.path.join(image_path,'*.png'))
+     # test=cv2.imread(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\train\label\mr_train_1001_image_100.png')
+     # print(np.unique(test))
+     times=2
+     for i in range(times):
+         for path in tqdm.tqdm(image_paths):
+
+             filename=path.split('\\')[-1]
+             # print(path,filename)
+
+             image = cv2.imread(path,0)
+             mask = cv2.imread(os.path.join(label_path,filename),0)
+             # print(image.dtype,mask.dtype,np.unique(mask))
+             # print(np.unique(mask))
+             # print(image.shape, mask.shape)
+             original_height, original_width = image.shape[:2]
+             aug = A.Compose([
+                 A.PadIfNeeded(min_height=128,min_width=128,value=0,p=1),
+                 # A.RandomSizedCrop(min_max_height=(128,256), height=original_height,
+                 #                   width=original_width, p=0.5),
+                 A.VerticalFlip(p=0.5),
+                 A.RandomRotate90(p=0.5),
+                 A.OneOf([
+                     A.ElasticTransform(alpha=120, sigma=120 * 0.05,
+                                        alpha_affine=120 * 0.03, p=0.5),
+                     A.GridDistortion(p=0.5),
+                     A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
+                 ], p=0.8),
+                 A.CLAHE(p=0.8),
+                 A.RandomBrightnessContrast(p=0.8),
+                 A.RandomGamma(p=0.8)
+                 ]
+             )
+
+
+
+             # random.seed(11)
+             augmented = aug(image=image, mask=mask)
+
+             image_heavy = augmented['image']
+             mask_heavy = augmented['mask']
+             # print(mask_heavy.shape,np.unique(mask_heavy))
+             label_num=len(np.unique(mask_heavy))
+             # print(image_heavy.dtype,mask_heavy.dtype)
+             if label_num>=2:
+                 # print(filename,np.unique(mask_heavy))
+                 cv2.imwrite(os.path.join(image_path.replace('mri_train_2d','mri_aug_2d'),f'aug{i+1}_'+filename),image_heavy)
+                 cv2.imwrite(os.path.join(label_path.replace('mri_train_2d', 'mri_aug_2d'),
+                                          f'aug{i+1}_' + filename), mask_heavy)
+             # visualize(image_heavy, mask_heavy, original_image=image,
+             #           original_mask=mask)
+             # visualize(image, mask)
+             # print(type(image),image_heavy.shape)
+
+         # break
+ if __name__ == '__main__':
+     augment_by_times()
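A note on running data-augment.py above: cv2.imwrite does not create missing directories, so the mri_aug_2d output folders have to exist before augment_by_times() runs. A minimal sketch (not part of the commit), assuming the same hard-coded directory layout as the script:

import os

# Assumed output root, mirroring the paths hard-coded in data-augment.py.
base = r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\mri_aug_2d'
for sub in ('image', 'label'):
    os.makedirs(os.path.join(base, sub), exist_ok=True)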
Task02_Heart/data-show.py ADDED
@@ -0,0 +1,171 @@
+ import glob
+ import random
+ import os
+ import cv2
+ import numpy as np
+ import tqdm
+ from matplotlib import pyplot as plt
+
+ import albumentations as A
+ # patient009_frame13_03.png
+ # patient030_frame01_07.png
+ # patient049_frame11_02.png
+ # patient069_frame01_00.png
+ # patient085_frame01_09.png
+
+ def visualize(image, mask, original_image=None, original_mask=None):
+     fontsize = 18
+
+     if original_image is None and original_mask is None:
+         f, ax = plt.subplots(2, 1, figsize=(8, 8))
+
+         ax[0].imshow(image)
+         ax[1].imshow(mask)
+     else:
+         f, ax = plt.subplots(2, 2, figsize=(8, 8))
+
+         ax[0, 0].imshow(original_image)
+         ax[0, 0].set_title('Original image', fontsize=fontsize)
+
+         ax[1, 0].imshow(original_mask)
+         ax[1, 0].set_title('Original mask', fontsize=fontsize)
+
+         ax[0, 1].imshow(image)
+         ax[0, 1].set_title('Transformed image', fontsize=fontsize)
+
+         ax[1, 1].imshow(mask)
+         ax[1, 1].set_title('Transformed mask', fontsize=fontsize)
+     plt.show()
+ # image_path=r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\train\image'
+ # label_path=r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\train\label'
+
+ # image_paths=glob.glob(os.path.join(image_path,'*.png'))
+ # test=cv2.imread(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\mr-cardiac\train\label\mr_train_1001_image_100.png')
+ # print(np.unique(test))
+ # for path in tqdm.tqdm(image_paths):
+
+ # filename=path.split('\\')[-1]
+ # print(path,filename)
+
+ image = cv2.imread('./mri_train_2d/image/mr_train_1011_image_100.png',0)
+ mask = cv2.imread('./mri_train_2d/label/mr_train_1011_image_100.png',0)
+ # print(image.dtype,mask.dtype,np.unique(mask))
+ # print(np.unique(mask))
+ # print(image.shape, mask.shape)
+ original_height, original_width = image.shape[:2]
+
+ f, ax = plt.subplots(2, 7, figsize=(32,8),squeeze=True)
+ plt.axis('off')
+
+ image=A.resize(image,256,256)
+ mask=A.resize(mask,256,256)
+ ax[0,0].imshow(image,cmap='gray',)
+ ax[0,0].axis('off')
+
+ ax[1,0].imshow(mask,cmap='CMRmap')
+ ax[1,0].axis('off')
+ plt.gcf().tight_layout()
+ aug=A.VerticalFlip()
+ augmented = aug(image=image, mask=mask)
+ image_heavy = augmented['image']
+ mask_heavy = augmented['mask']
+ ax[0,1].imshow(image_heavy,cmap='gray')
+ ax[0,1].axis('off')
+ ax[1,1].imshow(mask_heavy,cmap='CMRmap')
+ ax[1,1].axis('off')
+
+ aug=A.RandomRotate90()
+ augmented = aug(image=image, mask=mask)
+ image_heavy = augmented['image']
+ mask_heavy = augmented['mask']
+ ax[0,2].imshow(image_heavy,cmap='gray')
+ ax[0,2].axis('off')
+ ax[1,2].imshow(mask_heavy,cmap='CMRmap')
+ ax[1,2].axis('off')
+
+
+ aug=A.RandomSizedCrop(min_max_height=(100,200), height=256,
+                       width=256)
+ augmented = aug(image=image, mask=mask)
+ image_heavy = augmented['image']
+ mask_heavy = augmented['mask']
+ ax[0,3].imshow(image_heavy,cmap='gray')
+ ax[0,3].axis('off')
+ ax[1,3].imshow(mask_heavy,cmap='CMRmap')
+ ax[1,3].axis('off')
+
+ aug=A.ElasticTransform(alpha=120, sigma=120 * 0.05,
+                        alpha_affine=120 * 0.03)
+ augmented = aug(image=image, mask=mask)
+ image_heavy = augmented['image']
+ mask_heavy = augmented['mask']
+ ax[0,4].imshow(image_heavy,cmap='gray')
+ ax[0,4].axis('off')
+ ax[1,4].imshow(mask_heavy,cmap='CMRmap')
+ ax[1,4].axis('off')
+
+
+ aug=A.GridDistortion()
+ augmented = aug(image=image, mask=mask)
+ image_heavy = augmented['image']
+ mask_heavy = augmented['mask']
+ ax[0,5].imshow(image_heavy,cmap='gray')
+ ax[0,5].axis('off')
+ ax[1,5].imshow(mask_heavy,cmap='CMRmap')
+ ax[1,5].axis('off')
+
+ aug=A.OpticalDistortion(distort_limit=2, shift_limit=0.5)
+ augmented = aug(image=image, mask=mask)
+ image_heavy = augmented['image']
+ mask_heavy = augmented['mask']
+ ax[0,6].imshow(image_heavy,cmap='gray')
+ ax[0,6].axis('off')
+
+
+ ax[1,6].imshow(mask_heavy,cmap='CMRmap')
+ ax[1,6].axis('off')
+ plt.tight_layout()
+ plt.subplots_adjust(wspace=0,hspace=0)
+ plt.show()
+ # f.savefig('a.png',bbox_inches='tight')
+
+
+ # aug = A.Compose([
+ #     # A.PadIfNeeded(min_height=128,min_width=128,value=0,p=1),
+ #     A.RandomSizedCrop(min_max_height=(128,256), height=original_height,
+ #                       width=original_width, p=0.5),
+ #     A.VerticalFlip(p=0.5),
+ #     A.RandomRotate90(p=0.5),
+ #     A.OneOf([
+ #         A.ElasticTransform(alpha=120, sigma=120 * 0.05,
+ #                            alpha_affine=120 * 0.03, p=0.5),
+ #         A.GridDistortion(p=0.5),
+ #         A.OpticalDistortion(distort_limit=2, shift_limit=0.5, p=1)
+ #     ], p=0.8),
+ #     A.CLAHE(p=0.8),
+ #     A.RandomBrightnessContrast(p=0.8),
+ #     A.RandomGamma(p=0.8)
+ # ]
+ # )
+
+
+
+ # random.seed(11)
+ # augmented = aug(image=image, mask=mask)
+ #
+ # image_heavy = augmented['image']
+ # mask_heavy = augmented['mask']
+ # # print(mask_heavy.shape,np.unique(mask_heavy))
+ # label_num=len(np.unique(mask_heavy))
+ # # print(image_heavy.dtype,mask_heavy.dtype)
+ # # if label_num>=2:
+ # # print(filename,np.unique(mask_heavy))
+ # # cv2.imwrite(os.path.join(image_path.replace('train','aug_train'),'aug_v2'+filename),image_heavy)
+ # # cv2.imwrite(os.path.join(label_path.replace('train', 'aug_train'),
+ # # 'aug_v2' + filename), mask_heavy)
+ # visualize(image_heavy, mask_heavy, original_image=image,
+ #           original_mask=mask)
+ # visualize(image, mask)
+ # print(type(image),image_heavy.shape)
+
+ # break
Task02_Heart/data_proc_for_3d.py ADDED
@@ -0,0 +1,147 @@
+ import sys
+
+ import cv2
+ import matplotlib.pyplot as plt
+ import nibabel as nib
+ import os
+ import glob
+ # from scipy.ndimage import zoom
+ import numpy as np
+ import skimage.transform
+ import torch.optim
+ from skimage import transform
+ from scipy.ndimage import binary_fill_holes, zoom
+ from scipy.ndimage import map_coordinates
+ from vnet import VNet
+ from half_vnet import HalfVNet
+ from torch.utils.data import Dataset, DataLoader
+ import torch.nn as nn
+ from torch.optim import AdamW
+ from torch.cuda.amp import GradScaler
+ from torch.cuda.amp import autocast
+ from tqdm import tqdm
+ def handle_image_and_label():
+     cnt = 0
+     pos_label = []
+     image_paths = glob.glob(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\Task02_Heart\labelsTr\*.nii.gz')
+     for path in image_paths:
+         folder = 'mri_train_2d1'
+         filename = path.split('\\')[-1].split('.')[0].replace('label', 'image')
+
+         print(filename)
+         # load the volume and rearrange it into a suitable axis order
+         image = nib.load(path).dataobj
+         image = np.array(image, dtype=np.int8)
+         image = np.swapaxes(image, 1, 2)
+         image = np.swapaxes(image, 0, 1)
+         D, H, W = image.shape
+         plt.subplot(1, 3, 1)
+         plt.imshow(image[60, :, :])
+         image = transform.resize(image, (128, 320, 320))
+         plt.subplot(1, 3, 2)
+         plt.imshow(image[60, :, :])
+
+         # get the normalized coordinates
+         z_min, z_max = get_min_and_max_by_axis(image, 0)
+         x_min, x_max = get_min_and_max_by_axis(image, 1)
+         y_min, y_max = get_min_and_max_by_axis(image, 2)
+         label = [z_min, z_max, x_min, x_max, y_min, y_max]
+         print(image.shape, label)
+         pos_label.append(label)
+
+         image = transform.resize(image, (D, 320, 320))
+         plt.subplot(1, 3, 3)
+         plt.imshow(image[60, :, :])
+         plt.show()
+     pos_label = np.array(pos_label)
+     print(pos_label.shape)
+     np.save('./imagesTr/pos_labels.npy', pos_label)
+
+
+ def get_min_and_max_by_axis(image, axis, eps=1e-2):
+     label_list = []
+     length = image.shape[axis]
+     if axis == 0:
+         for i in range(length):
+             if len(np.unique(image[i, :, :])) != 1:
+                 label_list.append(i)
+     elif axis == 1:
+         for i in range(length):
+             if len(np.unique(image[:, i, :])) != 1:
+                 label_list.append(i)
+     elif axis == 2:
+         for i in range(length):
+             if len(np.unique(image[:, :, i])) != 1:
+                 label_list.append(i)
+     norm_min, norm_max = min(label_list) / length - eps, max(label_list) / length + eps
+     print(min(label_list), int(norm_min * length), max(label_list), int(norm_max * length))
+     return norm_min, norm_max
+
+
+ class NIIDataset(Dataset):
+     def __init__(self, path,resize_shape):
+         super().__init__()
+         self.image_paths = glob.glob(path)
+         label_path=path[:-8]+'pos_labels.npy'
+         self.labels=np.load(label_path)
+         self.resize_shape=resize_shape
+     def __len__(self):
+         return len(self.image_paths)
+     def __getitem__(self, index):
+         image=np.array(nib.load(self.image_paths[index]).dataobj)
+         image=transform.resize(image,output_shape=self.resize_shape)[np.newaxis,:]
+         label=self.labels[index]
+         return image,label
+
+ if __name__ == '__main__':
+     # handle_image_and_label()
+     dataset = NIIDataset(path='./imagesTr/*.nii.gz',resize_shape=(128,320,320))
+     dataloader=DataLoader(dataset,batch_size=2,shuffle=True)
+     test_dataloader=DataLoader(dataset,batch_size=1)
+     device="cuda" if torch.cuda.is_available() else "cpu"
+     print(device)
+     model=HalfVNet().to(device)
+     criterion=torch.nn.L1Loss(reduction='sum').to(device)
+     optimizer=torch.optim.AdamW(model.parameters(),lr=0.001)
+     scaler=GradScaler()
+     EPOCHS=100
+
+     # weights=torch.load('./weights/vnet_100.pth')
+     # model.load_state_dict(weights)
+
+     TRAIN=True
+     TEST=True
+     if TRAIN:
+         for epoch in range(1,EPOCHS+1):
+             model.train()
+             losses=[]
+             train_bar=tqdm(dataloader,file=sys.stdout)
+             for step,(images,labels) in enumerate(train_bar):
+                 with autocast():
+                     images=images.to(device)
+                     labels=labels.to(torch.float32).to(device)
+                     output=model(images)
+
+                     optimizer.zero_grad()
+                     loss=criterion(output,labels)
+                 # loss.backward()
+                 # optimizer.step()
+                 scaler.scale(loss).backward()
+                 scaler.step(optimizer)
+                 scaler.update()
+                 losses.append(loss.item())
+                 # print(f"epoch:{epoch},mean loss:{sum(losses)/len(losses)}")
+                 train_bar.set_postfix(epoch=epoch,step=step,step_loss=loss.item(),mean_loss=sum(losses)/len(losses))
+             if epoch%10==0:
+                 torch.save(model.state_dict(),f'./weights/vnet_{epoch}.pth')
+     if TEST:
+         weights=torch.load('./weights/vnet_100.pth')
+         model.load_state_dict(weights)
+         model.eval()
+         for step,(images,labels) in enumerate(dataloader):
+             with autocast():
+                 images = images.to(device)
+                 labels = labels.to(torch.float32).to(device)
+                 output = model(images)
+                 print("labels:",labels)
+                 print("predicts:",output)
Task02_Heart/dataprocess.py ADDED
@@ -0,0 +1,253 @@
+ import cv2
+ import matplotlib.pyplot as plt
+ import nibabel as nib
+ import os
+ import glob
+ # from scipy.ndimage import zoom
+ import numpy as np
+ import skimage.transform
+ import torch.optim
+ from skimage import transform
+ from scipy.ndimage import binary_fill_holes,zoom
+ from scipy.ndimage import map_coordinates
+ # todo: crop out the bounding box first, then resize to a uniform size
+ # from imblearn.over_sampling import SMOTE
+
+ # image_paths=glob.glob('./train/label/*.png')
+ # cnt_mp={0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0}
+ # for path in image_paths:
+ #     image=cv2.imread(path)[:,:,0]
+ #     for i in cnt_mp:
+ #         cnt_mp[i]+=np.sum(image==i)
+ #
+ #     # break
+ # print(cnt_mp)
+ # cnt_mp.pop(0)
+ # cnt=0
+ # for i in cnt_mp:
+ #     cnt+=cnt_mp[i]
+ # for i in cnt_mp:
+ #     print(i,(cnt/len(cnt_mp)/cnt_mp[i]))
+
+
+ # image_paths=glob.glob('./mr_train/*_image.nii.gz')
+ # for path in image_paths:
+ #     # print(path)
+ #
+ #     filename=path.split('\\')[-1].split('.')[0]
+ #
+ #     print(filename)
+ #     image=nib.load(path).dataobj
+ #     image=np.floor((image-np.min(image))/(np.max(image)-np.min(image))*255)
+ #     # image=zoom(image,[256/image.shape[0],256/image.shape[1],128/image.shape[2]],order=0)
+ #     # print(image.dtype)
+ #
+ #     for i in range(image.shape[-1]):
+ #
+ #         cv2.imwrite(os.path.join('train/image',f'{filename}_{i}.png'),image[:,:,i])
+ #
+ #     # break
+ # #
+
+ # os.path.join('')
+ # image_paths=glob.glob('./ct_train/*_image.nii.gz')
+ # for path in image_paths:
+ #     data_info=nib.load(path)
+ #     h_,w_,d_=data_info.header['pixdim'][1:4]
+ #     h,w,d=data_info.shape
+ #     data=data_info.get_fdata()
+ #
+ #     print(data.shape)
+ #     print('actual size',int(h*h_),int(w*w_),int(d*d_))
+ #     # part_image=data[data!=-1024]
+ #     # data=(data-np.mean(data))/np.std(data)
+ #     # data=cv2.imread('../xm12/train_image/01001.png')
+ #     # data=(data-np.mean(part_image))/np.std(part_image)
+ #     print('value range', data.min(), data.max())
+ #     # plt.imshow(data[:, :, 0],cmap='gray')
+ #     # plt.show()
+ #
+ #     # nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
+ #     # for c in range(data.shape[0]):
+ #     #     this_mask = data[c] != 0
+ #     #     nonzero_mask = nonzero_mask | this_mask
+ #     # nonzero_mask = binary_fill_holes(nonzero_mask)
+ #     break
+ def handle_image_and_label():
+     cnt=0
+     image_paths=glob.glob(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\Task02_Heart\labelsTr\*.nii.gz')
+     for path in image_paths:
+         # print(path)
+         # if cnt<16:
+         #     folder='mri_train_2d'
+         # else:
+         #     folder='mri_test_2d'
+         folder='mri_train_2d'
+         filename=path.split('\\')[-1].split('.')[0].replace('label','image')
+
+         print(filename)
+
+         image=nib.load(path).dataobj
+         image=np.array(image,dtype=np.int8)
+         # print(image.shape)
+         # print(np.unique(image))
+         '''
+         label_map = [0, 1]
+
+         for i, v in enumerate(label_map):
+             image = np.where(image == v, i, image)
+         image = np.where(image == 421, 2, image)
+
+         # usable label-resize
+         rows, cols, dim = 256,256,image.shape[-1]
+         orig_rows, orig_cols, orig_dim = image.shape
+
+         row_scale = float(orig_rows) / rows
+         col_scale = float(orig_cols) / cols
+         dim_scale = float(orig_dim) / dim
+
+         map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
+         map_rows = row_scale * (map_rows + 0.5) - 0.5
+         map_cols = col_scale * (map_cols + 0.5) - 0.5
+         map_dims = dim_scale * (map_dims + 0.5) - 0.5
+
+         coord_map = np.array([map_rows, map_cols, map_dims])
+
+
+         image=map_coordinates(image, coord_map, order=1)
+         '''
+         # hand-written label-resize
+         # print(image.shape,type(image),image.dtype,np.unique(image))
+         # break
+         # print(np.unique(image))
+
+         # final_index=[]
+         # # print(np.unique(image,axis=0).shape)
+         # # print(np.unique(image, axis=1).shape)
+         # # print(np.unique(image, axis=2).shape)
+         # temp_index=[]
+         # for i in range(0,image.shape[0]):
+         #     if len(np.unique(image[i,:,:]))!=1:
+         #         temp_index.append(i)
+         #         break
+         # for i in range(image.shape[0]-1,0,-1):
+         #     if len(np.unique(image[i,:,:])) != 1:
+         #         temp_index.append(i)
+         #         break
+         # final_index.append(temp_index)
+         #
+         # temp_index = []
+         # for i in range(0,image.shape[1]):
+         #     if len(np.unique(image[:, i, :])) != 1:
+         #         temp_index.append(i)
+         #         break
+         # for i in range(image.shape[1] - 1, 0, -1):
+         #     if len(np.unique(image[:, i, :])) != 1:
+         #         temp_index.append(i)
+         #         break
+         # final_index.append(temp_index)
+         #
+         # temp_index = []
+         # for i in range(0,image.shape[2]):
+         #     if len(np.unique(image[:, :, i])) != 1:
+         #         temp_index.append(i)
+         #         break
+         # for i in range(image.shape[2] - 1, 0, -1):
+         #     if len(np.unique(image[:, :, i])) != 1:
+         #         temp_index.append(i)
+         #         break
+         # final_index.append(temp_index)
+         #
+         # print(final_index)
+
+         # image=image[final_index[0][0]:final_index[0][1],
+         #             final_index[1][0]:final_index[1][1],
+         #             final_index[2][0]:final_index[2][1]]
+
+         # 1 2 3
+
+         # note: when saving as an image, the dtype should not be unsigned
+         image=image.astype(np.int8)
+         print(np.unique(image),type(image),image.shape,image.dtype)
+         # print(np.unique(image[:,:,60]))
+         for i in range(image.shape[-1]):
+             cv2.imwrite(os.path.join(f'{folder}/label',f'{filename}_{i}.png'),image[:,:,i])
+
+         # np.save(os.path.join('./train-3d/label',filename),image)
+
+         # if len(np.unique(image>=3)):
+         #     print(image.shape)
+         #     for i in range(image.shape[-1]):
+         #         cv2.imwrite(os.path.join('train/label',f'{filename}_{i}.png'),image[:,:,i])
+         # filename=filename.replace('label','image')
+
+         image = np.array(nib.load(os.path.join('imagesTr', filename + '.nii.gz')).dataobj)
+         print(np.unique(image))
+
+         # f,ax=plt.subplots(2,1)
+         # ax[0].imshow(image[:,:,0])
+
+
+         # CT processing
+         # image=((image+1024)/4095)*255
+         # MRI processing
+         image=((image-np.min(image))/(np.max(image)-np.min(image)))*255
+
+
+         # print(np.unique(image))
+         # ax[1].imshow(image[:,:,0])
+         # plt.show()
+         # print(image[:,:,0][128])
+         # image = np.floor(
+         #     (image - np.min(image)) / (np.max(image) - np.min(image)) * 255)
+         # image = image[final_index[0][0]:final_index[0][1],
+         #               final_index[1][0]:final_index[1][1],
+         #               final_index[2][0]:final_index[2][1]]
+         # image=(image-np.min(image))/(np.max(image)-np.min(image))*255
+
+         # usable image-resize
+         '''
+         image=skimage.transform.resize(image,(256,256,image.shape[-1]),order=3)
+         '''
+         # image=image.astype(np.float32)
+         # np.save(os.path.join('./train-3d/image',filename),image)
+         print(image.dtype)
+         # print(np.unique(image[:,:,60]))
+         for i in range(image.shape[-1]):
+             cv2.imwrite(os.path.join(f'{folder}/image',f'{filename}_{i}.png'),image[:,:,i])
+         cnt+=1
+         # break
+ if __name__ == '__main__':
+
+     # paths=glob.glob(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\Task02_Heart\imagesTr\*.nii.gz')
+     # for p in paths:
+     #
+     #     img=nib.load(p).dataobj
+     #     print(np.min(img),np.max(img))
+
+
+     handle_image_and_label()
+
+     img=cv2.imread('./mri_train_2d/image/la_003_60.png',0)
+     label=cv2.imread('./mri_train_2d/label/la_003_60.png',0)
+     print(np.unique(label))
+     plt.subplot(1,2,1)
+     plt.imshow(img)
+     plt.subplot(1,2,2)
+     plt.imshow(label,cmap='gray',interpolation='none')
+     plt.show()
+
+     # img_3d=nib.load('./mr_train/mr_train_1011_image.nii.gz').dataobj
+     # label_3d=nib.load('./mr_train/mr_train_1011_label.nii.gz').dataobj
+     #
+     # # print(img_3d.shape)
+     # img_slice=img_3d[:,:,90]
+     #
+     # label_slice=label_3d[:,:,90]
+     # # label_slice=np.where(label_slice==420,2,label_slice)
+     # # label_slice = np.where(label_slice == 850, 7, label_slice)
+     # print(np.unique(label_slice))
+     # fig,ax=plt.subplots(1,2)
+     # ax[0].imshow(img_slice,cmap='gray')
+     # ax[1].imshow(label_slice,cmap='CMRmap')
+     # plt.show()
Task02_Heart/dataset.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "name": "LeftAtrium",
+ "description": "Left atrium segmentation",
+ "tensorImageSize": "3D",
+ "reference": "King’s College London",
+ "licence":"CC-BY-SA 4.0",
+ "relase":"1.0 04/05/2018",
+ "modality": {
+ "0": "MRI"
+ },
+ "labels": {
+ "0": "background",
+ "1": "left atrium"
+ },
+ "numTraining": 20,
+ "numTest": 10,
+ "training":[{"image":"./imagesTr/la_007.nii.gz","label":"./labelsTr/la_007.nii.gz"},{"image":"./imagesTr/la_019.nii.gz","label":"./labelsTr/la_019.nii.gz"},{"image":"./imagesTr/la_023.nii.gz","label":"./labelsTr/la_023.nii.gz"},{"image":"./imagesTr/la_005.nii.gz","label":"./labelsTr/la_005.nii.gz"},{"image":"./imagesTr/la_009.nii.gz","label":"./labelsTr/la_009.nii.gz"},{"image":"./imagesTr/la_017.nii.gz","label":"./labelsTr/la_017.nii.gz"},{"image":"./imagesTr/la_021.nii.gz","label":"./labelsTr/la_021.nii.gz"},{"image":"./imagesTr/la_029.nii.gz","label":"./labelsTr/la_029.nii.gz"},{"image":"./imagesTr/la_003.nii.gz","label":"./labelsTr/la_003.nii.gz"},{"image":"./imagesTr/la_011.nii.gz","label":"./labelsTr/la_011.nii.gz"},{"image":"./imagesTr/la_030.nii.gz","label":"./labelsTr/la_030.nii.gz"},{"image":"./imagesTr/la_022.nii.gz","label":"./labelsTr/la_022.nii.gz"},{"image":"./imagesTr/la_014.nii.gz","label":"./labelsTr/la_014.nii.gz"},{"image":"./imagesTr/la_018.nii.gz","label":"./labelsTr/la_018.nii.gz"},{"image":"./imagesTr/la_020.nii.gz","label":"./labelsTr/la_020.nii.gz"},{"image":"./imagesTr/la_004.nii.gz","label":"./labelsTr/la_004.nii.gz"},{"image":"./imagesTr/la_016.nii.gz","label":"./labelsTr/la_016.nii.gz"},{"image":"./imagesTr/la_024.nii.gz","label":"./labelsTr/la_024.nii.gz"},{"image":"./imagesTr/la_010.nii.gz","label":"./labelsTr/la_010.nii.gz"},{"image":"./imagesTr/la_026.nii.gz","label":"./labelsTr/la_026.nii.gz"}],
+ "test":["./imagesTs/la_015.nii.gz","./imagesTs/la_025.nii.gz","./imagesTs/la_013.nii.gz","./imagesTs/la_001.nii.gz","./imagesTs/la_027.nii.gz","./imagesTs/la_006.nii.gz","./imagesTs/la_008.nii.gz","./imagesTs/la_012.nii.gz","./imagesTs/la_028.nii.gz","./imagesTs/la_002.nii.gz"]
+ }
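For reference, a minimal sketch (not part of the commit) of how the training pairs listed in dataset.json can be iterated with nibabel, assuming it is run from the Task02_Heart folder:

import json
import nibabel as nib

with open('dataset.json') as f:
    meta = json.load(f)

for pair in meta['training']:
    img = nib.load(pair['image']).get_fdata()  # e.g. ./imagesTr/la_007.nii.gz
    lbl = nib.load(pair['label']).get_fdata()  # matching ./labelsTr volume
    print(pair['image'], img.shape, lbl.shape)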
Task02_Heart/get_3d_data.py ADDED
File without changes
Task02_Heart/get_precise_seg_part.py ADDED
@@ -0,0 +1,111 @@
+ import sys
+
+ import cv2
+ import matplotlib.pyplot as plt
+ import nibabel as nib
+ import os
+ import glob
+ # from scipy.ndimage import zoom
+ import numpy as np
+ import skimage.transform
+ import torch.optim
+ from skimage import transform
+ from scipy.ndimage import binary_fill_holes, zoom
+ from scipy.ndimage import map_coordinates
+ from vnet import VNet
+ from half_vnet import HalfVNet
+ from torch.utils.data import Dataset, DataLoader
+ import torch.nn as nn
+ from torch.optim import AdamW
+ from torch.cuda.amp import GradScaler
+ from torch.cuda.amp import autocast
+ from tqdm import tqdm
+ def handle_image_and_label():
+     cnt = 0
+     pos_label = []
+     image_paths = glob.glob(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\Task02_Heart\labelsTr\*.nii.gz')
+     data_paths=glob.glob(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\Task02_Heart\imagesTr\*.nii.gz')
+     for i,path in enumerate(image_paths):
+         folder = 'mri_z_precise_train_2d'
+         filename = path.split('\\')[-1].split('.')[0].replace('label', 'image')
+
+         print(filename)
+         # load the volumes and rearrange them into a suitable axis order
+         image = nib.load(path).dataobj
+         data=nib.load(data_paths[i]).dataobj
+         image = np.array(image, dtype=np.int8)
+         image = np.swapaxes(image, 1, 2)
+         image = np.swapaxes(image, 0, 1)
+         data = np.swapaxes(data, 1, 2)
+         data = np.swapaxes(data, 0, 1)
+         print(np.min(data),np.max(data))
+         data=((data-np.min(data))/(np.max(data)-np.min(data)))*255
+         data=np.array(data,dtype=int)
+         D, H, W = image.shape
+         plt.subplot(1, 3, 1)
+         plt.imshow(image[60, :, :])
+         image = transform.resize(image, (128, 320, 320))
+         plt.subplot(1, 3, 2)
+         plt.imshow(image[60, :, :])
+         # get the normalized coordinates
+         z_min, z_max = get_min_and_max_by_axis(image, 0)
+         x_min, x_max = get_min_and_max_by_axis(image, 1)
+         y_min, y_max = get_min_and_max_by_axis(image, 2)
+         label = [z_min, z_max, x_min, x_max, y_min, y_max]
+         print(image.shape, label)
+         pos_label.append(label)
+
+         image = transform.resize(image, (D, 320, 320))
+         plt.subplot(1, 3, 3)
+         plt.imshow(image[60, :, :])
+         plt.show()
+
+         indices=[]
+         ranges=[128,128,320,320,320,320]
+         for i in range(len(label)):
+             indices.append(int(label[i]*ranges[i]))
+         print(indices)
+
+         image = nib.load(path).dataobj
+         image = np.array(image, dtype=np.int8)
+         image = np.swapaxes(image, 1, 2)
+         image = np.swapaxes(image, 0, 1)
+
+         # label_nii=image[indices[0]:indices[1],indices[2]:indices[3],indices[4]:indices[5]]
+         # print(np.unique(label_nii))
+         # data_nii=data[indices[0]:indices[1],indices[2]:indices[3],indices[4]:indices[5]]
+         label_nii=image[indices[0]:indices[1],:]
+         # print(np.unique(label_nii))
+         data_nii=data[indices[0]:indices[1],:]
+
+         for i in range(label_nii.shape[0]):
+             cv2.imwrite(os.path.join(f'{folder}/label', f'{filename}_{i}.png'), label_nii[i,:, :])
+             # print(np.unique(label_nii[i, :, :]))
+         for i in range(len(data_nii)):
+             cv2.imwrite(os.path.join(f'{folder}/image', f'{filename}_{i}.png'), data_nii[i,:, :])
+
+     pos_label = np.array(pos_label)
+     print(pos_label.shape)
+     # np.save('./imagesTr/pos_labels.npy', pos_label)
+
+
+ def get_min_and_max_by_axis(image, axis, eps=1e-2):
+     label_list = []
+     length = image.shape[axis]
+     if axis == 0:
+         for i in range(length):
+             if len(np.unique(image[i, :, :])) != 1:
+                 label_list.append(i)
+     elif axis == 1:
+         for i in range(length):
+             if len(np.unique(image[:, i, :])) != 1:
+                 label_list.append(i)
+     elif axis == 2:
+         for i in range(length):
+             if len(np.unique(image[:, :, i])) != 1:
+                 label_list.append(i)
+     norm_min, norm_max = min(label_list) / length - eps, max(label_list) / length + eps
+     print(min(label_list), int(norm_min * length), max(label_list), int(norm_max * length))
+     return norm_min, norm_max
+ if __name__ == '__main__':
+     handle_image_and_label()
Task02_Heart/half_vnet.py ADDED
@@ -0,0 +1,207 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ def passthrough(x, **kwargs):
+     return x
+
+ def ELUCons(elu, nchan):
+     if elu:
+         return nn.ELU(inplace=True)
+     else:
+         return nn.PReLU(nchan)
+
+ # normalization between sub-volumes is necessary
+ # for good performance
+ class ContBatchNorm3d(nn.modules.batchnorm._BatchNorm):
+     def _check_input_dim(self, input):
+         if input.dim() != 5:
+             raise ValueError('expected 5D input (got {}D input)'
+                              .format(input.dim()))
+         super(ContBatchNorm3d, self)._check_input_dim(input)
+
+     def forward(self, input):
+         self._check_input_dim(input)
+         return F.batch_norm(
+             input, self.running_mean, self.running_var, self.weight, self.bias,
+             True, self.momentum, self.eps)
+
+
+ class LUConv(nn.Module):
+     def __init__(self, nchan, elu):
+         super(LUConv, self).__init__()
+         self.relu1 = ELUCons(elu, nchan)
+         self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2)
+         self.bn1 = nn.BatchNorm3d(nchan)
+
+     def forward(self, x):
+         out = self.relu1(self.bn1(self.conv1(x)))
+         return out
+
+
+ def _make_nConv(nchan, depth, elu):
+     layers = []
+     for _ in range(depth):
+         layers.append(LUConv(nchan, elu))
+     return nn.Sequential(*layers)
+
+
+ class InputTransition(nn.Module):
+     def __init__(self, outChans, elu):
+         super(InputTransition, self).__init__()
+         self.conv1 = nn.Conv3d(1, 16, kernel_size=5, padding=2)
+         self.bn1 = nn.BatchNorm3d(16)
+         self.relu1 = ELUCons(elu, 16)
+
+     def forward(self, x):
+         # do we want a PRELU here as well?
+         out=self.conv1(x)
+         out = self.bn1(out)
+         # split input in to 16 channels
+         x16 = torch.cat((x, x, x, x, x, x, x, x,
+                          x, x, x, x, x, x, x, x), 1)
+         out = self.relu1(torch.add(out, x16))
+         return out
+
+
+ class DownTransition(nn.Module):
+     def __init__(self, inChans, nConvs, elu, dropout=False):
+         super(DownTransition, self).__init__()
+         outChans = 2*inChans
+         self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
+         self.bn1 = nn.BatchNorm3d(outChans)
+         self.do1 = passthrough
+         self.relu1 = ELUCons(elu, outChans)
+         self.relu2 = ELUCons(elu, outChans)
+         if dropout:
+             self.do1 = nn.Dropout3d()
+         self.ops = _make_nConv(outChans, nConvs, elu)
+
+     def forward(self, x):
+         down = self.relu1(self.bn1(self.down_conv(x)))
+         out = self.do1(down)
+         out = self.ops(out)
+         out = self.relu2(torch.add(out, down))
+         return out
+
+
+ class UpTransition(nn.Module):
+     def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
+         super(UpTransition, self).__init__()
+         self.up_conv = nn.ConvTranspose3d(inChans, outChans // 2, kernel_size=2, stride=2)
+         self.bn1 = nn.BatchNorm3d(outChans // 2)
+         self.do1 = passthrough
+         self.do2 = nn.Dropout3d()
+         self.relu1 = ELUCons(elu, outChans // 2)
+         self.relu2 = ELUCons(elu, outChans)
+         if dropout:
+             self.do1 = nn.Dropout3d()
+         self.ops = _make_nConv(outChans, nConvs, elu)
+
+     def forward(self, x, skipx):
+         out = self.do1(x)
+         skipxdo = self.do2(skipx)
+         out = self.relu1(self.bn1(self.up_conv(out)))
+         xcat = torch.cat((out, skipxdo), 1)
+         out = self.ops(xcat)
+         out = self.relu2(torch.add(out, xcat))
+         return out
+
+
+ class OutputTransition(nn.Module):
+     def __init__(self, inChans, elu, nll):
+         super(OutputTransition, self).__init__()
+         self.conv1 = nn.Conv3d(inChans, 2, kernel_size=5, padding=2)
+         self.bn1 = nn.BatchNorm3d(2)
+         self.conv2 = nn.Conv3d(2, 2, kernel_size=1)
+         self.relu1 = ELUCons(elu, 2)
+         if nll:
+             self.softmax = F.log_softmax
+         else:
+             self.softmax = F.softmax
+
+     def forward(self, x):
+         # convolve 32 down to 2 channels
+         out = self.relu1(self.bn1(self.conv1(x)))
+         out = self.conv2(out)
+         print(out.shape)
+         # make channels the last axis
+         out = out.permute(0, 2, 3, 4, 1).contiguous()
+         # flatten
+         out = out.view(out.numel() // 2, 2)
+         out = self.softmax(out)
+         # treat channel 0 as the predicted output
+         return out
+ class AdaptivePoolTransition(nn.Module):
+     def __init__(self,in_channels,out_channels):
+         super().__init__()
+         self.pool=nn.AdaptiveAvgPool3d(1)
+         self.ops=nn.Sequential(
+             nn.Linear(in_channels,in_channels//2),
+             ELUCons(elu=True,nchan=0),
+             nn.Linear(in_channels // 2,out_channels),
+             nn.Sigmoid()
+         )
+     def forward(self,x):
+         B,C,D,H,W=x.shape
+         out=self.pool(x).view(B,C)
+         return self.ops(out)
+
+ class HalfVNet(nn.Module):
+     # the number of convolutions in each layer corresponds
+     # to what is in the actual prototxt, not the intent
+     def __init__(self, elu=True, nll=False):
+         super(HalfVNet, self).__init__()
+         self.in_tr = InputTransition(16, elu)
+         self.down_tr32 = DownTransition(16, 1, elu)
+         self.down_tr64 = DownTransition(32, 2, elu)
+         self.down_tr128 = DownTransition(64, 3, elu, dropout=True)
+         self.down_tr256 = DownTransition(128, 2, elu, dropout=True)
+         # self.up_tr256 = UpTransition(256, 256, 2, elu, dropout=True)
+         # self.up_tr128 = UpTransition(256, 128, 2, elu, dropout=True)
+         # self.up_tr64 = UpTransition(128, 64, 1, elu)
+         # self.up_tr32 = UpTransition(64, 32, 1, elu)
+         # self.out_tr = OutputTransition(32, elu, nll)
+         self.pool=AdaptivePoolTransition(256,6)
+     # The network topology as described in the diagram
+     # in the VNet paper
+     # def __init__(self):
+     #     super(VNet, self).__init__()
+     #     self.in_tr = InputTransition(16)
+     #     # the number of convolutions in each layer corresponds
+     #     # to what is in the actual prototxt, not the intent
+     #     self.down_tr32 = DownTransition(16, 2)
+     #     self.down_tr64 = DownTransition(32, 3)
+     #     self.down_tr128 = DownTransition(64, 3)
+     #     self.down_tr256 = DownTransition(128, 3)
+     #     self.up_tr256 = UpTransition(256, 3)
+     #     self.up_tr128 = UpTransition(128, 3)
+     #     self.up_tr64 = UpTransition(64, 2)
+     #     self.up_tr32 = UpTransition(32, 1)
+     #     self.out_tr = OutputTransition(16)
+     def forward(self, x):
+         out16 = self.in_tr(x)
+         out32 = self.down_tr32(out16)
+         out64 = self.down_tr64(out32)
+         out128 = self.down_tr128(out64)
+         out256 = self.down_tr256(out128)
+         # out = self.up_tr256(out256, out128)
+         # out = self.up_tr128(out, out64)
+         # out = self.up_tr64(out, out32)
+         # out = self.up_tr32(out, out16)
+         # out = self.out_tr(out)
+         out=self.pool(out256)
+         return out
+ if __name__ == '__main__':
+     device='cuda' if torch.cuda.is_available() else 'cpu'
+     print(device)
+     net=HalfVNet().to(device)
+     image=torch.randn((1,1,128,320,320)).to(device)
+     with torch.no_grad():
+         out=net(image)
+     print(out.shape)
+     # m = nn.AdaptiveAvgPool3d((1))
+     # input = torch.randn(1, 64, 8, 9, 10)
+     # output = m(input)
+     # print(output.shape)
Task02_Heart/imagesTr/la_003.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:035de3923ffe7a8bd5e1d8ad8eefb57eeaf9a98074a8f70825aab88464e4e098
+ size 14914026
Task02_Heart/imagesTr/la_004.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a451b2a9c72fafe5385590dd6feadca4f5233ef158c655628c8de86e755ee7d5
+ size 12341450
Task02_Heart/imagesTr/la_005.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7788f025ddce3b1d44c50f18f6fa7cc8299c507d46e251b7245e17b361864d0
+ size 13909546
Task02_Heart/imagesTr/la_007.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ab0c2f2e1697b77a390660b4f647f4cc3b40a02532b55ce355f3736f911eff2
+ size 15468510
Task02_Heart/imagesTr/la_009.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f8606cd46ab9c46d22278c9b772c6e6678a3a94d80a41096ed92229cb82616d
+ size 11266306
Task02_Heart/imagesTr/la_010.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f7271b12dd9475470726efd96506d0b21cdba48eee31bab50123e30bc164775
+ size 13957912
Task02_Heart/imagesTr/la_011.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f74304a7601d7178a8ab21553e152427f2da076c7ac8883e6c7338c589939131
+ size 13892390
Task02_Heart/imagesTr/la_014.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:478c4347fa0dd5432ceb06674b411543b7ab65d44e76cf7ddc5421d495f98486
+ size 15509650
Task02_Heart/imagesTr/la_016.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1a76714f01fc683ab4294a573aa2fb40639a7fe94baa15c7099ff7de604952b
+ size 9021827
Task02_Heart/imagesTr/la_017.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:981d220f34111b6bd252c002debd54098a532f36ea26db02707a9db2287cc41b
+ size 13953197
Task02_Heart/imagesTr/la_018.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:435ef9afe456b9adb25897f5cd6629151c367b72de6ecb49bb3a1211e169b075
+ size 13421517
Task02_Heart/imagesTr/la_019.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c095979fb70cf26432467266de5a973b0bbe010e5dc3818d9505ba9e276aa15f
+ size 10917661
Task02_Heart/imagesTr/la_020.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d09261de17cd568e82bce71c9fc9dd1b15ebd191e29e312e729f5d0dae1b5364
+ size 11430083
Task02_Heart/imagesTr/la_021.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f77e09d51e4343465b860b041790df3d56af8df507aa365f18a53a67173ca94
+ size 11672526
Task02_Heart/imagesTr/la_022.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28ea9f402890502fe59dd284e3345fae70ba8a893ddae018dfdf75ff47c863d7
+ size 12547674
Task02_Heart/imagesTr/la_023.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e23b9a8b24eaa1eef6367da1a1f8ea66ae09037169ee1be8c25fd681e97520b
+ size 13117432
Task02_Heart/imagesTr/la_024.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e6254499e52730b57b4a04345e8708f7c46c7284987c25c72288ec278ec0f72
+ size 13727279
Task02_Heart/imagesTr/la_026.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7788f025ddce3b1d44c50f18f6fa7cc8299c507d46e251b7245e17b361864d0
+ size 13909546
Task02_Heart/imagesTr/la_029.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5ef5bd7ebf3cf04036f4a654ecd4875ab25833fab211d1fcf3bf279d4294d79
+ size 13163134
Task02_Heart/imagesTr/la_030.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fddf07fe1e6ce51ea70a4fcaf47662f454a7e0c9f0995bc2f63ccc4c24cf2a1
+ size 12222499
Task02_Heart/imagesTr/pos_labels.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e694b1c70c9f19cc8517b0747d4642b1d2bfdd93f9ffcc80b54d3bcff24bddd
+ size 1088
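pos_labels.npy holds one row per training volume with the normalized [z_min, z_max, x_min, x_max, y_min, y_max] box produced by get_min_and_max_by_axis() in data_proc_for_3d.py. A minimal sketch (not part of the commit) of mapping a row back to voxel indices on the (128, 320, 320) resized grid, mirroring get_precise_seg_part.py and assuming it is run from the Task02_Heart folder:

import numpy as np

pos_labels = np.load('imagesTr/pos_labels.npy')  # shape: (num_volumes, 6)
ranges = [128, 128, 320, 320, 320, 320]          # same grid as in the scripts
indices = [int(v * r) for v, r in zip(pos_labels[0], ranges)]
print(indices)  # [z_min, z_max, x_min, x_max, y_min, y_max] in voxels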
Task02_Heart/imagesTs/la_001.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2acedfb7b525563814ca53d49288c562edb67f2381075301cb945c7aff35aed3
+ size 44506035
Task02_Heart/imagesTs/la_002.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05477be1b339567c8b304dafa737ad22be268024140197eeae9f14172a76e0c4
+ size 16059519
Task02_Heart/imagesTs/la_006.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dece42503044a926984a0d9903ef5ededf53708d478545e769fa7849a4f93083
+ size 41361304
Task02_Heart/imagesTs/la_008.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71180fbe9e1c87c0229aac1818c3f0012282d584e422a6c497fe6f117155d7a0
+ size 13323685
Task02_Heart/imagesTs/la_012.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d307861e0e77d205ab0b5d9f7b11befc9820e0f11b2c371ff8e7babf732fc488
+ size 16816197