-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathcnn3D.py
294 lines (234 loc) · 11.8 KB
/
cnn3D.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
'''https://www.kaggle.com/sentdex/first-pass-through-data-w-3d-convnet
The data consists of many 2D "slices," which, when combined, produce a 3-dimensional rendering of whatever was scanned. In this case, that's the chest cavity of the patient. We've got CT scans of about 1500 patients, and then we've got another file that contains the labels for this data.'''
#handling data
import dicom # for reading dicom files
import os # for doing directory operations
import pandas as pd # for some simple data analysis (right now, just to load in the labels data and quickly reference it)
# Change this to wherever you are storing your data:
# IF YOU ARE FOLLOWING ON KAGGLE, YOU CAN ONLY PLAY WITH THE SAMPLE DATA, WHICH IS MUCH SMALLER
# Each patient is a directory of DICOM slice files under data_dir.
data_dir = '../input/sample_images/'
# One entry per patient directory; the directory name is the patient id.
patients = os.listdir(data_dir)
# Labels CSV maps patient id -> cancer (0/1); index by id for direct lookup.
labels_df = pd.read_csv('../input/stage1_labels.csv', index_col=0)
# NOTE(review): head() only renders in a notebook; it is a no-op in a script.
labels_df.head()
# Iterate through the patients to gather their respective data.
for patient in patients[:1]:
    # .get_value() was removed in pandas 1.0 -- use .at for scalar lookup.
    label = labels_df.at[patient, 'cancer']
    path = os.path.join(data_dir, patient)
    # a couple great 1-liners from: https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
    slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
    # Sort slices by their z position so the stack is in anatomical order.
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    print(len(slices), label)
    print(slices[0])
# Being 512 x 512, I expect all the slices to be the same size, but check a
# few more patients before settling on a resize strategy:
for patient in patients[:3]:
    # .get_value() was removed in pandas 1.0 -- use .at for scalar lookup.
    label = labels_df.at[patient, 'cancer']
    path = os.path.join(data_dir, patient)
    # a couple great 1-liners from: https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
    slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    # In-plane shape of the first slice, and how many slices this patient has.
    print(slices[0].pixel_array.shape, len(slices))
# Visualize the first slice of the first patient.
import matplotlib.pyplot as plt

for patient in patients[:1]:
    # .get_value() was removed in pandas 1.0 -- use .at for scalar lookup.
    label = labels_df.at[patient, 'cancer']
    path = os.path.join(data_dir, patient)
    slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    # Show the first (lowest z) slice.
    plt.imshow(slices[0].pixel_array)
    plt.show()
# Look at the first 12 slices, resized with OpenCV.
# NOTE: the pip package is "opencv-python" (not "cv2"); the import name is cv2.
import cv2
import numpy as np

IMG_PX_SIZE = 150

for patient in patients[:1]:
    # .get_value() was removed in pandas 1.0 -- use .at for scalar lookup.
    label = labels_df.at[patient, 'cancer']
    path = os.path.join(data_dir, patient)
    slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    fig = plt.figure()
    for num, each_slice in enumerate(slices[:12]):
        # 3x4 grid holds the 12 slices; subplot indices are 1-based.
        y = fig.add_subplot(3, 4, num + 1)
        new_img = cv2.resize(np.array(each_slice.pixel_array),
                             (IMG_PX_SIZE, IMG_PX_SIZE))
        y.imshow(new_img)
    plt.show()
# we need to address the whole non-uniformity of depth next.
'''
what we have is really a big list of slices. What we need is to be able to just take any list of images, whether it's got 200 scans,
150 scans, or 300 scans, and set it to be some fixed number. Let's say we want to have 20 scans instead. How can we do this?
Well, first, we need something that will take our current list of scans, and chunk it into a list of lists of scans.
we can just average them together. My theory is that a scan is a few millimeters of actual tissue at most.
Thus, we can hopefully just average this slice together, and maybe we're now working with a centimeter or so.
If there's a growth there, it should still show up on scan.
This is just a theory, it has to be tested.
As we continue through this, however, you're hopefully going to see just how many theories we come up with,
and how many variables we can tweak and change to possibly get better results.
'''
import math
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter).

    Credit: Ned Batchelder
    http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    """
    start = 0
    total = len(l)
    while start < total:
        yield l[start:start + n]
        start += n
def mean(l):
    """Return the arithmetic mean of the values in l."""
    total = sum(l)
    return total / len(l)
# Target in-plane resolution for each resized slice.
IMG_PX_SIZE = 150
# "How many slices" each patient volume is reduced to along the depth axis.
HM_SLICES = 20
data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
labels_df = pd.read_csv('../input/stage1_labels.csv', index_col=0)
# First pass: resize every slice, then average groups of slices together to
# reduce each patient's depth toward HM_SLICES.
for patient in patients[:10]:
    try:
        # .at raises KeyError for unlabeled patients (.get_value() was removed
        # in pandas 1.0).
        label = labels_df.at[patient, 'cancer']
        path = os.path.join(data_dir, patient)
        slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
        slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))

        slices = [cv2.resize(np.array(each_slice.pixel_array),
                             (IMG_PX_SIZE, IMG_PX_SIZE)) for each_slice in slices]

        new_slices = []
        chunk_sizes = math.ceil(len(slices) / HM_SLICES)
        for slice_chunk in chunks(slices, chunk_sizes):
            # Element-wise average of all the slices in this chunk.
            slice_chunk = list(map(mean, zip(*slice_chunk)))
            new_slices.append(slice_chunk)

        print(len(slices), len(new_slices))
    except KeyError:
        # Some patients don't have labels; skip those. (Was a bare "except:",
        # which also hid genuine bugs.)
        pass
# Second pass: same reduction, then pad (duplicate the last slice) or merge
# (average the overflow slices) so each patient ends with exactly HM_SLICES.
for patient in patients[:10]:
    try:
        # .get_value() was removed in pandas 1.0 -- use .at for scalar lookup.
        label = labels_df.at[patient, 'cancer']
        path = os.path.join(data_dir, patient)
        slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
        slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))

        slices = [cv2.resize(np.array(each_slice.pixel_array),
                             (IMG_PX_SIZE, IMG_PX_SIZE)) for each_slice in slices]

        new_slices = []
        chunk_sizes = math.ceil(len(slices) / HM_SLICES)
        for slice_chunk in chunks(slices, chunk_sizes):
            slice_chunk = list(map(mean, zip(*slice_chunk)))
            new_slices.append(slice_chunk)

        # NOTE: plain "if"s (not "elif") are load-bearing here: when the count
        # is HM_SLICES+2 the first merge drops it to HM_SLICES+1 and the next
        # branch then merges once more, landing on exactly HM_SLICES.
        if len(new_slices) == HM_SLICES - 1:
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES - 2:
            new_slices.append(new_slices[-1])
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES + 2:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES - 1],
                                           new_slices[HM_SLICES]])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES - 1] = new_val
        if len(new_slices) == HM_SLICES + 1:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES - 1],
                                           new_slices[HM_SLICES]])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES - 1] = new_val

        print(len(slices), len(new_slices))
    except Exception as e:
        # Again, some patients are not labeled, but just in case we still want
        # the error if something else is wrong with our code.
        print(str(e))
''' to improve this chunking/averaging code.
We figured out a way to make sure our 3 dimensional data can be at any resolution we want or need.'''
'''
One major issue is these colors and ranges of data.
It's unclear to me whether or not a model would appreciate that. Even if we do a grayscale colormap in the imshow,
you'll see that some scans are just darker overall than others. This might be problematic and we might need to actually
normalize this dataset. I expect that, with a large enough dataset, this wouldn't be an actual issue, but,
with this size of data, it might be of huge importance.
'''
# Visualize the fully reduced (resized + depth-normalized) volume for one patient.
for patient in patients[:1]:
    # .get_value() was removed in pandas 1.0 -- use .at for scalar lookup.
    label = labels_df.at[patient, 'cancer']
    path = os.path.join(data_dir, patient)
    slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))

    slices = [cv2.resize(np.array(each_slice.pixel_array),
                         (IMG_PX_SIZE, IMG_PX_SIZE)) for each_slice in slices]

    new_slices = []
    chunk_sizes = math.ceil(len(slices) / HM_SLICES)
    for slice_chunk in chunks(slices, chunk_sizes):
        slice_chunk = list(map(mean, zip(*slice_chunk)))
        new_slices.append(slice_chunk)

    # Pad or merge to exactly HM_SLICES; plain "if"s are intentional -- the
    # +2 case cascades into the +1 case (two merges land on HM_SLICES).
    if len(new_slices) == HM_SLICES - 1:
        new_slices.append(new_slices[-1])
    if len(new_slices) == HM_SLICES - 2:
        new_slices.append(new_slices[-1])
        new_slices.append(new_slices[-1])
    if len(new_slices) == HM_SLICES + 2:
        new_val = list(map(mean, zip(*[new_slices[HM_SLICES - 1],
                                       new_slices[HM_SLICES]])))
        del new_slices[HM_SLICES]
        new_slices[HM_SLICES - 1] = new_val
    if len(new_slices) == HM_SLICES + 1:
        new_val = list(map(mean, zip(*[new_slices[HM_SLICES - 1],
                                       new_slices[HM_SLICES]])))
        del new_slices[HM_SLICES]
        new_slices[HM_SLICES - 1] = new_val

    # 4x5 grid shows all HM_SLICES (20) reduced slices in grayscale.
    fig = plt.figure()
    for num, each_slice in enumerate(new_slices):
        y = fig.add_subplot(4, 5, num + 1)
        y.imshow(each_slice, cmap='gray')
    plt.show()
'''
Preprocessing the data, to make the neural net code much simpler
Now, I will just make a slight modification to all of the code up to this point,
and add some new final lines to preprocess this data and save the array of arrays to a file:
'''
import numpy as np
import pandas as pd
import dicom
import os
import matplotlib.pyplot as plt
import cv2
import math
# Final preprocessing resolution: 50x50 pixels per slice, 20 slices per patient.
IMG_SIZE_PX = 50
SLICE_COUNT = 20
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter).

    Credit: Ned Batchelder
    http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    """
    for offset in range(0, len(l), n):
        chunk = l[offset:offset + n]
        yield chunk
def mean(a):
    """Return the arithmetic mean of the values in a."""
    return sum(a) / len(a) if False else sum(a) / len(a)
def process_data(patient, labels_df, img_px_size=50, hm_slices=20, visualize=False):
    """Load one patient's CT scan and reduce it to a fixed-size 3D volume.

    Reads every DICOM slice in the patient's directory, resizes each slice to
    img_px_size x img_px_size, then averages groups of slices (and pads or
    merges the remainder) down to exactly hm_slices slices.

    Args:
        patient: patient id (also the directory name under module-level data_dir).
        labels_df: DataFrame indexed by patient id with a 'cancer' column.
        img_px_size: square in-plane size each slice is resized to.
        hm_slices: number of slices in the returned volume.
        visualize: if True, display the reduced slices in a 4x5 matplotlib grid.

    Returns:
        (volume, label): volume is an ndarray of hm_slices averaged slices;
        label is one-hot -- np.array([0, 1]) for cancer, np.array([1, 0]) for not.

    Raises:
        KeyError: if patient has no entry in labels_df.
    """
    # .get_value() was removed in pandas 1.0 -- .at is the scalar replacement.
    label = labels_df.at[patient, 'cancer']
    path = os.path.join(data_dir, patient)  # relies on module-level data_dir
    slices = [dicom.read_file(os.path.join(path, s)) for s in os.listdir(path)]
    # Sort by z position so the stack is in anatomical order.
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))

    slices = [cv2.resize(np.array(each_slice.pixel_array),
                         (img_px_size, img_px_size)) for each_slice in slices]

    new_slices = []
    chunk_sizes = math.ceil(len(slices) / hm_slices)
    for slice_chunk in chunks(slices, chunk_sizes):
        # Element-wise average of the slices in this chunk.
        slice_chunk = list(map(mean, zip(*slice_chunk)))
        new_slices.append(slice_chunk)

    # Pad (duplicate the last slice) or merge (average the overflow slices) so
    # the result has exactly hm_slices entries. Plain "if"s are intentional:
    # the +2 case cascades into the +1 case, merging twice to land on hm_slices.
    if len(new_slices) == hm_slices - 1:
        new_slices.append(new_slices[-1])
    if len(new_slices) == hm_slices - 2:
        new_slices.append(new_slices[-1])
        new_slices.append(new_slices[-1])
    if len(new_slices) == hm_slices + 2:
        new_val = list(map(mean, zip(*[new_slices[hm_slices - 1],
                                       new_slices[hm_slices]])))
        del new_slices[hm_slices]
        new_slices[hm_slices - 1] = new_val
    if len(new_slices) == hm_slices + 1:
        new_val = list(map(mean, zip(*[new_slices[hm_slices - 1],
                                       new_slices[hm_slices]])))
        del new_slices[hm_slices]
        new_slices[hm_slices - 1] = new_val

    if visualize:
        fig = plt.figure()
        for num, each_slice in enumerate(new_slices):
            y = fig.add_subplot(4, 5, num + 1)
            y.imshow(each_slice, cmap='gray')
        plt.show()

    # One-hot encode the binary label for the network's 2-unit output.
    if label == 1:
        label = np.array([0, 1])
    elif label == 0:
        label = np.array([1, 0])
    return np.array(new_slices), label
# Stage 1 for real: preprocess every patient and save the result to disk.
data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
labels = pd.read_csv('../input/stage1_labels.csv', index_col=0)

much_data = []
for num, patient in enumerate(patients):
    if num % 100 == 0:
        print(num)  # progress indicator
    try:
        img_data, label = process_data(patient, labels,
                                       img_px_size=IMG_SIZE_PX,
                                       hm_slices=SLICE_COUNT)
        # print(img_data.shape, label)
        much_data.append([img_data, label])
    except KeyError:
        # Patients absent from the labels CSV raise KeyError on label lookup.
        print('This is unlabeled data!')

# Original line was missing the closing parenthesis (SyntaxError). much_data is
# a ragged list of [volume, label] pairs, so modern NumPy needs an explicit
# object array (np.save would otherwise fail to coerce it).
np.save('muchdata-{}-{}-{}.npy'.format(IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT),
        np.array(much_data, dtype=object))