
Commit 0d39b37

images added
1 parent d8e07a5 commit 0d39b37

File tree

9 files changed: +143 -642 lines


assets/images/screen.jpg

-67.6 KB

assets/images/screen.png

-126 KB

lungunetmodel_new.py

Lines changed: 128 additions & 0 deletions
"""
deactivate
conda.bat deactivate
LungUNETCPUEnv\Scripts\activate
python lungunetmodel_new.py
"""
import os
import warnings
warnings.filterwarnings('ignore')

from copy import deepcopy

import numpy as np  # linear algebra
import cv2
import tensorflow as tf

from keras import backend as K
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate
from keras.optimizers import Adam

# Repository root; assumed here to be the directory this script lives in.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))


def dice_coef(y_true, y_pred):
    """Dice coefficient with +1 smoothing to avoid division by zero."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + 1) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1)


def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)


def unet(input_size=(256, 256, 1)):
    """U-Net encoder/decoder with skip connections and a sigmoid output layer."""
    inputs = Input(input_size)

    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    return Model(inputs=[inputs], outputs=[conv10])


model = unet(input_size=(512, 512, 1))
model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss,
              metrics=[dice_coef, 'binary_accuracy'])
# model.summary()

weight_path = "cxr_reg_weights.best.hdf5"
model_weights_path = os.path.join(ROOT_DIR, "Weights", weight_path)
model.load_weights(model_weights_path)

# Shape that input images are resized to before inference.
Shape_X, Shape_Y = 512, 512


def create_folders(folders):
    """Create the output folders if they do not already exist."""
    for folder in folders:
        os.makedirs(folder, exist_ok=True)


def read_image(img_path):
    # Read as grayscale so the array matches the model's single input channel.
    image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    image = cv2.resize(image, (Shape_Y, Shape_X))
    return image


def get_preds(image):
    # Scale pixels to roughly [-1, 1] and add batch and channel dimensions.
    prep_unet_input_img = (image.reshape(1, Shape_X, Shape_Y, 1) - 127.0) / 127.0
    pred_img = model.predict(prep_unet_input_img)
    # Threshold the sigmoid output into a binary 8-bit mask (0 or 255).
    pred_mask = (np.squeeze(pred_img) > 0.5).astype(np.uint8) * 255
    # Keep only the lung region of the original image.
    res = cv2.bitwise_and(image, image, mask=pred_mask)
    return res, pred_mask


if __name__ == '__main__':
    INP = os.path.join(ROOT_DIR, "Sample_Inputs")
    INP_RESHAPED = os.path.join(ROOT_DIR, "Sample_Inputs_Reshaped")
    RES = os.path.join(ROOT_DIR, "Sample_Masked_Results")
    MASK_PATH = os.path.join(ROOT_DIR, "Sample_Lung_Masks")

    create_folders([INP, INP_RESHAPED, RES, MASK_PATH])

    # Outputs per image: the reshaped original, the lung-masked (superimposed)
    # segmentation result, and the binary lung mask.
    input_files = os.listdir(INP)
    for i, f in enumerate(input_files):
        img = read_image(os.path.join(INP, f))
        reshaped_img = deepcopy(img)
        segmented_output, mask = get_preds(reshaped_img)

        cv2.imwrite(os.path.join(INP_RESHAPED, f), reshaped_img)
        cv2.imwrite(os.path.join(RES, f), segmented_output)
        cv2.imwrite(os.path.join(MASK_PATH, f), mask)
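
The script's batch loop above covers the sample folders; the same helpers can also be driven one image at a time. A minimal sketch, not part of this commit, assuming it runs next to lungunetmodel_new.py with the repo's Weights/ folder present, and using a placeholder file name "my_cxr.png":

# sketch: segment a single chest X-ray with the helpers from lungunetmodel_new.py
import cv2
from lungunetmodel_new import read_image, get_preds  # importing builds the model and loads the weights

img = read_image("my_cxr.png")        # grayscale, resized to 512x512 (placeholder path)
segmented, mask = get_preds(img)      # lung-masked image and binary lung mask
cv2.imwrite("my_cxr_masked.png", segmented)
cv2.imwrite("my_cxr_lung_mask.png", mask)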

requirements.txt

Lines changed: 7 additions & 2 deletions
@@ -1,6 +1,11 @@
 numpy
-tensorflow-cpu==2.4.1
-keras
+tensorflow-cpu==2.5.0
+# tensorflow-cpu==2.1.0 # ImportError: Keras requires TensorFlow 2.2 or higher. Install TensorFlow via `pip install tensorflow`
+# tensorflow-cpu==2.4.1 # AttributeError: module 'tensorflow.compat.v2.__internal__' has no attribute 'tf2'
+# tensorflow-cpu==2.4.0 # AttributeError: module 'tensorflow.compat.v2.__internal__' has no attribute 'tf2'
+
+# keras AttributeError: module 'keras.utils.generic_utils' has no attribute 'populate_dict_with_module_objects'
+keras==2.4.0
 # tensorflow-cpu==2.5.0
 # tensorflow==2.1.0
 # tensorflow==2.4.0
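
The comments in the diff record which TensorFlow/Keras combinations failed; a quick sanity check like the sketch below (not part of this commit) confirms the pinned versions are the ones actually installed in the environment.

# sketch: verify the installed versions match requirements.txt
import tensorflow as tf
import keras

print("tensorflow:", tf.__version__)  # expected 2.5.0 per requirements.txt
print("keras:", keras.__version__)    # expected 2.4.x per requirements.txt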
