# Model.py — U-Net style Keras segmentation model definitions.
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation,Input,concatenate,Dropout,Conv2DTranspose,UpSampling2D
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras import backend as keras
def unet(input_size=(256, 256, 3)):
    """Build the classic U-Net (Ronneberger et al., 2015) for binary segmentation.

    Args:
        input_size: Input tensor shape as (height, width, channels). Height and
            width must be divisible by 16 (four 2x2 max-poolings). Defaults to
            (256, 256, 3), matching the previous hard-coded behavior.

    Returns:
        An *uncompiled* tf.keras Model mapping images of shape ``input_size``
        to a single-channel sigmoid mask with the same spatial size.
    """

    def _double_conv(x, filters):
        # Two 3x3 same-padded ReLU convolutions, He-normal initialized.
        for _ in range(2):
            x = Conv2D(filters, 3, activation='relu', padding='same',
                       kernel_initializer='he_normal')(x)
        return x

    def _up_conv(x, filters):
        # 2x upsampling followed by a 2x2 conv to halve the channel count.
        x = UpSampling2D(size=(2, 2))(x)
        return Conv2D(filters, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(x)

    inputs = Input(input_size)

    # Contracting path: double conv, then 2x2 max-pool at each scale.
    conv1 = _double_conv(inputs, 64)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _double_conv(pool1, 128)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _double_conv(pool2, 256)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _double_conv(pool3, 512)
    drop4 = Dropout(0.5)(conv4)  # dropout only on the two deepest levels
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    drop5 = Dropout(0.5)(_double_conv(pool4, 1024))

    # Expanding path: upsample, concatenate the skip connection on the
    # channel axis, then double conv. Skip order [skip, up] as before.
    conv6 = _double_conv(concatenate([drop4, _up_conv(drop5, 512)], axis=3), 512)
    conv7 = _double_conv(concatenate([conv3, _up_conv(conv6, 256)], axis=3), 256)
    conv8 = _double_conv(concatenate([conv2, _up_conv(conv7, 128)], axis=3), 128)
    conv9 = _double_conv(concatenate([conv1, _up_conv(conv8, 64)], axis=3), 64)

    # Head: 2-channel 3x3 conv, then a 1x1 sigmoid for the binary mask.
    conv9 = Conv2D(2, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(conv9)
    outputs = Conv2D(1, 1, activation='sigmoid')(conv9)

    # Compilation is deliberately left to the caller (optimizer/loss vary).
    return Model(inputs, outputs)
def simple_model(input_shape):
    """Small encoder-decoder segmentation network.

    Two 2x down-samplings, two 2x up-samplings, and a per-pixel
    2-class softmax head.

    Args:
        input_shape: Shape of the input images, e.g. (H, W, C).

    Returns:
        An uncompiled tf.keras Model producing an (H, W, 2) softmax map.
    """
    image_in = Input(shape=input_shape)

    # --- Encoder: conv + pool, twice ---
    enc = Conv2D(32, (3, 3), activation='relu', padding='same')(image_in)
    enc = MaxPooling2D(pool_size=(2, 2))(enc)
    enc = Conv2D(64, (3, 3), activation='relu', padding='same')(enc)
    enc = MaxPooling2D(pool_size=(2, 2))(enc)

    # --- Decoder: upsample + conv, twice ---
    dec = UpSampling2D(size=(2, 2))(enc)
    dec = Conv2D(64, (3, 3), activation='relu', padding='same')(dec)
    dec = UpSampling2D(size=(2, 2))(dec)
    dec = Conv2D(32, (3, 3), activation='relu', padding='same')(dec)

    # Per-pixel two-class probabilities.
    probs = Conv2D(2, (1, 1), activation='softmax', padding='same')(dec)
    return Model(inputs=image_in, outputs=probs)
def Unet1(input_shape=(256, 256, 3)):
    """U-Net variant using Conv2DTranspose upsampling; compiled and summarized.

    Args:
        input_shape: (height, width, channels) of the input images. Height and
            width must be divisible by 16 (four 2x2 max-poolings). Defaults to
            (256, 256, 3), preserving the previous hard-coded behavior.

    Returns:
        A tf.keras Model compiled with Adam / binary cross-entropy whose
        output is a 2-channel per-pixel sigmoid map.
    """
    # Filter widths per level; the last entry is the bottleneck.
    filters = [64, 128, 256, 512, 1024]

    inputs = Input(input_shape)

    # Encoder: double conv + 2x2 max-pool per level; keep the pre-pool
    # activations as skip connections for the decoder.
    skips = []
    x = inputs
    for f in filters[:-1]:
        x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
        skips.append(x)
        x = MaxPooling2D((2, 2))(x)

    # Bottleneck.
    x = Conv2D(filters[-1], (3, 3), activation='relu', padding='same')(x)
    x = Conv2D(filters[-1], (3, 3), activation='relu', padding='same')(x)

    # Decoder: transposed conv (2x), concatenate skip ([up, skip] order,
    # as before), then double conv — deepest level first.
    for f, skip in zip(reversed(filters[:-1]), reversed(skips)):
        x = Conv2DTranspose(f, (2, 2), strides=(2, 2), padding='same')(x)
        x = concatenate([x, skip])
        x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)

    # 2-channel sigmoid head.
    # NOTE(review): sigmoid + binary_crossentropy over 2 output channels
    # treats the channels as independent binary maps (original comment
    # suggests a yes/no mask plus a lane heatmap) — confirm against labels.
    outputs = Conv2D(2, (1, 1), activation='sigmoid')(x)

    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()  # side effect preserved: prints the architecture
    return model