-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtraining_generator.py
126 lines (103 loc) · 5.06 KB
/
training_generator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dropout, Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import preprocess_input
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.applications import VGG16, Xception, VGG19, MobileNetV2, InceptionV3
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import SGD
from PIL import Image
import face_recognition
import os
import shutil
from tensorflow.python.keras.callbacks import EarlyStopping
def create_model(size=125):
    """Build a 2-class (face vs. mask) classifier on a frozen VGG16 base.

    Parameters
    ----------
    size : int
        Height/width of the square RGB input, i.e. input shape is
        (size, size, 3).

    Returns
    -------
    A compiled tf.keras ``Model`` ending in a 2-unit softmax.
    """
    base = VGG16(include_top=False, input_shape=(size, size, 3))
    # Freeze the pretrained convolutional base so only the new head trains.
    for layer in base.layers:
        layer.trainable = False
    flat1 = Flatten()(base.layers[-1].output)
    class1 = Dense(256, activation='relu')(flat1)
    class2 = Dense(128, activation='relu')(class1)
    output = Dense(2, activation='softmax')(class2)
    # Define the new model: pretrained base + freshly initialised head.
    model = Model(inputs=base.inputs, outputs=output)
    # Fix: the labels come from flow_from_directory, whose default
    # class_mode='categorical' yields one-hot vectors for the 2-unit
    # softmax — the matching loss is categorical_crossentropy, not
    # binary_crossentropy (which assumes a single sigmoid/binary target).
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy', metrics=['acc'])
    return model
def train_model(training_path, validation_path, model_path, size=(125, 125)):
    """Train the face/mask classifier on directory-structured image data.

    Parameters
    ----------
    training_path, validation_path : str
        Directories laid out for ``flow_from_directory`` (one subdirectory
        per class: 'face' and 'mask').
    model_path : str
        Directory where checkpoints and the final model are written.
    size : tuple of int
        Target (height, width) for generated images; assumed square.
    """
    # Keep the network's input shape in sync with the generator's target
    # size (previously a 125x125 model was built regardless of `size`).
    model = create_model(size[0])
    # Fix: VGG16's preprocess_input expects raw pixel values in [0, 255]
    # (it subtracts the ImageNet channel means), so the former
    # rescale=1/255 applied *before* it fed near-zero inputs — removed.
    train_datagen = ImageDataGenerator(rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest',
                                       preprocessing_function=preprocess_input)
    train_generator = train_datagen.flow_from_directory(training_path,
                                                        batch_size=10,
                                                        target_size=size)
    validation_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input)
    validation_generator = validation_datagen.flow_from_directory(
        validation_path, batch_size=10, target_size=size)
    os.makedirs(model_path, exist_ok=True)
    checkpoint_dir = os.path.join(model_path, 'checkpoints')
    os.makedirs(checkpoint_dir, exist_ok=True)
    early_stopping = EarlyStopping(monitor="val_loss", min_delta=1e-4,
                                   patience=3, verbose=1, mode="auto")
    # Fix: the checkpoints directory was created but never used — persist
    # the best weights seen so far after each epoch.
    checkpoint = ModelCheckpoint(os.path.join(checkpoint_dir, 'best_model'),
                                 monitor='val_loss', save_best_only=True,
                                 verbose=1)
    model.fit(train_generator, epochs=100,
              validation_data=validation_generator,
              callbacks=[early_stopping, checkpoint])
    # Fix: save under the caller-supplied model_path instead of the
    # hard-coded 'model/my_model' (identical for the default 'model').
    model.save(os.path.join(model_path, 'my_model'))
def split_dataset(face_path, mask_path, training_path, validation_path,
                  train_fraction=0.7):
    """Split raw face/mask images into training and validation sets.

    Each source image is passed through ``get_face`` (crop + resize) on its
    way into ``<training_path|validation_path>/<face|mask>/``. Both target
    trees are wiped and recreated first.

    Parameters
    ----------
    face_path, mask_path : str
        Directories of raw images without / with masks.
    training_path, validation_path : str
        Destination roots; 'face' and 'mask' subdirectories are created.
    train_fraction : float
        Fraction of each class that goes to training (default 0.7,
        matching the previous hard-coded split).
    """
    recreate_dir(training_path)
    recreate_dir(validation_path)

    def _split_class(src_dir, class_name):
        # One-line purpose: route one class's images into train/validation.
        # Sort so the split is deterministic — os.listdir order is
        # arbitrary and varies between runs/filesystems.
        images = sorted(os.listdir(src_dir))
        cut = int(len(images) * train_fraction)
        for name in images[:cut]:
            get_face(os.path.join(src_dir, name),
                     os.path.join(training_path, class_name, name))
        for name in images[cut:]:
            get_face(os.path.join(src_dir, name),
                     os.path.join(validation_path, class_name, name))

    _split_class(face_path, 'face')
    _split_class(mask_path, 'mask')
def recreate_dir(dir):
    """Wipe *dir* (if it exists) and recreate it with empty 'face' and
    'mask' class subdirectories."""
    if os.path.exists(dir):
        shutil.rmtree(dir)
    for label in ('face', 'mask'):
        os.makedirs(os.path.join(dir, label), exist_ok=True)
def get_face(orig, target, size=(125, 125)):
    """Detect the first face in image *orig*, crop, resize, and save.

    Parameters
    ----------
    orig : str
        Path of the source image.
    target : str
        Path the cropped face image is written to.
    size : tuple of int
        Output (width, height) passed to PIL's ``Image.resize``.

    If no face is detected, nothing is written and the function returns
    silently (same best-effort behavior as before).
    """
    image_np = face_recognition.load_image_file(orig)
    faces = face_recognition.face_locations(image_np, model='hog')
    # Guard clause first: the original checked len(faces) == 0 only after
    # a faces[:1] loop, which worked but obscured the intent.
    if not faces:
        return
    top, right, bottom, left = faces[0]
    face_img = Image.fromarray(image_np[top:bottom, left:right])
    face_img.resize(size).save(target)
if __name__ == '__main__':
    # Dataset and model output locations used by the training pipeline.
    training_path = 'model/data/training'
    validation_path = 'model/data/validation'
    model_path = 'model'
    # Uncomment to (re)build the train/validation split from the raw
    # image folders before training.
    # split_dataset('images/without_mask', 'images/with_mask',
    # training_path, validation_path)
    train_model(training_path, validation_path, model_path)