-
Notifications
You must be signed in to change notification settings - Fork 0
/
face_keypt_detec.py
95 lines (68 loc) · 2.86 KB
/
face_keypt_detec.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 17:30:49 2020
@author: dell
"""
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
# NOTE(review): hard-coded, machine-specific working directory; the relative
# 'training.csv' path in data_loader() resolves against it — consider making
# this configurable instead of chdir-ing at import time.
os.chdir(r"C:\Users\dell\Desktop\Facial KeyPoint detection")
def data_loader():
    """Load the facial-keypoints training set from 'training.csv'.

    Returns:
        imgs_array: float32 array of shape (n, 96, 96, 1), pixel values
            normalized to (0, 1).
        labels_array: float32 array of shape (n, 30), the 15 (x, y)
            keypoint coordinates rescaled from the 96x96 pixel grid
            to (-1, 1).
    """
    data_frame = pd.read_csv('training.csv')
    # 'Image' holds 96*96 space-separated pixel values as one string.
    # np.fromstring(..., sep=' ') is deprecated, so parse via split().
    data_frame['Image'] = data_frame['Image'].apply(
        lambda i: np.array(i.split(), dtype=np.float32))
    # Forward-fill missing keypoint values; fillna(method='ffill') is
    # deprecated (removed in pandas 3) in favour of ffill().
    data_frame = data_frame.ffill()
    # data_frame = data_frame.dropna() # Get only the data with 15 keypoints
    imgs_array = np.vstack(data_frame['Image'].values) / 255.0
    imgs_array = imgs_array.astype(np.float32)  # Normalize, target values to (0, 1)
    imgs_array = imgs_array.reshape(-1, 96, 96, 1)
    # Every column except the last ('Image') is a keypoint coordinate.
    labels_array = data_frame[data_frame.columns[:-1]].values
    labels_array = (labels_array - 48) / 48  # Normalize, target coordinates to (-1, 1)
    labels_array = labels_array.astype(np.float32)
    return imgs_array, labels_array
# imgs, labels = data_loader()
# print(imgs.shape)
# print(labels.shape)
# n=0
# labels[n] = (labels[n]*48)+48
# image = np.squeeze(imgs[n])
# plt.imshow(image, cmap='gray')
# plt.plot(labels[n][::2], labels[n][1::2], 'ro')
# plt.show()
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, GlobalAveragePooling2D, Activation
from keras.layers import Flatten, Dense
# keras.layers.normalization was removed in Keras 2.6+; BatchNormalization
# is exported from keras.layers directly, so import it from there.
from keras.layers import BatchNormalization
from keras import optimizers
from keras.callbacks import ModelCheckpoint, History
# from keras.optimizers import Adam
def the_model(input_shape=(96, 96, 1)):
    """Build the keypoint-regression CNN.

    Args:
        input_shape: shape of one input image, default (96, 96, 1) to
            match data_loader()'s output. The original read the global
            X_train here, which broke if the model was built before the
            data was loaded; the parameter keeps the same default
            behavior while removing that hidden dependency.

    Returns:
        An uncompiled Sequential model ending in 30 linear outputs
        (15 (x, y) keypoint pairs, normalized to (-1, 1)).
    """
    model = Sequential()
    # Five conv + 2x2-pool stages, doubling the filter count each time:
    # spatial size 96 -> 48 -> 24 -> 12 -> 6 -> 3.
    model.add(Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=2))
    # Convert all values to 1D array
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.2))
    # Linear activation: this is coordinate regression, not classification.
    model.add(Dense(30))
    return model
# --- Training script -------------------------------------------------------
X_train, y_train = data_loader()
epochs = 60
batch_size = 64
model = the_model()
hist = History()
# save_best_only keeps only the weights with the lowest validation loss
# (the default monitored quantity when validation data is present).
checkpointer = ModelCheckpoint(filepath='checkpoint1.hdf5',
                               verbose=1, save_best_only=True)
# Mean absolute error is a meaningful metric for coordinate regression;
# the original 'accuracy' metric is meaningless for continuous targets.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
model_fit = model.fit(X_train, y_train, validation_split=0.2, epochs=epochs,
                      batch_size=batch_size, callbacks=[checkpointer, hist],
                      verbose=1)
model.save('model1.h5')