-
Notifications
You must be signed in to change notification settings - Fork 0
/
test.py
77 lines (52 loc) · 1.76 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 18:46:09 2020
@author: dell
"""
# Facial keypoint detection demo: locate faces with a Haar cascade and
# predict keypoints with a pre-trained Keras model.
import cv2, os
# NOTE(review): PIL.Image and time are imported but not used anywhere in
# this visible file — possibly leftovers; confirm before removing.
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
import time
# Machine-specific working directory — all relative paths below
# (model, cascade, sample image) resolve against it.
os.chdir(r"C:\Users\dell\Desktop\Facial KeyPoint detection")
# Load the saved model
from keras.models import load_model
model = load_model('./model1.h5') # <-- Saved model path
def detect_points(face_img):
    """Predict facial keypoints for one 96x96 grayscale face crop.

    Parameters
    ----------
    face_img : array-like
        Grayscale face image already resized to 96x96, pixel values 0-255.

    Returns
    -------
    numpy.ndarray
        Flat array of interleaved coordinates [x1, y1, x2, y2, ...] in
        96x96 pixel space.
    """
    # Scale pixels to [0, 1] to match the model's training preprocessing
    # (presumably — TODO confirm against the training script).
    normalized = np.array(face_img) / 255
    # Add batch and channel dimensions: (96, 96) -> (1, 96, 96, 1).
    x_test = np.expand_dims(normalized, axis=0)
    x_test = np.expand_dims(x_test, axis=3)
    y_test = model.predict(x_test)
    # The model outputs values in roughly [-1, 1]; map back onto the
    # 96x96 pixel grid (centre 48, half-width 48).
    return (np.squeeze(y_test) * 48) + 48
# Load haarcascade for coarse face localisation.
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
dimensions = (96, 96)  # input size the keypoint model expects

# Enter the path to your test image
img = cv2.imread('./sample.jpg')
if img is None:
    # cv2.imread returns None (no exception) on a missing/unreadable file;
    # fail fast instead of crashing later inside cvtColor.
    raise FileNotFoundError("Could not read image './sample.jpg'")
default_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # RGB copy for matplotlib
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)     # cascade + model want grayscale

faces = face_cascade.detectMultiScale(gray_img, 1.3, 5)
# faces = face_cascade.detectMultiScale(gray_img, 4, 6)
faces_img = np.copy(gray_img)
plt.rcParams["axes.grid"] = False

all_x_cords = []
all_y_cords = []
for (x, y, w, h) in faces:
    # Pad the detection by ~5 px on each side so the whole face fits the
    # crop, clamping to the image bounds: a negative x/y would be a
    # negative slice index, silently wrapping and cropping the wrong region.
    x = max(x - 5, 0)
    y = max(y - 5, 0)
    w = min(w + 10, gray_img.shape[1] - x)
    h = min(h + 10, gray_img.shape[0] - y)
    just_face = cv2.resize(gray_img[y:y + h, x:x + w], dimensions)
    cv2.rectangle(faces_img, (x, y), (x + w, y + h), (255, 0, 0), 1)
    # Factors to map 96x96 keypoint coordinates back onto the original crop.
    scale_val_x = w / 96
    scale_val_y = h / 96
    label_point = detect_points(just_face)
    # Even indices are x coordinates, odd indices are y coordinates.
    all_x_cords.append((label_point[::2] * scale_val_x) + x)
    all_y_cords.append((label_point[1::2] * scale_val_y) + y)
    # Show each individual crop with its predicted keypoints.
    plt.imshow(just_face, cmap='gray')
    plt.plot(label_point[::2], label_point[1::2], 'ro', markersize=5)
    plt.show()

# Overlay every detected face's keypoints on the full-colour image.
plt.imshow(default_img)
plt.plot(all_x_cords, all_y_cords, 'wo', markersize=3)
plt.show()