main.py
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from matplotlib import pyplot as plt
from collections import defaultdict, Counter
from lib.face_detection import load_face_detector, detect_faces
from lib.emotion_recognition import load_emotion_model, predict_emotion
from lib.body_pose_estimation import load_pose_model, estimate_pose, draw_keypoints, analyze_pose
def main():
    cap = cv2.VideoCapture('data/video/25snemotion.mp4')
    face_cascade = load_face_detector()
    emotion_model = load_emotion_model()
    try:
        font = ImageFont.truetype("arial.ttf", 24)
    except OSError:
        # arial.ttf is not available on every platform; fall back to Pillow's default font.
        font = ImageFont.load_default()

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cv2.namedWindow('Emotion and Pose Detector', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Emotion and Pose Detector', width, height)

    diagnosis_interval = 10  # seconds per aggregation bucket
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        fps = 30  # some containers report no FPS metadata; assume a default to avoid division by zero
    frames_per_interval = int(fps * diagnosis_interval)

    emotions_dict = defaultdict(list)
    current_frame = 0
    pose_model = load_pose_model()

    # Order must match the emotion model's output classes; defined once, outside the loop.
    label_map = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Pose estimation and keypoint drawing
        keypoints = estimate_pose(pose_model, frame)
        if keypoints is not None and len(keypoints) > 0:
            draw_keypoints(frame, keypoints)
            pose_analysis = analyze_pose(keypoints)
        else:
            pose_analysis = "Insufficient keypoints"

        # Face detection and emotion analysis
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detect_faces(face_cascade, gray)
        frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(frame_pil)
        for (x, y, w, h) in faces:
            face_image = gray[y:y+h, x:x+w]
            emotion_label = predict_emotion(emotion_model, face_image)
            predicted_emotion = label_map[emotion_label]
            draw.text((x, max(0, y - 10)), predicted_emotion, font=font, fill=(0, 255, 0))
            interval = current_frame // frames_per_interval
            emotions_dict[interval].append(predicted_emotion)

        # Overlay the pose analysis result, if any
        if pose_analysis:
            draw.text((10, 30), pose_analysis, font=font, fill=(255, 0, 0))

        frame_with_text = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
        cv2.imshow('Emotion and Pose Detector', frame_with_text)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        current_frame += 1

    cap.release()
    cv2.destroyAllWindows()

    # Analyze and present emotion analysis results per interval
    for interval, emotions in emotions_dict.items():
        emotion_count = Counter(emotions)
        print(f"Time Interval {interval * diagnosis_interval} - {(interval + 1) * diagnosis_interval} seconds:")
        print(emotion_count)
        plt.figure(figsize=(10, 4))
        plt.bar(emotion_count.keys(), emotion_count.values())
        plt.title(f"Emotion Distribution from {interval * diagnosis_interval} to {(interval + 1) * diagnosis_interval} seconds")
        plt.xlabel('Emotions')
        plt.ylabel('Count')
        plt.show()


if __name__ == '__main__':
    main()
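
The three lib modules imported at the top are not shown in this file. For reference, below is a minimal sketch of what lib/face_detection.py and lib/emotion_recognition.py could look like, assuming an OpenCV Haar cascade for detection and a Keras CNN over 48x48 grayscale crops for classification. The model path, input size, and preprocessing here are illustrative assumptions, not the repository's actual implementation; only the function names and call signatures are taken from main.py.

# --- Hypothetical sketch of lib/face_detection.py ---
import cv2

def load_face_detector():
    # Load the frontal-face Haar cascade bundled with opencv-python.
    cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    return cv2.CascadeClassifier(cascade_path)

def detect_faces(face_cascade, gray):
    # Return (x, y, w, h) boxes found in the grayscale frame.
    return face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)


# --- Hypothetical sketch of lib/emotion_recognition.py ---
import cv2
import numpy as np
from tensorflow.keras.models import load_model

def load_emotion_model(path='models/emotion_model.h5'):  # path is an assumption
    return load_model(path)

def predict_emotion(model, face_image):
    # Resize and normalize the crop, add batch/channel dims, and return the
    # argmax class index, which main.py maps through its 7-entry label_map.
    face = cv2.resize(face_image, (48, 48)).astype('float32') / 255.0
    face = face.reshape(1, 48, 48, 1)
    probs = model.predict(face, verbose=0)
    return int(np.argmax(probs))

Note that the Haar-cascade half of this sketch uses only standard OpenCV calls, so it should run as-is; the emotion half depends on whatever architecture and class ordering the real models/ checkpoint was trained with.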