diff --git a/.gitmodules b/.gitmodules
index f4f33b52..3901d549 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -38,3 +38,7 @@
 	path = image_recognition_pose_estimation/docs
 	url = https://github.com/tue-robotics/tue_documentation_python.git
 	branch = master
+[submodule "image_recognition_face_recognition/docs"]
+	path = image_recognition_face_recognition/docs
+	url = https://github.com/tue-robotics/tue_documentation_python.git
+	branch = master
diff --git a/image_recognition/package.xml b/image_recognition/package.xml
index d13f5293..2fd7b345 100644
--- a/image_recognition/package.xml
+++ b/image_recognition/package.xml
@@ -15,6 +15,7 @@
   <exec_depend>image_recognition_age_gender</exec_depend>
   <exec_depend>image_recognition_analysis</exec_depend>
   <exec_depend>image_recognition_color_extractor</exec_depend>
+  <exec_depend>image_recognition_face_recognition</exec_depend>
   <exec_depend>image_recognition_footwear</exec_depend>
   <exec_depend>image_recognition_msgs</exec_depend>
   <exec_depend>image_recognition_openface</exec_depend>
diff --git a/image_recognition_face_recognition/CMakeLists.txt b/image_recognition_face_recognition/CMakeLists.txt
new file mode 100644
index 00000000..4319b516
--- /dev/null
+++ b/image_recognition_face_recognition/CMakeLists.txt
@@ -0,0 +1,20 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(image_recognition_face_recognition)
+
+find_package(catkin REQUIRED)
+
+catkin_python_setup()
+
+catkin_package()
+
+install(PROGRAMS
+  scripts/get_face_recognition
+  scripts/face_recognition_node
+  scripts/train_from_images
+  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
+
+if(CATKIN_ENABLE_TESTING)
+  find_package(catkin_lint_cmake REQUIRED)
+  catkin_add_catkin_lint_test()
+endif()
diff --git a/image_recognition_face_recognition/README.md b/image_recognition_face_recognition/README.md
new file mode 100644
index 00000000..fde94d30
--- /dev/null
+++ b/image_recognition_face_recognition/README.md
@@ -0,0 +1,70 @@
+# image_recognition_face_recognition
+
+TO DO
+
+Face recognition using Facenet (https://github.com/timesler/facenet-pytorch/tree/master).
+The paper can be found here: [paper](https://arxiv.org/pdf/1503.03832.pdf)
+
+## Installation
+
+See https://github.com/tue-robotics/image_recognition
+
+Make sure you have installed **CUDA8**.
+
+Bash install commands will be available once this is merged.
+
+## How-to
+
+### Run Face Recognition Node
+
+Run the following commands to use your camera as an input (using the cv_camera node):
+
+    rosrun cv_camera cv_camera_node
+    rosrun image_recognition_face_recognition face_recognition_node image:=/cv_camera/image_raw
+
+### ROS Node
+
+Run the image_recognition_face_recognition node in one terminal (specify the dlib and openface_net paths as ROS parameters):
+
+    rosrun image_recognition_face_recognition face_recognition_node
+
+Run the rqt annotation client (https://github.com/tue-robotics/image_recognition_rqt):
+
+    rosrun image_recognition_rqt annotation_gui
+
+Set up the service by clicking the gear wheel in the top-right corner. Select the `/annotate` service exposed by the face recognition ROS node. Also don't forget to set up your labels.
+
+![Configuration](doc/config.png)
+
+Now draw a rectangle around the face you would like to learn. The face recognizer will find the biggest face in the image and store a representation for it.
+
+![Annotate](doc/annotate.png)
+
+Now select the label and you will see that the node stores the face (console output of the node):
+
+    [INFO] [WallTime: 1478636380.407308] Successfully learned face of 'rokus'
+
+Learn as many faces as you want.
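+
+Annotation can also be scripted against the same `annotate` service. A minimal sketch (the service and message definitions come from image_recognition_msgs; `face.png` is a hypothetical example image, and the whole image is used as the annotation ROI):
+
+    import rospy
+    import cv2
+    from cv_bridge import CvBridge
+    from sensor_msgs.msg import RegionOfInterest
+    from image_recognition_msgs.msg import Annotation
+    from image_recognition_msgs.srv import Annotate
+
+    rospy.init_node("annotate_client")
+    rospy.wait_for_service("annotate")
+    annotate = rospy.ServiceProxy("annotate", Annotate)
+
+    img = cv2.imread("face.png")
+    annotate(image=CvBridge().cv2_to_imgmsg(img, "bgr8"),
+             annotations=[Annotation(label="rokus",
+                                     roi=RegionOfInterest(x_offset=0, y_offset=0,
+                                                          width=img.shape[1],
+                                                          height=img.shape[0]))])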
+
+The next step is starting the image_recognition_rqt test GUI (https://github.com/tue-robotics/image_recognition_rqt):
+
+    rosrun image_recognition_rqt test_gui
+
+Again, configure the service you want to call with the gear wheel in the top-right corner of the screen. If everything is set up, draw a rectangle in the image and ask the service for detections:
+
+![Test](doc/test_face.png)
+
+The detection result will be shown in a dialog combo box, and the detections will be drawn on the image.
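+
+The `recognize` service can be called from a script as well. A minimal sketch (assuming the node runs with the default service name; `test.png` is a hypothetical example image):
+
+    import rospy
+    import cv2
+    from cv_bridge import CvBridge
+    from image_recognition_msgs.srv import Recognize
+
+    rospy.init_node("recognize_client")
+    rospy.wait_for_service("recognize")
+    recognize = rospy.ServiceProxy("recognize", Recognize)
+
+    response = recognize(image=CvBridge().cv2_to_imgmsg(cv2.imread("test.png"), "bgr8"))
+    for recognition in response.recognitions:
+        print(recognition.roi, recognition.categorical_distribution)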
+
+### Command line
+
+Command line interface to test the detection / recognition based on an image:
+
+    usage: get_face_recognition IMAGE [-k ALIGN_PATH] [-s NET_PATH] [-v]
+
+Run the command on an example image:
+
+    rosrun image_recognition_face_recognition get_face_recognition `rospack find image_recognition_face_recognition`/doc/example.png
+
+![Example](doc/example.png)
\ No newline at end of file
diff --git a/image_recognition_face_recognition/docs b/image_recognition_face_recognition/docs
new file mode 160000
index 00000000..e81558db
--- /dev/null
+++ b/image_recognition_face_recognition/docs
@@ -0,0 +1 @@
+Subproject commit e81558db940b4e13b67d84919f55e942a5ceec0f
diff --git a/image_recognition_face_recognition/package.xml b/image_recognition_face_recognition/package.xml
new file mode 100644
index 00000000..97d5ea87
--- /dev/null
+++ b/image_recognition_face_recognition/package.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>image_recognition_face_recognition</name>
+  <version>0.0.1</version>
+  <description>The image_recognition_face_recognition package</description>
+
+  <maintainer>iason</maintainer>
+
+  <license>MIT</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+
+  <build_depend>python3-setuptools</build_depend>
+
+  <depend>roscpp</depend>
+  <depend>cv_bridge</depend>
+  <depend>diagnostic_updater</depend>
+  <depend>image_recognition_msgs</depend>
+  <depend>image_recognition_util</depend>
+  <depend>python-dlib</depend>
+  <depend>python3-numpy</depend>
+  <depend>python3-opencv</depend>
+  <depend>rospy</depend>
+
+  <test_depend>catkin_lint_cmake</test_depend>
+
+  <doc_depend>python3-sphinx</doc_depend>
+  <doc_depend>python-sphinx-autoapi-pip</doc_depend>
+  <doc_depend>python-sphinx-rtd-theme-pip</doc_depend>
+  <doc_depend>python3-yaml</doc_depend>
+
+  <export>
+  </export>
+</package>
diff --git a/image_recognition_face_recognition/rosdoc.yaml b/image_recognition_face_recognition/rosdoc.yaml
new file mode 100644
index 00000000..d6daeddf
--- /dev/null
+++ b/image_recognition_face_recognition/rosdoc.yaml
@@ -0,0 +1,3 @@
+- builder: sphinx
+  sphinx_root_dir: docs
+  name: Python API
diff --git a/image_recognition_face_recognition/scripts/face_recognition_node b/image_recognition_face_recognition/scripts/face_recognition_node
new file mode 100755
index 00000000..8e93c573
--- /dev/null
+++ b/image_recognition_face_recognition/scripts/face_recognition_node
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+
+import math
+import os
+import sys
+
+import diagnostic_updater
+import rospy
+from cv_bridge import CvBridge, CvBridgeError
+from sensor_msgs.msg import RegionOfInterest, Image
+from std_srvs.srv import Empty
+from image_recognition_msgs.msg import (
+    Recognition,
+    Recognitions,
+    CategoryProbability,
+    CategoricalDistribution,
+)
+from image_recognition_msgs.srv import Recognize, Annotate
+from image_recognition_util import image_writer
+from image_recognition_face_recognition.facenet_recognition import Facenet_recognition
+
+
+class OpenfaceROS:
+    def __init__(
+        self,
+        align_path,
+        net_path,
+        save_images_folder,
+        topic_save_images,
+        service_save_images,
+        topic_publish_result_image,
+        service_publish_result_image,
+    ):
+        """
+        OpenfaceROS class that wraps the face recognizer in a ROS node
+
+        :param align_path: dlib face align path
+        :param net_path: openface neural net path
+        :param save_images_folder: path where to store the images
+        :param topic_save_images: whether to save images originating from the image topic callback
+        :param service_save_images: whether to save images originating from a service call
+        :param topic_publish_result_image: whether to publish result images originating from the image topic callback
+        :param service_publish_result_image: whether to publish result images originating from a service call
+        """
+
+        # Face recognizer
+        self._face_recognizer = Facenet_recognition()
+        self._save_images_folder = save_images_folder
+        self._topic_save_images = topic_save_images
+        self._service_save_images = service_save_images
+        self._topic_publish_result_image = topic_publish_result_image
+        self._service_publish_result_image = service_publish_result_image
+
+        self._bridge = CvBridge()
+        self._annotate_srv = rospy.Service("annotate", Annotate, self._annotate_srv)
+        self._recognize_srv = rospy.Service("recognize", Recognize, self._recognize_srv)
+        self._clear_srv = rospy.Service("clear", Empty, self._clear_srv)
+
+        self._image_subscriber = rospy.Subscriber("image", Image, self._image_callback)
+        self._recognitions_publisher = rospy.Publisher("recognitions", Recognitions, queue_size=10)
+
+        if not self._save_images_folder and (self._topic_save_images or self._service_save_images):
+            rospy.logerr("~save_images_folder is not defined but we would like to save images ...")
+            rospy.signal_shutdown("")
+
+        if self._topic_publish_result_image or self._service_publish_result_image:
+            self._result_image_publisher = rospy.Publisher("result_image", Image, queue_size=10)
+
+        rospy.loginfo("OpenfaceROS initialized:")
+        rospy.loginfo(" - dlib_align_path=%s", align_path)
+        rospy.loginfo(" - openface_net_path=%s", net_path)
+        rospy.loginfo(" - save_images_folder=%s", save_images_folder)
+        rospy.loginfo(" - topic_save_images=%s", topic_save_images)
+        rospy.loginfo(" - service_save_images=%s", service_save_images)
+        rospy.loginfo(" - topic_publish_result_image=%s", topic_publish_result_image)
+        rospy.loginfo(" - service_publish_result_image=%s", service_publish_result_image)
+
+    def _annotate_srv(self, req):
+        """
+        Annotate service callback that trains the face of the user
+
+        :param req: Face annotation request
+        :return: Empty
+        """
+        # Convert to opencv image
+        try:
+            bgr_image = self._bridge.imgmsg_to_cv2(req.image, "bgr8")
+        except CvBridgeError as e:
+            raise Exception("Could not convert to opencv image: %s" % str(e))
+
+        for annotation in req.annotations:
+            roi_image = bgr_image[
+                annotation.roi.y_offset: annotation.roi.y_offset + annotation.roi.height,
+                annotation.roi.x_offset: annotation.roi.x_offset + annotation.roi.width,
+            ]
+
+            if self._save_images_folder:
+                image_writer.write_annotated(self._save_images_folder, roi_image, annotation.label, True)
+
+            try:
+                self._face_recognizer.train(roi_image, annotation.label)
+            except Exception as e:
+                raise Exception("Could not get representation of face image: %s" % str(e))
+
+            rospy.loginfo("Successfully learned face of '%s'" % annotation.label)
+
+        return {}
+
+    def _clear_srv(self, req):
+        """
+        Service to clear the trained faces
+
+        :param req: Empty
+        :return: Empty
+        """
+        self._face_recognizer.clear_trained_faces()
+        return {}
+
+    def _get_recognitions(self, image_msg, save_images, publish_images):
+        """
+        Get the face recognitions for an input image
+
+        :param image_msg: the input image
+        :param save_images: whether to store the images
+        :param publish_images: whether to publish the result image
+        :return: Recognitions
+        """
+        # Convert to opencv image
+        try:
+            bgr_image = self._bridge.imgmsg_to_cv2(image_msg, "bgr8")
+        except CvBridgeError as e:
+            raise Exception("Could not convert to opencv image: %s" % str(e))
+
+        # Write raw image
+        if save_images:
+            image_writer.write_raw(self._save_images_folder, bgr_image)
+
+        images = []
+        # Hard-coded labels, added for testing
+        labels = ["jason", "kona"]
+
+        # Call the facenet neural network in two stages
+        face_recognitions = self._face_recognizer.face_detection(bgr_image)
+        distance, labels_pred = self._face_recognizer.detection_recognition(bgr_image, labels, train=True)
+
+        # Fill recognitions
+        recognitions = []
+
+        # rospy.loginfo("Face recognitions: %s", face_recognitions)
+
+        label_idx = 0
+        for fr in face_recognitions:
+            face_recognition = [math.floor(xi) for xi in fr]  # [x1, y1, x2, y2]
+            if save_images:
+                label = labels_pred[label_idx]
+                roi_image = bgr_image[
+                    face_recognition[1]: face_recognition[3],
+                    face_recognition[0]: face_recognition[2],
+                ]
+                image_writer.write_annotated(self._save_images_folder, roi_image, label, False)
+                images.append(roi_image)
+                labels.append(label)
+            label = labels_pred[label_idx]
+            distance_fr = distance[label_idx]
+            recognitions.append(
+                Recognition(
+                    categorical_distribution=CategoricalDistribution(
+                        unknown_probability=0.0,  # TODO: When is it unknown?
+                        probabilities=[
+                            # TODO: come up with a better mapping from distance to probability
+                            CategoryProbability(label=label, probability=1.0 / (distance_fr + 0.001))
+                        ],
+                    ),
+                    roi=RegionOfInterest(
+                        x_offset=face_recognition[0],
+                        y_offset=face_recognition[1],
+                        width=face_recognition[2] - face_recognition[0],
+                        height=face_recognition[3] - face_recognition[1],
+                    ),
+                )
+            )
+            label_idx += 1
+
+        annotated_original_image = image_writer.get_annotated_cv_image(bgr_image, recognitions)
+        if save_images:
+            image_writer.write_estimations(
+                self._save_images_folder,
+                images,
+                labels,
+                annotated_original_image,
+                suffix="_face_recognition",
+            )
+
+        if publish_images:
+            self._result_image_publisher.publish(self._bridge.cv2_to_imgmsg(annotated_original_image, "bgr8"))
+
+        # Service response
+        return recognitions
+
+    def _image_callback(self, image_msg):
+        # Comment out this try/except for easier debugging
+        try:
+            recognitions = self._get_recognitions(
+                image_msg,
+                save_images=self._topic_save_images,
+                publish_images=self._topic_publish_result_image,
+            )
+        except Exception as e:
+            rospy.logerr(str(e))
+            return
+
+        self._recognitions_publisher.publish(Recognitions(header=image_msg.header, recognitions=recognitions))
+
+    def _recognize_srv(self, req):
+        """
+        Recognize service callback
+
+        :param req: the input image
+        :return: Recognitions
+        """
+        recognitions = self._get_recognitions(
+            req.image,
+            save_images=self._service_save_images,
+            publish_images=self._service_publish_result_image,
+        )
+
+        # Service response
+        return {"recognitions": recognitions}
+
+    def restore_trained_faces(self, file_name):
+        self._face_recognizer.restore_trained_faces(file_name)
+
+
+if __name__ == "__main__":
+    rospy.init_node("face_recognition")
+    try:
+        dlib_shape_predictor_path = os.path.expanduser(
+            rospy.get_param(
+                "~align_path",
+                "~/openface/models/dlib/shape_predictor_68_face_landmarks.dat",
+            )
+        )
+        openface_neural_network_path = os.path.expanduser(
+            rospy.get_param("~net_path", "~/openface/models/openface/nn4.small2.v1.t7")
+        )
+        save_images = rospy.get_param("~save_images", True)
+        topic_save_images = rospy.get_param("~topic_save_images", False)
+        service_save_images = rospy.get_param("~service_save_images", True)
+        topic_publish_result_image = rospy.get_param("~topic_publish_result_image", True)
+        service_publish_result_image = rospy.get_param("~service_publish_result_image", True)
+
+        db = rospy.get_param("~db", None)
+        if db:
+            db = os.path.expanduser(db)
+
+        save_images_folder = None
+        if save_images:
+            save_images_folder = os.path.expanduser(rospy.get_param("~save_images_folder", "/tmp/openface"))
"/tmp/openface")) + except KeyError as e: + rospy.logerr("Parameter %s not found" % e) + sys.exit(1) + + image_recognition_openface = OpenfaceROS( + dlib_shape_predictor_path, + openface_neural_network_path, + save_images_folder, + topic_save_images, + service_save_images, + topic_publish_result_image, + service_publish_result_image, + ) + if db: + rospy.loginfo("loading face database from %s", db) + image_recognition_openface.restore_trained_faces(db) + + updater = diagnostic_updater.Updater() + updater.setHardwareID("none") + updater.add(diagnostic_updater.Heartbeat()) + rospy.Timer(rospy.Duration(1), lambda event: updater.force_update()) + + rospy.spin() diff --git a/image_recognition_face_recognition/scripts/get_face_recognition b/image_recognition_face_recognition/scripts/get_face_recognition new file mode 100755 index 00000000..b34c6e2a --- /dev/null +++ b/image_recognition_face_recognition/scripts/get_face_recognition @@ -0,0 +1,54 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse + +import cv2 +from image_recognition_msgs.msg import Recognition +from image_recognition_face_recognition.facenet_recognition import Facenet_recognition +# Assign description to the help doc +import math +from sensor_msgs.msg import RegionOfInterest + +from image_recognition_util import image_writer + +parser = argparse.ArgumentParser(description='Get face recognitions') + +# Add arguments +parser.add_argument('image', type=str, help='Image') +parser.add_argument('-k', '--align_path', type=str, help='DLib Align path', required=False, + default="~/openface/models/dlib/shape_predictor_68_face_landmarks.dat") +parser.add_argument('-s', '--net_path', type=str, help='Openface neural network path', required=False, + default='~/openface/models/openface/nn4.small2.v1.t7') +parser.add_argument('-d', '--db', type=argparse.FileType('r'), help='Load already trained faces db from file') +parser.add_argument( + '-v', '--verbose', help="Increase output verbosity", action="store_true") +args = parser.parse_args() + +# Read the image +img = cv2.imread(args.image) + +# Create openface interface +face_recognizer = Facenet_recognition() + +if args.db: + face_recognizer.restore_trained_faces(args.db) + +recognized_faces = face_recognizer.face_detection(img) +print(recognized_faces) + +recognitions = [] +for fr in recognized_faces: + face_recognition = [math.floor(xi) for xi in fr] + recognitions.append(Recognition( + roi=RegionOfInterest( + x_offset=face_recognition[0], + y_offset=face_recognition[1], + width=face_recognition[2] - face_recognition[0], + height=face_recognition[3] - face_recognition[1], + ) + ) +) + annotated_original_image = image_writer.get_annotated_cv_image(img, recognitions) +cv2.imshow("result", annotated_original_image) +cv2.waitKey() diff --git a/image_recognition_face_recognition/scripts/train_from_images b/image_recognition_face_recognition/scripts/train_from_images new file mode 100755 index 00000000..11abd33e --- /dev/null +++ b/image_recognition_face_recognition/scripts/train_from_images @@ -0,0 +1,70 @@ +#!/usr/bin/env python +import os +import cv2 +import logging +from argparse import ArgumentParser, Action, ArgumentTypeError, FileType +from image_recognition_openface.face_recognizer import FaceRecognizer + + +logging.basicConfig() +logger = logging.getLogger(__name__) + + +def main(modeldir, outfile, align_path, net_path, verbose): + if verbose: + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(logging.INFO) + + face_recognizer = 
+    face_recognizer = FaceRecognizer(align_path, net_path)
+
+    dirs = [o for o in os.listdir(modeldir) if os.path.isdir(os.path.join(modeldir, o))]
+    for cat in dirs:
+        logger.info('loading images for %s', cat)
+        path = os.path.join(modeldir, cat)
+
+        logger.debug('loading images from %s', path)
+        for fname in os.listdir(path):
+            f = os.path.join(path, fname)
+            logger.debug('processing %s', f)
+
+            img = cv2.imread(f, 1)  # load as color
+
+            try:
+                logger.debug('training...')
+                face_recognizer.train(img, cat)
+                logger.debug('success!')
+            except Exception:
+                logger.exception('face_recognizer failed to process %s', f)
+
+    logger.info('saving database...')
+    face_recognizer.save_trained_faces(outfile)
+    logger.info('done!')
+
+
+class ReadableDir(Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        prospective_dir = values
+        if not os.path.isdir(prospective_dir):
+            raise ArgumentTypeError("readable_dir:{0} is not a valid path".format(prospective_dir))
+        if os.access(prospective_dir, os.R_OK):
+            setattr(namespace, self.dest, prospective_dir)
+        else:
+            raise ArgumentTypeError("readable_dir:{0} is not a readable dir".format(prospective_dir))
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser(description='Train openface from a database of images')
+
+    parser.add_argument('modeldir', action=ReadableDir, help='Directory with folders for each category')
+    parser.add_argument('outfile', type=FileType('w'), help='Where to output the trained faces database')
+
+    parser.add_argument('-k', '--align_path', type=str, help='DLib Align path', required=False,
+                        default="~/openface/models/dlib/shape_predictor_68_face_landmarks.dat")
+    parser.add_argument('-s', '--net_path', type=str, help='Openface neural network path', required=False,
+                        default='~/openface/models/openface/nn4.small2.v1.t7')
+
+    parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args()
+    main(**vars(args))
diff --git a/image_recognition_face_recognition/setup.py b/image_recognition_face_recognition/setup.py
new file mode 100644
index 00000000..f849d00f
--- /dev/null
+++ b/image_recognition_face_recognition/setup.py
@@ -0,0 +1,6 @@
+from catkin_pkg.python_setup import generate_distutils_setup
+from setuptools import setup
+
+d = generate_distutils_setup(packages=["image_recognition_face_recognition"], package_dir={"": "src"})
+
+setup(**d)
diff --git a/image_recognition_face_recognition/src/image_recognition_face_recognition/facenet_recognition.py b/image_recognition_face_recognition/src/image_recognition_face_recognition/facenet_recognition.py
new file mode 100644
index 00000000..dfe04c68
--- /dev/null
+++ b/image_recognition_face_recognition/src/image_recognition_face_recognition/facenet_recognition.py
@@ -0,0 +1,201 @@
+import pickle
+
+import psutil
+import rospy
+import torch
+from facenet_pytorch import MTCNN, InceptionResnetV1
+
+
+class TrainedFace:
+    def __init__(self, label):
+        """
+        A custom struct to store the label and the embedded representations (tensors) of a person
+        """
+        self.label = label
+        self.representations = []
+
+    def get_label(self) -> str:
+        """
+        A getter for the label of the struct
+        """
+        return self.label
+
+    def get_representations(self):
+        """
+        A getter for the embeddings of the struct
+        """
+        return self.representations
+
+
+class Facenet_recognition:
+    def __init__(self):
+        """
+        Constructor; initializes the list which contains the TrainedFace structures
+        """
+        self._trained_faces = []
+
+    def print_memory_usage(self, label):
+        """
+        Prints the memory usage of the current process
+        """
+        mem_info = psutil.Process().memory_info()
+        rospy.loginfo(
+            f"{label: <10}: vms={mem_info.vms / (1024 * 1024)}, rss={mem_info.rss / (1024 * 1024)}"
+        )
+
+    def face_detection(self, img):
+        """
+        Detects the faces in an image
+
+        :param img: input image
+        :return: the bounding box coordinates ([x1, y1, x2, y2]) of the faces it detects
+        """
+        detector = MTCNN(keep_all=True)
+        # Keep the landmarks for future use
+        boxes, _, landmarks = detector.detect(img, landmarks=True)
+        return boxes
+
+    def _get_dists(self, embeddings):
+        """
+        Returns the min l2 distance of each identified face (compared with the database of known faces)
+
+        :param embeddings: the embedded representation(s) of one or more faces
+        :return: the min distance(s) of the embedded vector(s) compared with the database faces
+        :return: the corresponding label(s)
+        """
+        dist_per_emb_final = []
+        dist = []
+        dist_per_emb = []
+
+        min_of_emb_final = []
+
+        min_index_list_per_emb = []
+        min_value_list_per_emb = []
+
+        # Collect, per embedding, the distances to every stored representation of every trained face
+        for embedding in embeddings:
+            for trained_face in self._trained_faces:
+                for representation in trained_face.representations:
+                    dist_per_emb.append(abs(representation - embedding).norm().item())
+                dist.append(dist_per_emb)
+                rospy.logdebug(f"{dist_per_emb} dist_per_emb")
+                dist_per_emb = []
+            dist_per_emb_final.append(dist)
+            dist = []
+
+        rospy.logdebug(f"{dist_per_emb_final} dist_per_emb_final")
+
+        # Reduce to the min distance per trained face, then pick the best matching trained face
+        for i in dist_per_emb_final:
+            min_of_emb = [min(j) for j in i]
+            rospy.logdebug(f"{min_of_emb} min_of_emb")
+            min_of_emb_final.append(min_of_emb)
+            rospy.logdebug(f"{min_of_emb_final} min_of_emb_final")
+
+        for idx in min_of_emb_final:
+            rospy.logdebug(f"{idx} idx")
+            min_index_list_per_emb.append(idx.index(min(idx)))
+            min_value_list_per_emb.append(min(idx))
+        rospy.logdebug(f"{min_index_list_per_emb}, min_index_list_per_emb")
+        rospy.logdebug(f"{min_value_list_per_emb}, min_value_list_per_emb")
+
+        labelling = [self._trained_faces[i].get_label() for i in min_index_list_per_emb]
+        rospy.loginfo(f"{labelling}, {min_value_list_per_emb}")
+
+        return min_value_list_per_emb, labelling
+
+    def detection_recognition(self, img, labels, train):
+        """
+        Detects faces in the image and returns the min l2 distance(s) of the identified face(s)
+        (compared with the database of known faces)
+
+        :param img: the target image collected from the camera
+        :param labels: a list of candidate labels, used to seed the database (to be wired up properly during integration)
+        :param train: flag to also add the identified faces to the database during inference
+        :return: the min distance(s) of the embedded vector(s) compared with the database faces
+        :return: the corresponding label(s)
+        """
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        rospy.loginfo(f"Running on device: {device}")
+
+        mtcnn = MTCNN(
+            keep_all=True,
+            image_size=160,
+            margin=0,
+            min_face_size=20,
+            thresholds=[0.6, 0.7, 0.7],
+            factor=0.709,
+            post_process=True,
+            device=device,
+        )
+
+        resnet = InceptionResnetV1(pretrained="vggface2").eval().to(device)
+
+        x_aligned = mtcnn(img)
+        if x_aligned is None:
+            # MTCNN found no faces
+            return [], []
+        rospy.logdebug(f"{x_aligned.size()} x_aligned size (1st NN output)")
+        x_aligned = x_aligned.to(device)  # move the face crops to the same device as the resnet (.cuda() broke CPU-only runs)
+        embeddings = resnet(x_aligned).detach().cpu()
+        rospy.loginfo(f"{embeddings.size()}, {type(embeddings)}, embeddings size")
+
+        # Seed the database with the provided labels if it is still empty
+        if not self._trained_faces:
+            for idx, emb in enumerate(embeddings):
+                index = self._get_trained_face_index(labels[idx])
+                if index == -1:
+                    self._trained_faces.append(TrainedFace(labels[idx]))
+                    index = len(self._trained_faces) - 1
+                self._trained_faces[index].representations.append(emb)
+
+        dist, labelling = self._get_dists(embeddings)
+        # TODO: if dist > 1, we should ask for a label instead of trusting the match
+        rospy.loginfo(f"{labels[0]}, {labelling}, labels[0], labelling")
+
+        if train:
+            for idx_label, emb in enumerate(embeddings):
+                rospy.logdebug(f"{idx_label}, idx_label")
+                self.train(emb, labelling[idx_label])
+
+        rospy.loginfo(f"{len(self._trained_faces)}")
+        return dist, labelling
+
+    def _get_trained_face_index(self, label):
+        """
+        Returns the index of the trained face
+
+        :param label: label of the trained face
+        :return: the index of the face in the self._trained_faces list, or -1 if it is unknown
+        """
+        for i, f in enumerate(self._trained_faces):
+            if f.label == label:
+                return i
+        return -1
+
+    def train(self, face_representation, name):
+        """
+        Adds a face to the trained faces and stores its vector representation
+
+        :param face_representation: the embedded representation of the face
+        :param name: the label of the face
+        """
+        index = self._get_trained_face_index(name)
+        if index == -1:
+            rospy.loginfo("We do not know this face")
+            self._trained_faces.append(TrainedFace(name))
+            index = len(self._trained_faces) - 1
+
+        self._trained_faces[index].representations.append(face_representation)
+        rospy.loginfo("Trained faces:")
+        for trained_face in self._trained_faces:
+            rospy.loginfo(
+                f"Label: {trained_face.get_label()}, Representations: {len(trained_face.get_representations())}"
+            )
+
+    def clear_trained_faces(self):
+        """
+        Clears the database of trained faces (used by the node's "clear" service)
+        """
+        self._trained_faces = []
+
+    def restore_trained_faces(self, f):
+        """
+        Restores the trained faces from file. Minimal pickle-based sketch so the
+        node's ~db parameter and the scripts' db arguments work; the on-disk
+        format (a pickled list of TrainedFace objects) is an assumption.
+
+        :param f: file name or open file object
+        """
+        if isinstance(f, str):
+            f = open(f, "rb")
+        self._trained_faces = pickle.load(f)
+
+    def save_trained_faces(self, f):
+        """
+        Saves the trained faces to file (minimal pickle-based sketch, see restore_trained_faces)
+
+        :param f: file name or open file object
+        """
+        if isinstance(f, str):
+            f = open(f, "wb")
+        pickle.dump(self._trained_faces, f)
+
+
+if __name__ == "__main__":
+    # Minimal smoke test; normally this module is used from the ROS node
+    face_detection = Facenet_recognition()
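
For reference, a minimal usage sketch of the new `Facenet_recognition` class outside ROS (assumes a test image on disk; the class logs through rospy, so log output only appears when run inside a node):

    import cv2
    from image_recognition_face_recognition.facenet_recognition import Facenet_recognition

    recognizer = Facenet_recognition()
    img = cv2.imread("example.png")  # hypothetical test image containing at least one face

    # Stage 1: detect faces; one [x1, y1, x2, y2] box per face
    boxes = recognizer.face_detection(img)

    # Stage 2: embed and match; the first call seeds the database with the given labels
    distances, labels = recognizer.detection_recognition(img, ["alice"], train=True)
    print(boxes, distances, labels)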