From 41dd814f27ec92f023cab6cbc2673a8a3a2cbfb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 08:39:20 +0200 Subject: [PATCH 01/10] Update summary - describe non-facial norm more - add more references for non-experts reader for possible follow up --- paper/paper.bib | 105 +++++++++++++++++++++++++++++++++++++++++++++++- paper/paper.md | 7 +++- 2 files changed, 109 insertions(+), 3 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 5635953..473fe04 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -103,4 +103,107 @@ @ARTICLE{otsu number={1}, pages={62-66}, doi={10.1109/TSMC.1979.4310076} -} \ No newline at end of file +} + +@article{kwonHighspeedCameraCharacterization2013, + title = {High-Speed Camera Characterization of Voluntary Eye Blinking Kinematics}, + author = {Kwon, Kyung-Ah and Shipley, Rebecca J. and Edirisinghe, Mohan and Ezra, Daniel G. and Rose, Geoff and Best, Serena M. and Cameron, Ruth E.}, + year = {2013}, + month = aug, + journal = {Journal of the Royal Society, Interface}, + volume = {10}, + number = {85}, + pages = {20130227}, + issn = {1742-5662}, + doi = {10.1098/rsif.2013.0227}, + langid = {english}, + pmcid = {PMC4043155}, + pmid = {23760297}, +} + +@article{vanderwerfBlinkRecoveryPatients2007, + title = {Blink {{Recovery}} in {{Patients}} with {{Bell}}'s {{Palsy}}: {{A Neurophysiological}} and {{Behavioral Longitudinal Study}}}, + shorttitle = {Blink {{Recovery}} in {{Patients}} with {{Bell}}'s {{Palsy}}}, + author = {VanderWerf, Frans and Reits, Dik and Smit, Albertine Ellen and Metselaar, Mick}, + year = {2007}, + month = jan, + journal = {Investigative Ophthalmology \& Visual Science}, + volume = {48}, + number = {1}, + pages = {203--213}, + issn = {1552-5783}, + doi = {10.1167/iovs.06-0499}, + urldate = {2024-04-16}, +} + +@article{nuuttilaDiagnosticAccuracyGlabellar2021, + title = {Diagnostic Accuracy of Glabellar Tap Sign for {{Parkinson}}'s Disease}, + author = {Nuuttila, Simo 
and Eklund, Mikael and Joutsa, Juho and Jaakkola, Elina and M{\"a}kinen, Elina and Honkanen, Emma A. and Lindholm, Kari and Noponen, Tommi and Ihalainen, Toni and Murtom{\"a}ki, Kirsi and Nojonen, Tanja and Levo, Reeta and Mertsalmi, Tuomas and Scheperjans, Filip and Kaasinen, Valtteri}, + year = {2021}, + journal = {Journal of Neural Transmission}, + volume = {128}, + number = {11}, + pages = {1655--1661}, + issn = {0300-9564}, + doi = {10.1007/s00702-021-02391-3}, + urldate = {2024-04-16}, +} + +@article{vanderwerfEyelidMovementsBehavioral2003, + title = {Eyelid Movements: Behavioral Studies of Blinking in Humans under Different Stimulus Conditions}, + shorttitle = {Eyelid Movements}, + author = {VanderWerf, Frans and Brassinga, Peter and Reits, Dik and Aramideh, Majid and {Ongerboer de Visser}, Bram}, + year = {2003}, + month = may, + journal = {Journal of Neurophysiology}, + volume = {89}, + number = {5}, + pages = {2784--2796}, + issn = {0022-3077}, + langid = {english}, +} + +@article{cruzSpontaneousEyeblinkActivity2011, + title = {Spontaneous Eyeblink Activity}, + author = {Cruz, Antonio A. V. and Garcia, Denny M. and Pinto, Carolina T. and Cechetti, Sheila P.}, + year = {2011}, + month = jan, + journal = {The Ocular Surface}, + volume = {9}, + number = {1}, + pages = {29--41}, + issn = {1542-0124}, + langid = {english}, + pmid = {21338567}, +} + +@article{volkInitialSeverityMotor2017, + title = {Initial Severity of Motor and Non-Motor Disabilities in Patients with Facial Palsy: An Assessment Using Patient-Reported Outcome Measures}, + shorttitle = {Initial Severity of Motor and Non-Motor Disabilities in Patients with Facial Palsy}, + author = {Volk, Gerd Fabian and Granitzka, Thordis and Kreysa, Helene and Klingner, Carsten M. 
and {Guntinas-Lichius}, Orlando}, + year = {2017}, + month = jan, + journal = {European archives of oto-rhino-laryngology: official journal of the European Federation of Oto-Rhino-Laryngological Societies (EUFOS): affiliated with the German Society for Oto-Rhino-Laryngology - Head and Neck Surgery}, + volume = {274}, + number = {1}, + pages = {45--52}, + issn = {1434-4726}, + doi = {10.1007/s00405-016-4018-1}, + abstract = {Patients with facial palsy (FP) not only suffer from their facial movement disorder, but also from social and psychological disabilities. These can be assessed by patient-reported outcome measures (PROMs) like the quality-of-life Short-Form 36 Item Questionnaire (SF36) or FP-specific instruments like the Facial Clinimetric Evaluation Scale (FaCE) or the Facial Disability Index (FDI). Not much is known about factors influencing PROMs in patients with FP. We identified predictors for baseline SF36, FaCE, and FDI scoring in 256 patients with unilateral peripheral FP using univariate correlation and multivariate linear regression analyses. Mean age was 52~{\textpm}~18~years. 153 patients (60~\%) were female. 90 patients (31~\%) and 176 patients (69~\%) were first seen {$<$}90 or {$>$}90~days after onset, respectively, i.e., with acute or chronic FP. House-Brackmann grading was 3.9~{\textpm}~1.4. FaCE subscores varied from 41~{\textpm}~28 to 71~{\textpm}~26, FDI scores from 65~{\textpm}~20 to 70~{\textpm}~22, and SF36 domains from 52~{\textpm}~20 to 80~{\textpm}~24. Older age, female gender, higher House-Brackmann grading, and initial assessment {$>$}90~days after onset were independent predictors for lower FaCE subscores and partly for lower FDI subscores (all p~{$<~$}0.05). Older age and female gender were best predictors for lower results in SF36 domains. Comorbidity was associated with lower SF General health perception and lower SF36 Emotional role (all p~{$<~$}0.05). 
Specific PROMs reveal that older and female patients and patients with chronic FP suffer particularly from motor and non-motor disabilities related to FP. Comorbidity unrelated to the FP could additionally impact the quality of life of patients with FP.}, + langid = {english}, + pmid = {27040558}, + keywords = {Bell's palsy,Disability Evaluation,Disabled Persons,Facial nerve,Facial nerve reconstruction,Facial Paralysis,Humans,Patient Reported Outcome Measures,Patient-oriented methods,Quality of life,Quality of Life,Surveys and Questionnaires} +} + +@article{louReviewAutomatedFacial2020, + title = {A {{Review}} on {{Automated Facial Nerve Function Assessment From Visual Face Capture}}}, + author = {Lou, Jianwen and Yu, Hui and Wang, Fei-Yue}, + year = {2020}, + month = feb, + journal = {IEEE Transactions on Neural Systems and Rehabilitation Engineering}, + volume = {28}, + number = {2}, + pages = {488--497}, + issn = {1558-0210}, + doi = {10.1109/TNSRE.2019.2961244}, +} diff --git a/paper/paper.md b/paper/paper.md index 111a7a4..3ef93ff 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -34,8 +34,11 @@ bibliography: paper.bib Analyzing facial features and expressions is a complex task in computer vision. The human face is intricate, with significant shape, texture, and appearance variations. -In medical contexts, facial structures that differ from the norm, such as those affected by paralysis, are particularly important to study and require precise analysis. -One area of interest is the subtle movements involved in blinking, a process that is not yet fully understood and needs high-resolution, time-specific analysis for detailed understanding. +In medical contexts, facial structures and movements that differ from the norm are particularly important to study and require precise analysis to understand the underlying conditions. 
+Given that solely the facial muscles, innervated by the facial nerve, are responsible for facial expressions, facial palsy can lead to severe impairments in facial movements [@volkInitialSeverityMotor2017;@louReviewAutomatedFacial2020]. + +One affected area of interest is the subtle movements involved in blinking [@vanderwerfBlinkRecoveryPatients2007;@nuuttilaDiagnosticAccuracyGlabellar2021;@vanderwerfEyelidMovementsBehavioral2003]. +It is an intricate spontaneous process that is not yet fully understood and needs high-resolution, time-specific analysis for detailed understanding [@kwonHighspeedCameraCharacterization2013;@cruzSpontaneousEyeblinkActivity2011]. However, a significant challenge is that many advanced computer vision techniques demand programming skills, making them less accessible to medical professionals who may not have these skills. The Jena Facial Palsy Toolbox (JeFaPaTo) has been developed to bridge this gap. It utilizes cutting-edge computer vision algorithms and offers a user-friendly interface for those without programming expertise. From 0c71627dc4008706a0c3573485684857955e481a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 08:43:47 +0200 Subject: [PATCH 02/10] Update summary - make usage of computer vision a bit clearer --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 3ef93ff..8ce70ae 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -39,7 +39,7 @@ Given that solely the facial muscles, innervated by the facial nerve, are respon One affected area of interest is the subtle movements involved in blinking [@vanderwerfBlinkRecoveryPatients2007;@nuuttilaDiagnosticAccuracyGlabellar2021;@vanderwerfEyelidMovementsBehavioral2003]. 
It is an intricate spontaneous process that is not yet fully understood and needs high-resolution, time-specific analysis for detailed understanding [@kwonHighspeedCameraCharacterization2013;@cruzSpontaneousEyeblinkActivity2011]. -However, a significant challenge is that many advanced computer vision techniques demand programming skills, making them less accessible to medical professionals who may not have these skills. +However, a significant challenge is that many computer vision techniques demand programming skills for automated extraction and analysis, making them less accessible to medical professionals who may not have these skills. The Jena Facial Palsy Toolbox (JeFaPaTo) has been developed to bridge this gap. It utilizes cutting-edge computer vision algorithms and offers a user-friendly interface for those without programming expertise. This toolbox is designed to make advanced facial analysis more accessible to medical experts, simplifying integration into their workflow. From 62329e171e67a328f91fe864a5e0cbd6375d2d00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 08:48:34 +0200 Subject: [PATCH 03/10] Update summary - smooth the transition between paragraphs - include references for the reader --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 8ce70ae..5fdd6af 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -44,7 +44,7 @@ The Jena Facial Palsy Toolbox (JeFaPaTo) has been developed to bridge this gap. It utilizes cutting-edge computer vision algorithms and offers a user-friendly interface for those without programming expertise. This toolbox is designed to make advanced facial analysis more accessible to medical experts, simplifying integration into their workflow. -The state of the eye closure is of high interest to medical experts, e.g., in the context of facial palsy or Parkinson's disease. 
+This simple-to-use tool could enable medical professionals to quickly establish the blinking behavior of patients, providing valuable insights into their condition, especially in the context of facial palsy or Parkinson's disease [@nuuttilaDiagnosticAccuracyGlabellar2021;@vanderwerfBlinkRecoveryPatients2007]. Due to facial nerve damage, the eye-closing process might be impaired and could lead to many undesirable side effects. Hence, more than a simple distinction between open and closed eyes is required for a detailed analysis. Factors such as duration, synchronicity, velocity, complete closure, the time between blinks, and frequency over time are highly relevant. From 80dbe330c3e6f0c3e5a9154a988b6bbc9911193d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 09:13:28 +0200 Subject: [PATCH 04/10] Update statement of need - include more existing approaches - one for highspeed (but only every 5ms) - two medical ones - create new subsection for statement of need as some kind of overview --- paper/paper.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 5fdd6af..918aa73 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -42,7 +42,7 @@ It is an intricate spontaneous process that is not yet fully understood and need However, a significant challenge is that many computer vision techniques demand programming skills for automated extraction and analysis, making them less accessible to medical professionals who may not have these skills. The Jena Facial Palsy Toolbox (JeFaPaTo) has been developed to bridge this gap. It utilizes cutting-edge computer vision algorithms and offers a user-friendly interface for those without programming expertise. -This toolbox is designed to make advanced facial analysis more accessible to medical experts, simplifying integration into their workflow. 
+This toolbox makes advanced facial analysis more accessible to medical experts, simplifying integration into their workflow. This simple-to-use tool could enable medical professionals to quickly establish the blinking behavior of patients, providing valuable insights into their condition, especially in the context of facial palsy or Parkinson's disease [@nuuttilaDiagnosticAccuracyGlabellar2021;@vanderwerfBlinkRecoveryPatients2007]. Due to facial nerve damage, the eye-closing process might be impaired and could lead to many undesirable side effects. @@ -58,6 +58,13 @@ This approach neglects relevant information such as the blink intensity, duratio Moreover, this simple classification approach does not factor in high temporal resolution video data, which is essential for a thorough analysis of the blinking process as most blinks are shorter than 100 ms. We developed `JeFaPaTo` to go beyond the simple eye state classification and offer a method to extract complete blinking intervals for detailed analysis. We aim to provide a custom tool that is easy for medical experts, abstracting the complexity of the underlying computer vision algorithms and high-temporal processing and enabling them to analyze blinking behavior without requiring programming skills. +An existing approach [@kwonHighspeedCameraCharacterization2013] for high temporal videos uses only every frame 5 ms and requires manual measuring of the upper and lower eyelid margins. +Other methods require additional sensors such as electromyography (EMG) or magnetic search coils to measure the eyelid movement [@vanderwerfBlinkRecoveryPatients2007;@vanderwerfEyelidMovementsBehavioral2003]. +Such sensors necessitate additional human resources and are unsuitable for routine clinical analysis. 
+`JeFaPaTo` is a novel approach that combines the advantages of high temporal resolution video data [@kwonHighspeedCameraCharacterization2013] and computer vision algorithms [@soukupovaRealTimeEyeBlink2016] +to analyze the blinking behavior. + +## Overview of JeFaPaTo `JeFaPaTo` is a Python-based [@python] program to support medical and psychological experts in analyzing blinking and facial features for high temporal resolution video data. The tool is split into two main parts: An extendable programming interface and a graphical user interface (GUI) entirely written in Python. From 641d247a791e47a37c06c8a42b199eeeeb621a66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 09:13:50 +0200 Subject: [PATCH 05/10] add missing `high` --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 918aa73..eeb3477 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -68,7 +68,7 @@ to analyze the blinking behavior. `JeFaPaTo` is a Python-based [@python] program to support medical and psychological experts in analyzing blinking and facial features for high temporal resolution video data. The tool is split into two main parts: An extendable programming interface and a graphical user interface (GUI) entirely written in Python. -The programming interface enables efficient processing of temporal resolution video data, automatically extracts selected facial features, and provides a set of analysis functions specialized for blinking analysis. +The programming interface enables efficient processing of high temporal resolution video data, automatically extracts selected facial features, and provides a set of analysis functions specialized for blinking analysis. The GUI offers non-programmers an intuitive way to use the analysis functions, visualize the results, and export the data for further analysis. 
`JeFaPaTo` is designed to be extendable by additional analysis functions and facial features and is under joint development by computer vision and medical experts to ensure high usability and relevance for the target group. From 2f136dd02b00668ff84c8733d6e009227cb8da5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 09:16:00 +0200 Subject: [PATCH 06/10] split rather long sentence --- paper/paper.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index eeb3477..a5c8f7c 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -74,7 +74,8 @@ The GUI offers non-programmers an intuitive way to use the analysis functions, v `JeFaPaTo` leverages the `mediapipe` library [@lugaresiMediaPipeFrameworkBuilding2019;@kartynnikRealtimeFacialSurface2019a] to extract facial landmarks and blend shape features from video data at 60 FPS (on modern hardware). With the landmarks, we compute the `EAR` (Eye-Aspect-Ratio) [@soukupovaRealTimeEyeBlink2016] for both eyes over the videos. -Additionally, `JeFaPaTo` detects blinks, matches left and right eye, and computes medically relevant statistics, a visual summary for the provided video, shown in \autoref{fig:summary}, and exports the data in various formats for further independent analysis. +Additionally, `JeFaPaTo` detects blinks, matches the left and right eye, and computes medically relevant statistics. +Furthermore, a visual summary for the video is provided in the GUI, shown in \autoref{fig:summary}, and the data can be exported in various formats for further independent analysis. The visual summary lets medical experts quickly get an overview of the blinking behavior. As shown in \autoref{fig:summary}, the blinks per minute are shown as a histogram over time in the upper axis, and the delay between blinks is shown in the right axis. 
The main plot comprises the scatter plot of the `EAR` score for the left and right eye, and the dots indicate the detected blinks, with the rolling mean and standard deviation shown as a line plot. From c7b50b2e3e5c0dc3b3b9bec18745d57f90842e8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 10:01:31 +0200 Subject: [PATCH 07/10] Fix section header -> medical -> medically --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index a5c8f7c..ad1c3d1 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -138,7 +138,7 @@ Upon data extraction, corrections to the blinking state can be made directly wit The D-Area displays the current video frame, given that the user supplies the original video. While this feature is optional, it helps manually correct the blinking state when required. -## Extracted Medical Relevant Statistics +## Extracted Medically Relevant Statistics We provided a set of relevant statistics for medical analysis of blinking behavior, which are valuable to healthcare experts. The `JeFaPaTo` software is being developed in partnership with medical professionals to guarantee the included statistics are relevant. 
From 47a85012dac6323618ce61859db7dd85bcfb7ef5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 12:11:26 +0200 Subject: [PATCH 08/10] Make api and gui distinction more clear - explain what api extendablilty means - make api usage more clear - make gui usage more clear --- paper/paper.bib | 34 ++++++++++++++++++++++++++++++++++ paper/paper.md | 10 ++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 473fe04..a1341f1 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -207,3 +207,37 @@ @article{louReviewAutomatedFacial2020 issn = {1558-0210}, doi = {10.1109/TNSRE.2019.2961244}, } + +@article{hochreiterMachineLearningBasedDetectingEyelid2023, + title = {Machine-{{Learning-Based Detecting}} of {{Eyelid Closure}} and {{Smiling Using Surface Electromyography}} of {{Auricular Muscles}} in {{Patients}} with {{Postparalytic Facial Synkinesis}}: {{A Feasibility Study}}}, + shorttitle = {Machine-{{Learning-Based Detecting}} of {{Eyelid Closure}} and {{Smiling Using Surface Electromyography}} of {{Auricular Muscles}} in {{Patients}} with {{Postparalytic Facial Synkinesis}}}, + author = {Hochreiter, Jakob and Hoche, Eric and Janik, Luisa and Volk, Gerd Fabian and Leistritz, Lutz and Anders, Christoph and {Guntinas-Lichius}, Orlando}, + year = {2023}, + month = jan, + journal = {Diagnostics}, + volume = {13}, + number = {3}, + pages = {554}, + publisher = {Multidisciplinary Digital Publishing Institute}, + issn = {2075-4418}, + doi = {10.3390/diagnostics13030554}, + urldate = {2023-03-15}, + langid = {english}, +} + +@article{chenSmartphoneBasedArtificialIntelligenceAssisted2021, + title = {Smartphone-{{Based Artificial Intelligence-Assisted Prediction}} for {{Eyelid Measurements}}: {{Algorithm Development}} and {{Observational Validation Study}}}, + shorttitle = {Smartphone-{{Based Artificial Intelligence-Assisted Prediction}} for {{Eyelid Measurements}}}, + author = {Chen, 
Hung-Chang and Tzeng, Shin-Shi and Hsiao, Yen-Chang and Chen, Ruei-Feng and Hung, Erh-Chien and Lee, Oscar K.}, + year = {2021}, + month = oct, + journal = {JMIR mHealth and uHealth}, + volume = {9}, + number = {10}, + pages = {e32444}, + issn = {2291-5222}, + doi = {10.2196/32444}, + langid = {english}, + pmcid = {PMC8538024}, + pmid = {34538776}, +} diff --git a/paper/paper.md b/paper/paper.md index ad1c3d1..8166d6f 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -67,10 +67,12 @@ to analyze the blinking behavior. ## Overview of JeFaPaTo `JeFaPaTo` is a Python-based [@python] program to support medical and psychological experts in analyzing blinking and facial features for high temporal resolution video data. -The tool is split into two main parts: An extendable programming interface and a graphical user interface (GUI) entirely written in Python. -The programming interface enables efficient processing of high temporal resolution video data, automatically extracts selected facial features, and provides a set of analysis functions specialized for blinking analysis. -The GUI offers non-programmers an intuitive way to use the analysis functions, visualize the results, and export the data for further analysis. -`JeFaPaTo` is designed to be extendable by additional analysis functions and facial features and is under joint development by computer vision and medical experts to ensure high usability and relevance for the target group. +We follow a two-way approach to encourage programmers and non-programmers to use the tool. +On the one hand, we provide a programming interface for efficiently processing high-temporal resolution video data, automatic facial feature extraction, and specialized blinking analysis functions. 
+This interface is extendable, allowing the easy addition of new or existing facial feature-based processing functions (e.g., mouth movement analysis [@hochreiterMachineLearningBasedDetectingEyelid2023] or MRD1/MRD2 [@chenSmartphoneBasedArtificialIntelligenceAssisted2021]). +On the other hand, we offer a graphical user interface (GUI) entirely written in Python to enable non-programmers to use the full analysis functions, visualize the results, and export the data for further analysis. +All functionalities of the programming interface are accessible through the GUI with additional input validations, making it easy for medical experts to use. +`JeFaPaTo` is designed to be extendable and transparent and is under joint development by computer vision and medical experts to ensure high usability and relevance for the target group. `JeFaPaTo` leverages the `mediapipe` library [@lugaresiMediaPipeFrameworkBuilding2019;@kartynnikRealtimeFacialSurface2019a] to extract facial landmarks and blend shape features from video data at 60 FPS (on modern hardware). With the landmarks, we compute the `EAR` (Eye-Aspect-Ratio) [@soukupovaRealTimeEyeBlink2016] for both eyes over the videos. From 73847d7ce94b4d48984073c02928c91d67cedfbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 12:13:53 +0200 Subject: [PATCH 09/10] Fix some citations styles --- paper/paper.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 8166d6f..ac84a1a 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -53,12 +53,12 @@ Such detailed analysis could help medical experts better understand the blinking # Statement of need To analyze the blinking behavior in detail, medical experts often use high-speed cameras to record the blinking process. 
-Existing tools modeling the eye state based on the Eye-Aspect-Ratio (EAR), such as [@soukupovaRealTimeEyeBlink2016], only classify the eye state as open or closed, requiring a labeled dataset for training a suitable classifier.
+Existing tools modeling the eye state based on the Eye-Aspect-Ratio (EAR), such as @soukupovaRealTimeEyeBlink2016, only classify the eye state as open or closed, requiring a labeled dataset for training a suitable classifier.
 This approach neglects relevant information such as the blink intensity, duration, or partial blinks, which are crucial for a detailed analysis in a medical context.
 Moreover, this simple classification approach does not factor in high temporal resolution video data, which is essential for a thorough analysis of the blinking process as most blinks are shorter than 100 ms.
 We developed `JeFaPaTo` to go beyond the simple eye state classification and offer a method to extract complete blinking intervals for detailed analysis.
 We aim to provide a custom tool that is easy for medical experts, abstracting the complexity of the underlying computer vision algorithms and high-temporal processing and enabling them to analyze blinking behavior without requiring programming skills.
-An existing approach [@kwonHighspeedCameraCharacterization2013] for high temporal videos uses only every frame 5 ms and requires manual measuring of the upper and lower eyelid margins.
+An existing approach by @kwonHighspeedCameraCharacterization2013 for high temporal videos uses only one frame every 5 ms and requires manual measuring of the upper and lower eyelid margins.
 Other methods require additional sensors such as electromyography (EMG) or magnetic search coils to measure the eyelid movement [@vanderwerfBlinkRecoveryPatients2007;@vanderwerfEyelidMovementsBehavioral2003].
 Such sensors necessitate additional human resources and are unsuitable for routine clinical analysis.
`JeFaPaTo` is a novel approach that combines the advantages of high temporal resolution video data [@kwonHighspeedCameraCharacterization2013] and computer vision algorithms [@soukupovaRealTimeEyeBlink2016] From d5a2b5be28d241875a3dcbbd01d12fb3949e66d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20B=C3=BCchner?= Date: Tue, 16 Apr 2024 12:16:08 +0200 Subject: [PATCH 10/10] Update Lugares et al. paper - replace arxiv with cvpr workshop paper --- paper/paper.bib | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index a1341f1..9477175 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -8,20 +8,14 @@ @article{soukupovaRealTimeEyeBlink2016 url = {https://api.semanticscholar.org/CorpusID:35923299}, } -@misc{lugaresiMediaPipeFrameworkBuilding2019, - title = {{{MediaPipe}}: {{A Framework}} for {{Building Perception Pipelines}}}, - shorttitle = {{{MediaPipe}}}, - author = {Lugaresi, Camillo and Tang, Jiuqiang and Nash, Hadon and McClanahan, Chris and Uboweja, Esha and Hays, Michael and Zhang, Fan and Chang, Chuo-Ling and Yong, Ming Guang and Lee, Juhyun and Chang, Wan-Teh and Hua, Wei and Georg, Manfred and Grundmann, Matthias}, - year = {2019}, - month = jun, - number = {arXiv:1906.08172}, - eprint = {1906.08172}, - primaryclass = {cs}, - publisher = {{arXiv}}, - doi = {10.48550/arXiv.1906.08172}, - archiveprefix = {arxiv}, +@inproceedings{lugaresiMediaPipeFrameworkBuilding2019, + title = {{{MediaPipe}}: {{A}} Framework for Perceiving and Processing Reality}, + booktitle = {Third Workshop on Computer Vision for {{AR}}/{{VR}} at {{IEEE}} Computer Vision and Pattern Recognition ({{CVPR}}) 2019}, + author = {Lugaresi, Camillo and Tang, Jiuqiang and Nash, Hadon and McClanahan, Chris and Uboweja, Esha and Hays, Michael and Zhang, Fan and Chang, Chuo-Ling and Yong, Ming and Lee, Juhyun and Chang, Wan-Teh and Hua, Wei and Georg, Manfred and Grundmann, Matthias}, + year = {2019} } + 
@article{kartynnikRealtimeFacialSurface2019a, title = {Real-Time {{Facial Surface Geometry}} from {{Monocular Video}} on {{Mobile GPUs}}}, author = {Kartynnik, Yury and Ablavatski, Artsiom and Grishchenko, Ivan and Grundmann, Matthias},