<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Neurotech</journal-id>
      <journal-title>JMIR Neurotechnology</journal-title>
      <issn pub-type="epub">2817-092X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v2i1e43387</article-id>
      <article-id pub-id-type="pmid">37435094</article-id>
      <article-id pub-id-type="doi">10.2196/43387</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>A Digital Telehealth System to Compute Myasthenia Gravis Core Examination Metrics: Exploratory Cohort Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kubben</surname>
            <given-names>Pieter</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Ochs</surname>
            <given-names>Vincent</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Banf</surname>
            <given-names>Michael</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhao</surname>
            <given-names>Peng</given-names>
          </name>
          <degrees>PhD</degrees>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Garbey</surname>
            <given-names>Marc</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Surgery</institution>
            <institution>School of Medicine &amp; Health Sciences</institution>
            <institution>George Washington University</institution>
            <addr-line>2120 L St NW</addr-line>
            <addr-line>Washington, DC, 20037</addr-line>
            <country>United States</country>
            <phone>1 2815363178</phone>
            <email>garbeymarc@gmail.com</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7898-9136</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Joerger</surname>
            <given-names>Guillaume</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8380-5327</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Lesport</surname>
            <given-names>Quentin</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0009-0039-7064</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Girma</surname>
            <given-names>Helen</given-names>
          </name>
          <degrees>BS</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3235-9216</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>McNett</surname>
            <given-names>Sienna</given-names>
          </name>
          <degrees>BS</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0000-6259-907X</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Abu-Rub</surname>
            <given-names>Mohammad</given-names>
          </name>
          <degrees>BS</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3318-0522</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Kaminski</surname>
            <given-names>Henry</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8195-0141</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Surgery</institution>
        <institution>School of Medicine &amp; Health Sciences</institution>
        <institution>George Washington University</institution>
        <addr-line>Washington, DC</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>ORintelligence LLC</institution>
        <addr-line>Houston, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Laboratoire des Sciences de l'Ingénieur pour l'Environnement (LaSIE UMR-CNRS 7356)</institution>
        <institution>University of La Rochelle</institution>
        <addr-line>La Rochelle</addr-line>
        <country>France</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Care Constitution Corporation</institution>
        <addr-line>Washington, DC</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Neurology &amp; Rehabilitation Medicine</institution>
        <institution>School of Medicine &amp; Health Sciences</institution>
        <institution>George Washington University</institution>
        <addr-line>Washington, DC</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Marc Garbey <email>garbeymarc@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>19</day>
        <month>4</month>
        <year>2023</year>
      </pub-date>
      <volume>2</volume>
      <elocation-id>e43387</elocation-id>
      <history>
        <date date-type="received">
          <day>12</day>
          <month>10</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>30</day>
          <month>12</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>27</day>
          <month>2</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>2</day>
          <month>3</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Marc Garbey, Guillaume Joerger, Quentin Lesport, Helen Girma, Sienna McNett, Mohammad Abu-Rub, Henry Kaminski. Originally published in JMIR Neurotechnology (https://neuro.jmir.org), 19.04.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Neurotechnology, is properly cited. The complete bibliographic information, a link to the original publication on https://neuro.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://neuro.jmir.org/2023/1/e43387" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Telemedicine practice for neurological diseases has grown significantly during the COVID-19 pandemic. Telemedicine offers an opportunity to assess digitalization of examinations and enhances access to modern computer vision and artificial intelligence processing to annotate and quantify examinations in a consistent and reproducible manner. The Myasthenia Gravis Core Examination (MG-CE) has been recommended for the telemedicine evaluation of patients with myasthenia gravis.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We aimed to assess the ability to take accurate and robust measurements during the examination, which would allow improvement in workflow efficiency by making the data acquisition and analytics fully automatic and thereby limit the potential for observation bias.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We used Zoom (Zoom Video Communications) videos of patients with myasthenia gravis undergoing the MG-CE. The core examination tests required 2 broad categories of processing. First, computer vision algorithms were used to analyze videos with a focus on eye or body motions. Second, for the assessment of examinations involving vocalization, a different category of signal processing methods was required. In this way, we provide an algorithm toolbox to assist clinicians with the MG-CE. We used a data set of 6 patients recorded during 2 sessions.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Digitalization and control of quality of the core examination are advantageous and let the medical examiner concentrate on the patient instead of managing the logistics of the test. This approach showed the possibility of standardized data acquisition during telehealth sessions and provided real-time feedback on the quality of the metrics the medical doctor is assessing. Overall, our new telehealth platform showed submillimeter accuracy for ptosis and eye motion. In addition, the method showed good results in monitoring muscle weakness, demonstrating that continuous analysis is likely superior to preexercise and postexercise subjective assessment.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>We demonstrated the ability to objectively quantitate the MG-CE. Our results indicate that the MG-CE should be revisited to consider some of the new metrics that our algorithm identified. We provide a proof of concept involving the MG-CE, but the method and tools developed can be applied to many neurological disorders and have great potential to improve clinical care.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>telehealth</kwd>
        <kwd>telemedicine</kwd>
        <kwd>myasthenia gravis</kwd>
        <kwd>ptosis</kwd>
        <kwd>diplopia</kwd>
        <kwd>deep learning</kwd>
        <kwd>computer vision</kwd>
        <kwd>eye tracking</kwd>
        <kwd>neurological disease</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>With the COVID-19 pandemic, there was a rapid increase in the use of telemedicine in routine patient care [<xref ref-type="bibr" rid="ref1">1</xref>] and in clinical trials that moved to video evaluations to maintain subject follow-up [<xref ref-type="bibr" rid="ref2">2</xref>]. Telemedicine was already commonly used for acute stroke care and was in development for Parkinson disease, but the vast majority of neurologists were not using such approaches and were suddenly thrust into unfamiliar territory [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Diagnosis and monitoring of neuromuscular disorders, in particular, rely on a nuanced physical examination, and specialists would be particularly reticent to use telemedicine. However, telemedicine has great potential to provide improved assessment of aspects of neurological examinations, and facilitate patient monitoring and their education [<xref ref-type="bibr" rid="ref6">6</xref>], while reducing patient burden in attending in-person clinic visits and potentially increasing access. Further, there is great potential for rigorous video assessment to enhance clinical trial performance, which could reduce the burden on study participants and thereby enhance recruitment and retention.</p>
      <p>The Myasthenia Gravis Core Examination (MG-CE) [<xref ref-type="bibr" rid="ref7">7</xref>] was recommended for telemedicine evaluation of patients with myasthenia gravis (MG), and it involves specific aspects of neurological examinations critical to the comprehensive assessment of patients with MG. The National Institutes of Health Rare Disease Clinical Research Network dedicated to MG, MGNet, initiated an evaluation to assess the feasibility and validity of MG-CE for use in future clinical trials. These assessments were video recorded using the software Zoom (Zoom Video Communications), and we used the evaluations performed at George Washington University with the following 2 objectives: (1) assess workflow efficiency by making the data acquisition and analytics fully automatic and (2) evaluate the potential to quantitate the evaluations.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>MG-CE and Automatic Data Acquisition</title>
        <p>The study used recorded telemedicine evaluations of individuals with a clinical- and laboratory-confirmed diagnosis of MG. The patients were provided instructions regarding their position in relation to the cameras and level of illumination, and were told to follow the examiner’s instructions. We used videos of 6 subjects recorded twice within 7 days to develop our algorithms. One normal control subject was used to evaluate the methodology prior to evaluating MG subject videos.</p>
        <p>The MG-CE is summarized in <xref ref-type="table" rid="table1">Table 1</xref>, and a full description has been provided previously [<xref ref-type="bibr" rid="ref7">7</xref>]. The examination required 2 broad categories of processing: (1) the computer vision algorithm to analyze video focusing on eye or body motions and (2) the analysis of the voice signal, which requires a completely different category of signal processing methods. We describe successively each of the techniques used in these categories and summarize the digitalization process in <xref ref-type="table" rid="table2">Table 2</xref>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Myasthenia Gravis Core Examination exercises and evaluation metrics [<xref ref-type="bibr" rid="ref7">7</xref>].</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td>Variable</td>
                <td>Normal (0)</td>
                <td>Mild (1)</td>
                <td>Moderate (2)</td>
                <td>Severe (3)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Eyelid droop (ptosis)</td>
                <td>No ptosis</td>
                <td>Eyelid above the pupil</td>
                <td>Eyelid at the pupil</td>
                <td>Eyelid below the pupil</td>
              </tr>
              <tr valign="top">
                <td>Double vision (right/left)</td>
                <td>No diplopia with a gaze of 61 seconds</td>
                <td>Diplopia with a gaze of 11-60 seconds</td>
                <td>Diplopia with a gaze of 1-10 seconds</td>
                <td>Immediate diplopia</td>
              </tr>
              <tr valign="top">
                <td>Cheek puff</td>
                <td>Normal “seal”</td>
                <td>Transverse pucker</td>
                <td>Opposes lips but air escapes</td>
                <td>Cannot perform the exercise</td>
              </tr>
              <tr valign="top">
                <td>Tongue to cheek</td>
                <td>Normal: full convex deformity in the cheek</td>
                <td>Partial convex deformity in the cheek</td>
                <td>Able to move the tongue to the cheek, but no deformity</td>
                <td>Cannot perform the exercise</td>
              </tr>
              <tr valign="top">
                <td>Counting to 50</td>
                <td>No dysarthria at 50</td>
                <td>Dysarthria at 30-49</td>
                <td>Dysarthria at 10-29</td>
                <td>Dysarthria at 1-9</td>
              </tr>
              <tr valign="top">
                <td>Arm strength</td>
                <td>No drift for &gt;120 seconds</td>
                <td>Drift at 90-119 seconds</td>
                <td>Drift at 10-89 seconds</td>
                <td>Drift at 0-9 seconds</td>
              </tr>
              <tr valign="top">
                <td>Single-breath count</td>
                <td>Count of ≥30</td>
                <td>Count of 25-29</td>
                <td>Count of 20-24</td>
                <td>Count of &lt;20</td>
              </tr>
              <tr valign="top">
                <td>Sit-to-stand maneuver</td>
                <td>No difficulty</td>
                <td>Slow with effort but no hands</td>
                <td>Need to use hands</td>
                <td>Unable to stand unassisted</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Summary of our algorithm tool box to assist the clinician with the Myasthenia Gravis Core Examination.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="170"/>
            <col width="180"/>
            <col width="210"/>
            <col width="200"/>
            <col width="240"/>
            <thead>
              <tr valign="top">
                <td>Exercise</td>
                <td>Description</td>
                <td>Observation</td>
                <td>Metric</td>
                <td>Digital tool</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Ptosis</td>
                <td>Patients hold their gaze up for 60 seconds.</td>
                <td>Weakness of the upper eyelid and eyelid going above the pupil.</td>
                <td>Distance between the eyelid and the pupil, and distance between the upper and lower eyelids.</td>
                <td>High-definition camera and eye segmentation.</td>
              </tr>
              <tr valign="top">
                <td>Double vision</td>
                <td>Patients hold their gaze right and then left for 60 seconds.</td>
                <td>Misalignment of the eyes and moment of double vision.</td>
                <td>Track the distance between anatomic landmarks such as the upper/lower lid, and pupil and iris left and right boundaries.</td>
                <td>High-definition camera and eye segmentation.</td>
              </tr>
              <tr valign="top">
                <td>Cheek puff</td>
                <td>Patients puff their cheeks and hold it.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Assess muscle strength and fatiguability.</p>
                    </list-item>
                    <list-item>
                      <p>Extent of puffiness at baseline and versus external pressure placed on the cheeks.</p>
                    </list-item>
                    <list-item>
                      <p>Symmetry of cheek puff (left vs right).</p>
                    </list-item>
                  </list>
                </td>
                <td>Track face feature variation, mouth curvature, and dimension in particular.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Depth camera or Lidar.</p>
                    </list-item>
                    <list-item>
                      <p>High-definition camera with face landmark monitoring.</p>
                    </list-item>
                    <list-item>
                      <p>Track change of illumination in the region of interest.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Tongue pushing</td>
                <td>Patients use their tongue to push the cheek.</td>
                <td>Tongue muscle strength and symmetry.</td>
                <td>Track face feature variation, mouth curvature, dimension, and orientation in particular.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Depth camera or Lidar.</p>
                    </list-item>
                    <list-item>
                      <p>High-definition camera with face landmark monitoring.</p>
                    </list-item>
                    <list-item>
                      <p>Track change of illumination in the region of interest.</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Counting to 50</td>
                <td>Patients count out loud from 1 to 50.</td>
                <td>Assess for respiratory muscle fatigue and shortness of breath.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Loudness of the voice.</p>
                    </list-item>
                    <list-item>
                      <p>Various types of spectral analysis of the voice and mouth motion.</p>
                    </list-item>
                    <list-item>
                      <p>Energy metric of the voice.</p>
                    </list-item>
                  </list>
                </td>
                <td>Lip tracking and sound analysis of the exercise clip.</td>
              </tr>
              <tr valign="top">
                <td>Arm strength</td>
                <td>Patients hold their arms straight.</td>
                <td>Assess for muscle fatigue via sustained abduction of the arm.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Track body pose and different angles.</p>
                    </list-item>
                    <list-item>
                      <p>Length of time the patient can hold the arm in the pose.</p>
                    </list-item>
                    <list-item>
                      <p>Trajectory of the arm over time.</p>
                    </list-item>
                  </list>
                </td>
                <td>Pose detection on high-definition images.</td>
              </tr>
              <tr valign="top">
                <td>Single-breath test</td>
                <td>Patients count with only 1 breath.</td>
                <td>Assess for respiratory muscle fatigue.</td>
                <td>Length of the breath.</td>
                <td>Lip tracking and sound analysis of the exercise clip.</td>
              </tr>
              <tr valign="top">
                <td>Sit-to-stand maneuver</td>
                <td>Patients stand up with and without crossing their arms.</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Assess for muscle fatigue.</p>
                    </list-item>
                    <list-item>
                      <p>Ability of the patient to stand without using the arms for assistance.</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Body pose tracking.</p>
                    </list-item>
                    <list-item>
                      <p>Compare standing up speed between clips.</p>
                    </list-item>
                  </list>
                </td>
                <td>Pose detection on high-definition images.</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Deep Learning and Computer Vision Analysis</title>
        <sec>
          <title>Machine Learning to Track Body Landmarks and Face Landmarks</title>
          <p>Tracking faces or all body motions has become a standard tool [<xref ref-type="bibr" rid="ref8">8</xref>] thanks to publicly available deep learning libraries with a standard model (<xref rid="figure1" ref-type="fig">Figure 1</xref>). To track body positions during the test of arm position fatigue and the sit-to-stand maneuver (<xref rid="figure1" ref-type="fig">Figure 1</xref>), we used a deep learning model that is publicly available (the pretrained machine learning model BlazePose GHUM 3D from MediaPipe) (<xref rid="figure1" ref-type="fig">Figure 1</xref>) [<xref ref-type="bibr" rid="ref9">9</xref>]. For eye detection, we first needed to localize the face in the video frame.</p>
          <p>Among the most commonly used algorithms [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>], we chose OpenCV’s implementation of the Haar Cascade algorithm [<xref ref-type="bibr" rid="ref12">12</xref>], based on the detector from Lienhart et al [<xref ref-type="bibr" rid="ref13">13</xref>]. Our criteria to select the method were speed and reliability for real-time detection.</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>Pretrained machine learning models used with characteristic points.</p>
            </caption>
            <graphic xlink:href="neuro_v2i1e43387_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>To focus on the regions of interest (ROIs) of the eyes and lids, we used the pretrained DLib 68 points facial landmark detector that is based on the shape regression approach [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. It is a machine learning algorithm that places 68 characteristic points on a detected face. The model is pretrained on the I-BUG 300-W data set, which is comprised of 300 face pictures (<xref rid="figure1" ref-type="fig">Figure 1</xref>) [<xref ref-type="bibr" rid="ref16">16</xref>]. This software was used for the assessment of ptosis and eye position, as well as for the test of counting to 50 and the single-breath test in order to document lip reading and tracking of jaw motion (<xref ref-type="table" rid="table1">Table 1</xref>).</p>
          <p>Overall, both libraries provided robust results and could be used to annotate the video in real time for the ROIs. However, we found that the accuracy of the landmark points in the model of <xref rid="figure1" ref-type="fig">Figure 1</xref> obtained by this library was not adequate to provide metrics that could be used in eye motion assessment in the context of a standard telehealth session. Therefore, we developed a hybrid method that began from deep learning to identify the ROIs and refine the search for the pupil, eyelid, and iris as described next.</p>
        </sec>
        <sec>
          <title>Eye and Lid Image Segmentation</title>
          <p>Assessment of ptosis and ocular motility requires precise tracking of the eyelid, pupil, and iris. Precise metrics of these measures have been developed [<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. Established techniques to detect the iris location [<xref ref-type="bibr" rid="ref21">21</xref>] are the circular Hough transform [<xref ref-type="bibr" rid="ref22">22</xref>] and the Daugman algorithm method [<xref ref-type="bibr" rid="ref23">23</xref>]. However, we found that these methods lack robustness due to their sensitivity to the low resolution of the ROIs of the eyes, poor control for illumination of the subject, and specific eye geometry consequent to ptosis. The eye image in a standard Zoom meeting may not be bigger than about 40 pixels wide and 20 pixels high. Liu et al [<xref ref-type="bibr" rid="ref24">24</xref>] assessed eye movements for a computer-aided examination, but with highly controlled data and a highly controlled environment. We did not have optimum control of telehealth footage with patients at home, and the eye region has only one-tenth, at best, of the image frame dimension. Therefore, we took a more versatile approach that began with the ROI given by the previous deep learning library that we had used and then concentrated on a local search of the iris boundary, pupil center, and upper/lower eyelid (<xref rid="figure2" ref-type="fig">Figure 2</xref>). Since we started from a good estimate of the ROI for the eye, we used a combination of a local gradient method and clustering technique to compute the spatial coordinate and distance between landmarks of interest, and we have described this in the Results section. There are 2 classes of assessment depending on whether we compute the geometric dimension on an individual image or the dynamic of eye motion on video clips. 
We retrieved, for example, the relaxation time of the eyelid versus equilibrium, with some of the patients performing both eye exercises (<xref rid="figure2" ref-type="fig">Figure 2</xref>). However, there is no mention of such a metric in the core examination [<xref ref-type="bibr" rid="ref7">7</xref>]. The incorporation of this new information in the standard core remains to be determined.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Approximations on ptosis to assess the field of view: distance between the upper and lower eye lids (left), eye area opening (center), and distance from the upper lid to the pupil (right).</p>
            </caption>
            <graphic xlink:href="neuro_v2i1e43387_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Body Image Segmentation</title>
          <p>To have reproducible results with the entire view of the body during the examination, we tested our telehealth platform Inteleclinic on 1 patient and several control subjects. The pretrained machine learning model BlazePose GHUM 3D from MediaPipe [<xref ref-type="bibr" rid="ref9">9</xref>] has been evaluated extensively, so we only provide some examples of the results obtained with the MG-CE. The arms of the patient are extended for 2 minutes during the exercise, and we used the segments joining the landmark point (12) to (14) to track the right arm position and the landmark point (11) to (13) to track the left arm position (<xref rid="figure1" ref-type="fig">Figure 1</xref>B). We computed the angle formed by the arm’s segment as described above and the horizontal line going through the landmark points (11) and (12) of the upper torso in the model (<xref rid="figure1" ref-type="fig">Figure 1</xref>B). If the arms stay horizontal, the 2 angles we track for the model (<xref rid="figure1" ref-type="fig">Figure 1</xref>B) should be approximately zero. As the arm strength of the patient may fatigue during the exercise, the arms fall from the horizontal position, and the angle would decrease and become negative. A similar approach was used for the sit-to-stand exercise by tracking the hip landmarks (23) and (24) of the body motion model (<xref rid="figure1" ref-type="fig">Figure 1</xref>B).</p>
        </sec>
        <sec>
          <title>Cheek Deformation</title>
          <p>The ROI for cheek deformation was the polygon delimited by points (3), (15), (13), and (5) of model 1 (<xref rid="figure1" ref-type="fig">Figure 1</xref>A) for the cheek puff exercise. We could restrict this ROI to one half of the polygon for the tongue-to-cheek push exercise that is only performed on one side. As we aimed to reconstruct the local curvature of the cheek during the test involving (3) and (4) that can lead to cheek deformation, we used a depth camera and computed the depth map to assess the contour of the deformation in the ROI. When it came to the depth map, our first approach was to use a depth camera that could directly reconstruct the local curvature of the surface seen. The depth camera Intel Realsense D435 (Intel) has, according to the vendor, a relative accuracy below 2% for a distance less than 2 meters. This technology uses infrared and stereo cameras to analyze the deformation of a projected pattern of a scene and reconstruct from this information the depth, but requires camera calibration [<xref ref-type="bibr" rid="ref25">25</xref>-<xref ref-type="bibr" rid="ref28">28</xref>]. All tests were performed in realistic conditions for telehealth, that is, the distance of the face from the camera was 1 meter at minimum and the patient was directly facing the camera.</p>
          <p>The second approach we used was to assess a pure computer vision technique that works on a standard video. Our objective was to define basic information regarding when the cheek deformation starts, when it ends, and how it may become weaker during the examination period. In practice, this is what the medical doctor may grade during a telehealth consultation.</p>
          <p>The first solution exploits the local skin appearance alterations as the cheek becomes dilated [<xref ref-type="bibr" rid="ref29">29</xref>]. We could then compute the ROI “centered” on the cheek area where we expect the deformation to be most significant and the average pixel value of the blue dimension of the RGB code. To track the ROI, we used the mouth location and external boundary of the cheek that can be recovered from the model (<xref rid="figure1" ref-type="fig">Figure 1</xref>A). We could then track the average value over time during the exercise, that is, before the push to its end. We show in the Results section the limitation of this method that is a priori not robust with respect to light conditions and may depend on skin color.</p>
          <p>The second solution is based on the observation that cheek deformation impacts the mouth geometry. For example, in the cheek puff exercise, the mouth is closed and invariably the lip shape features change from those in the rest position. In the one-side tongue-to-cheek push, the upper lip is deformed. All these changes can be monitored in time easily by tracking the relative position of the points in the facial model that mark the mouth (<xref rid="figure1" ref-type="fig">Figure 1</xref>A).</p>
          <p>We describe our computer vision methods based on an analysis performed with 3 different formats of videos. The first was acquired with our new telehealth platform using a high-definition camera with a patient who has a normal cheek puff response. The second was acquired on a control subject with a cell phone camera (Apple 13 system, Apple Inc), and the third was extracted from the MGNet data set. We tested the impact of diversity with White subjects, subjects with dark sun tan, and subjects who were African American. We demonstrate in the Results section which metrics appeared to provide the best assessment.</p>
        </sec>
        <sec>
          <title>Voice Analysis</title>
          <p>Our goal was to assess breathing and change in speech in patients with MG from analyzing counting to 50 and single-breath count. Dysarthria is not a simple concept and is classified in several ways [<xref ref-type="bibr" rid="ref30">30</xref>]. Shortness of breath was easier to define but could be compromised by multiple factors. Shortness of breath and pulmonary function can be assessed from speech as appreciated by others [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. Previous studies have used machine learning and artificial intelligence (AI) techniques that require large training sets, and they are not specific to any neurological disorder or specific to a voice acquisition protocol.</p>
          <p>A good example of dysarthria detection has been published previously [<xref ref-type="bibr" rid="ref32">32</xref>]. The rate of success of a neural network is modest, that is, about 70% when competing with standard diagnostic performance. An alternative solution is to use a fractal feature as reported previously [<xref ref-type="bibr" rid="ref33">33</xref>]. This methodology seems to reach a greater accuracy of about 90% and does not require a training set.</p>
          <p>Lip and jaw movements are related to dysarthria [<xref ref-type="bibr" rid="ref34">34</xref>]. We are not aware of any systematic study that combines automatic lip motion tracking and speech digital analysis to assess breathing and dysarthria in patients with MG. We assessed more than half a dozen algorithms producing various sound metrics to check for the potential best voice analysis candidate to assess MG patients. As the analysis of the pitch of voice did not show any outliers in the data set and the energy metric analysis was impacted by the environment and control of the exercise, we restricted the description to the most promising algorithm. To compute voice features, we used the following steps. We separated the interval of time when the subject spoke from when the subject was silent. We used the MATLAB function “detectSpeech” [<xref ref-type="bibr" rid="ref35">35</xref>] on the original signal. The function “detectSpeech” provides the start and end times of each so called “speech segment.” The frequency of signal acquisition was about 1000 Hz. For comparison, we used our own custom-made algorithm to extract speech segments using sampling of size 60 of the voice signal. The signal now had an equivalent frequency of acquisition of about 17 Hz. We then used averaging on each sample of the original signal to dampen noise. The signal was then smoother, and we could use a threshold to filter out noise without building up a large number of small gaps corresponding to “no sound.” We looked in the sound track of “counting to 50” exercises for the largest 50 time intervals of sound above noise level. All voice features presented below were computed on the sound track that contained speech only.</p>
          <p>We present below the list of voice features we computed systematically for each of the sound tracks for both voice exercises. All these individual metrics or combinations of metrics were candidates to grade the severity of symptoms. The Results section reports which metric worked the best. The features are as follows:</p>
          <list list-type="bullet">
            <list-item>
              <p>Loudness of voice: Loudness was computed based on the algorithms defined in the ITU-R BS.1770-4 and EBU R 128 standards. The loudness of voice was integrated over all speech segments.</p>
            </list-item>
            <list-item>
              <p>Pitch or fundamental frequency of voice: The pitch was computed for each speech segment. The speech of a typical adult man will have a fundamental frequency from 85 Hz to 155 Hz and that of a typical adult woman will have a fundamental frequency from 165 Hz to 255 Hz.</p>
            </list-item>
            <list-item>
              <p>Spectral energy on a frequency interval: Both voice exercises were considered as breathing exercises, so we computed the L2 norm spectral energy of the voice signal over all speech segments in a frequency window that focused on the breathing rate (5 Hz to 25 Hz).</p>
            </list-item>
            <list-item>
              <p>Teager-Kaiser energy: It was used in tone detection [<xref ref-type="bibr" rid="ref36">36</xref>].</p>
            </list-item>
            <list-item>
              <p>Spectral entropy of the voice signal: Spectral entropy is a measure of spectral power distribution. Spectral entropy’s concept is based on Shannon entropy or information entropy. Spectral entropy treats the signal’s normalized power distribution in the frequency domain as a probability distribution and calculates the Shannon entropy of it. The Shannon entropy has been used for feature extraction in fault detection and diagnosis [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Spectral entropy has also been widely used as a feature in speech recognition [<xref ref-type="bibr" rid="ref39">39</xref>] and biomedical signal processing [<xref ref-type="bibr" rid="ref40">40</xref>].</p>
            </list-item>
            <list-item>
              <p>Special feature of the single-breath count: The airflow volume expansion during speech is in first approximation related to the square of the amplitude of the sound wave [<xref ref-type="bibr" rid="ref41">41</xref>]. We computed the integral of the square of the amplitude of the sound wave during the time window of the patient’s speech. Since there is no calibration of the microphone, the metric might be biased. There was considerable variability of diction during this exercise. Some subjects counted more slowly, while others appeared anxious and pronounced words quickly. We computed as an additional feature the percentage of time with vocal sound versus total time.</p>
            </list-item>
          </list>
          <p>For the voice analysis test in particular and for tests in general, there was significant variability in the parameters of data acquisition under clinical conditions, such as sound level. Providing guidance in real time to the patient will be essential to improve the ability to quantitate the telehealth examination.</p>
        </sec>
      </sec>
      <sec>
        <title>The Need for a Novel Telehealth Platform to Support the Protocol and Improvement of Data Acquisition</title>
        <p>Reproducibility requires that the various examinations are run in similar conditions. While we have mainly evaluated our algorithms on an existing data set of standard Zoom video evaluations with 6 patients, we next describe our new hardware and software solution named “Inteleclinic” (<xref rid="figure3" ref-type="fig">Figure 3</xref>) designed to improve data acquisition.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>View on the patient side of our cyber-physical system named “Inteleclinic” used to uniformize the sessions and improve the quality of the metrics. HD: high-definition.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <sec>
          <title>Controlling the Setting and Hardware</title>
          <p>To avoid changes in the quality of the recording (frames and audio), it is important that the hardware used is identical for all sessions and is calibrated. In an attempt to improve the quality of data acquisition with the future development of clinical studies, we built a new telehealth system with a high-definition camera and microphone that can be controlled remotely by the examiner. The recording is now performed on the patient side to obtain the raw footage of the video and audio in order to optimize maximum resolution and avoid any issues with network connection quality during the examination. Different interfaces and tools are added compared to Zoom’s control system to assist the patient and the doctor to focus on the consultation and not the technology. We demonstrate in the Results section the benefits of Inteleclinic compared to a standard Zoom video call of a patient at home using various technologies.</p>
        </sec>
        <sec>
          <title>Controlling Time</title>
          <p>Some of the tests, such as lid and eye positions as well as arm position, require precise timing from start to end. To avoid any manual entry errors, our digital health system automatically computes start and end times. The single-breath test as practiced now has no control on loudness and timing. Breath capacity is the product of the air outflow flux and the duration of the exercise. To be more precise, breath capacity is the time integration of the time-dependent output flow on the time interval of the exercise. We found that the maximum number counted is weakly correlated with the duration of the counting exercise. The maximum number reached is dependent on diction and may not be used as a valuable metric. Outflow during speech is proportional in the first approximation to the square of the energy source of sound [<xref ref-type="bibr" rid="ref41">41</xref>]. Depending on how loud the count is, one may expect different airflow output values for the patient. We tested on our platform a visual aid on the telehealth display to guide patient counting with a consistent rhythm of about 1 number counted per second in both the count to 50 and single-breath counting exercises.</p>
        </sec>
        <sec>
          <title>Controlling the Framing of the ROI and the Distance From the ROI to the Camera</title>
          <p>The digitalization of the tests involving ptosis, diplopia, cheek puff, tongue to cheek, arm strength, and sit-to-stand movement depends heavily on vision through the telehealth system. While we used a standard Zoom video that was preregistered in this study, it is straightforward with our system to provide guidance on the quality of video acquisition to make sure that distances between landmarks of interest use approximately the same number of pixels in order to provide quality and consistency for the results. In practice, we can provide a mark on the display of the patient with Inteleclinic to make sure the individual is properly centered and distanced from the camera.</p>
        </sec>
        <sec>
          <title>Controlling Sounds</title>
          <p>Every telehealth session may have different loudness of the sound track depending on the microphone setting and how loud or how soft the patient is speaking. Loudness is computed based on the algorithms defined in the ITU-R BS.1770-4 and EBU R 128 standards. If the microphone is calibrated with a benchmark sound, the loudness of the sound track can be computed continuously and guidance can be provided to the patient on how loud or soft they should try to keep their voice during the exercises.</p>
        </sec>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>All participants provided written consent for inclusion in the study. The study that provided the data has been approved by the George Washington University Institutional Review Board (IRB# NCR224008).</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Standardization of the Data Acquisition</title>
        <p>We evaluated our methods and identified large variability in the data acquisition and mode of operation for the assessments performed by the single examiner. The purpose of the primary study from which we obtained the videos was to assess test/retest variability and interexaminer variation in performance of telemedicine evaluations (manuscript in preparation). Our goal was to assess accurate and robust measurements from the MG-CE in order to remove human bias. The videos from the clinical study are quickly showing some limitations as the hardware used to film was not identical and the recording was performed on the doctor side, linking the quality of the frames to the quality of the network on both sides. We will next present our methodology and the platform. One of the purposes of our project was to standardize the data acquisition during the telehealth session and provide real-time feedback of the quality of the assessments for the examiner.</p>
      </sec>
      <sec>
        <title>Eyelid Position and Eye Movements</title>
        <p>We have used a data set of images of 6 patients and 3 healthy subjects with broad diversity in skin color, eye color, ocular anatomy, and image frame resolution to test the accuracy of our approach. We identified 72 ROIs of patients’ eye movements and then annotated them with ground truth measures obtained by manually zooming in the computer images. On average, we found the pupil location within 3 pixels and the distance from the pupil to the upper eyelid within 2 pixels independent of the image frame resolution. The relative accuracy in pixels was independent of the camera used. The standard Zoom video has 450×800 pixels per frame, a smartphone has 720×1280 pixels, and the Lumens B30U PTZ camera (Lumens Digital Optics Inc) has a resolution of 1080×1920. Overall, Inteleclinic doubles the resolution of a standard Zoom call and provides a submillimeter accuracy of lid position and eye motion. <xref rid="figure4" ref-type="fig">Figure 4</xref> provides an example of the localization of the upper lid, lower lid, and iris lower boundary detected automatically with our hybrid method using digital zooming of the face of the patient with both ROIs. We underline that the patient is sitting about 1 meter from the camera during the telehealth eye exercises, and one can see the patient’s face and shoulders. No particular effort was made to focus on the eyes of the patient in the video. The 6 red circles in each eye correspond to the markers of the ROI obtained with the deep learning library of model 1 (<xref rid="figure1" ref-type="fig">Figure 1</xref>A). The bottom markers are slightly off, and our local computer vision technique provides the ability to correct the position of the lower lid.</p>
        <p>In <xref rid="figure5" ref-type="fig">Figure 5</xref>, we show tracking of the distance between the lower boundary of the iris and the upper lid with a black curve, and the distance between the bottom of the iris and the lower lid with a red curve. One can check that the patient performs the exercise properly and can measure a 15% decay of the ptosis distance during the 1-minute exercise. As shown in the green least square fit with the green line, this decay is both linear and statistically significant. This decay is in fact difficult to notice during a medical examination without our method.</p>
        <p>In <xref rid="figure6" ref-type="fig">Figure 6</xref>, we report on the second exercise that tests diplopia. The red circle locations of the deep learning model of <xref rid="figure1" ref-type="fig">Figure 1</xref>A in the ROIs are accurate. We tracked the vertical border of the iris and computed the barycentric coordinate of the most inner points of the boundaries to compute any eventual misalignment of both eyes. The patient did not report double vision, and the quasisteady variation of the barycentric coordinates, as reported in <xref rid="figure7" ref-type="fig">Figure 7</xref>, confirmed this.</p>
        <p>However, the positions of the eyes of patients might be so extreme that some of the pupils might be partially obstructed during the exercise, which limits the value of the conclusion. In addition, we clearly observed ptosis during the exercise as the vertical dimension of the eye opening reached about half of what it was during the ptosis exercise.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Image during the ptosis exercise. Digital zoom on the view of the patient obtained with the Inteleclinic system showing anatomic markers obtained by computer vision in green, starting from the landmarks of the regions of interest obtained by deep learning.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Graphic representation of the distance between anatomic landmarks to assess ptosis dynamically during the first eye exercise.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Image during the diplopia exercise. Digital zoom on the view of the patient obtained with the Inteleclinic system showing anatomic markers obtained by computer vision in green, starting from the landmarks of the regions of interest obtained by deep learning.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Graphic representation of the barycentric coordinate of the anatomic landmarks used to assess eye alignment dynamically during the third eye exercise.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Cheek Puff and Tongue to Cheek</title>
        <p>We used a low-cost depth camera from Intel to reconstruct the local curvature of the cheek in laboratory conditions with a healthy subject who produced a large deformation, which was at the noise level of the signal. This evaluation would have failed for any patient who has difficulty pushing the tongue into the cheek. Better depth accuracy could be obtained by sensors that use time-of-flight technology [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        <p>The variety of videos demonstrates the limits and potential of our approach. In one video of the cheek puff exercise, the patient was told to blow his cheeks for about 2 seconds. The video was cut after 15 seconds because the patient was asked to test the stiffness of the skin with his fingers. The placement of the fingers on the cheek completely confused the AI tracking algorithm. The change in the mean value of the third component (blue) of the RGB classification inside the ROI on both sides of the cheek of the patient is not reliable unless the left cheek or right cheek ROI has good illumination. The detection is usually far less reliable on one of these ROIs because it is difficult to achieve good illumination on both sides of the face of the patient.</p>
        <p>Tracking mouth deformation during the exercise was a superior approach. First, we easily detected if and when the patient had the ability or did not have the ability to keep the mouth closed. Second, we tested several features, such as the distance between the corners where the upper and lower lip meet, that is, the segment delimited by the points (49) and (55) in the model (<xref rid="figure1" ref-type="fig">Figure 1</xref>A), the deformation of the mouth in the vertical direction, and the mean curvature of the upper lip and lower lip. <xref rid="figure8" ref-type="fig">Figure 8</xref> shows the feature that measures the normalized distance between the upper lip and the bottom of the nose during the cheek puff exercise using a standard Zoom video with 450×800 pixels per frame in an ADAPT (Adapting Disease Specific Outcome Measures Pilot Trial) patient. We obtained a curve that was close to the step function in this ADAPT patient, which accurately detected when the deformation of the cheek started and ended, and indicated how strong the deformation was. Not all features work all the time for all patients. As expected, variability in the anatomy of patients causes differences in which features work the best. From our experience, we found that the combination of several features helps identify the extent of cheek puff during the exercise.</p>
        <p>We obtained very similar results for the tongue-to-cheek push exercise. In <xref rid="figure9" ref-type="fig">Figure 9</xref>, an ADAPT patient pushes the left cheek with the tongue and then pushes the right cheek at 5.6 seconds.</p>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Normalized distance of the upper lip to the lower part of the nose during the cheek puff exercise.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure9" position="float">
          <label>Figure 9</label>
          <caption>
            <p>Exercise involving the tongue pushing the left cheek and then the right cheek with an ADAPT (Adapting Disease Specific Outcome Measures Pilot Trial) patient and a standard Zoom video. Tracking modifications of the lip shape orientation during the exercise. The red vertical bar in the middle corresponds to the patient switching from pushing the left cheek to pushing the right cheek.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The geometric feature we used was the angle formed by the mouth and the horizontal axis. The exercise breaks the symmetry of the face, and this feature is particularly adapted to capture the one-side deformation of the cheek. The illumination figure shows only marginal change for the second part of the exercise and is therefore not very robust. One may expect however that better control of the light during the telehealth session will resolve this issue.</p>
        <p>These techniques will not work for a subject with a moustache or beard. The shape of the face of patients with a high BMI may also impact the quality of the results. More work needs to be done on the digitalization of this specific test. As mentioned before, the depth camera would need to be highly accurate in order for the signal to be above the noise level, which is not the case with entry-level and low-cost systems.</p>
      </sec>
      <sec>
        <title>Arm Position and Sit-to-Stand Movement</title>
        <p>Most videos of the MGNet data set offered only partial views of the body during these exercises and showed great variability. The model (<xref rid="figure1" ref-type="fig">Figure 1</xref>) failed under such conditions.</p>
        <p><xref rid="figure10" ref-type="fig">Figure 10</xref> shows a representative example of the arm angle decay due to weakening during the 120-second assessment of one of the ADAPT patients. The measurement exhibited some minor noise. We used a high-order filtering method [<xref ref-type="bibr" rid="ref43">43</xref>] to provide a meaningful graphic to limit the noise of the method and maintain the trend that could be used for the physical examination assessment. The decay of both arms was linear and significant. It was however difficult for the medical examiner to quantify the slope or even notice it.</p>
        <p><xref rid="figure11" ref-type="fig">Figure 11</xref> shows an example of the vertical elevation examination with respect to time for both hips, involving tracking the elevation of landmark points (23) and (24) (<xref rid="figure1" ref-type="fig">Figure 1</xref>B) as a function of time. From this measurement, we could not only compute acceleration and speed as indicators of muscle function but also assess the stability of the motion by measuring lateral motion in the x-coordinate.</p>
        <p>One of the benefits of having the whole body tracked during these MG-CE evaluations is the ability to access additional information, such as the ability of the patient to stay stationary and keep their balance. While all measures are in pixels in the video itself, we recovered a good approximation of the physical dimension using the known dimension of the seat.</p>
        <fig id="figure10" position="float">
          <label>Figure 10</label>
          <caption>
            <p>Patient performing one of the exercises of the protocol with movement of the arms. Tracking the angle of right and left arm lowering during the exercise.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure11" position="float">
          <label>Figure 11</label>
          <caption>
            <p>Elevation of both hips during the exercises: (A) normal stand up; (B) weak stand up. The right hip is indicated in blue, and the left hip is indicated in green.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Counting Exercise</title>
        <p>This evaluation is used to assess breathing and speech quality. We used a data set of 6 patients involving 2 sessions and 9 additional healthy subject voice exercises. Audio files were cut to start and end approximately within 1 to 3 seconds of initiation and the end of counting. Based on the evaluation of patients performed by the physician according to the protocol [<xref ref-type="bibr" rid="ref7">7</xref>], there were rarely differences between the first visit and the second controlled visit for ptosis and diplopia grading. We found however that most of the metrics described above had some variability from one visit to another, and might be considered as more sensitive metrics than the current physician examination. We will report here on our main findings with these metrics.</p>
        <p>Instead of using the maximum number reached during the single-breath counting exercise, we used the duration of the exercise itself to grade the exercise. We found that the maximum number counted was weakly correlated with the duration of the single-breath counting exercise. The maximum number reached was indeed dependent on the speed of diction that varies greatly from one patient to another. To be more precise, one may expect that the airflow output value for the patient depends on the loudness of the voice and the pitch of the voice. In fact, we found that loudness and pitch computed with our algorithm varied dramatically from one patient to another. There had been no calibration of the microphone at the patient’s home, so we used the duration of the single-breath counting exercise as an indicator of MG severity. We suspected that a lower duration of the single-breath counting exercise is associated with more severe shortness of breath symptoms. We formulated the hypothesis that breathing difficulty might be detected by analyzing the signal in a range of frequencies concentrating on the typical breathing rate window. We used a fast Fourier transform to obtain the spectrum of the voice signal during the complete duration of the counting to 50 exercise and computed the energy of the signal restricted in the low-frequency window (5 Hz to 25 Hz). We found a weak correlation between the energy and MG severity estimated as described above (<xref rid="figure12" ref-type="fig">Figure 12</xref>). There were 2 outliers corresponding to 1 of the 6 patients who had severe symptoms according to the examiner annotation (<xref rid="figure12" ref-type="fig">Figure 12</xref>). The voice of the patient was so weak in the acquisition that the breathing signal information might have been at the noise level of the method.</p>
        <p>We did not proceed with the identification of dysarthria per se, but looked for a relationship involving one of the generic metrics that could be computed such as spectral entropy or Teager-Kaiser energy. An example of the mean entropy of the voice signal (<xref rid="figure13" ref-type="fig">Figure 13</xref>) shows that this criterion is promising and may separate patients from healthy controls. Counting the number of singular peaks in entropy during the examination provides better separation between patients and healthy subjects. The argument would be that an MG patient has a more monotonic voice than a healthy subject. More evaluations will be needed to confirm if entropy is a good metric. In contrast, Teager-Kaiser energy did not clearly separate MG patients.</p>
        <fig id="figure12" position="float">
          <label>Figure 12</label>
          <caption>
            <p>Weak correlation between the duration of counting and the energy of the signal in the low-frequency bandwidth corresponding to the breathing range. Three patient sessions with voice loudness below the threshold were not counted in the fitting. The 2 outliers are from 1 patient who had a very weak voice acquisition.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure13" position="float">
          <label>Figure 13</label>
          <caption>
            <p>Mean entropy of the voice signal during exercise 7.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e43387_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>We have systematically built a series of algorithms that can automatically compute the metrics of the MG-CE, which is a standardized telemedicine physical examination for patients with MG. This effort was motivated by the increasing use of telemedicine and the appreciation of inherent limitations of presently used clinical outcome measures [<xref ref-type="bibr" rid="ref44">44</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. For the MG-CE, the examiner ranked the subjective observation of each examination item into categories, but this separation among classes was performed a priori and was not the result of data mining in a large population. In that context, the threshold numbers used to separate metrics, such as duration, are likely to be artificial. The data collection for these tests during a teleconsultation is tedious, repetitive, and demanding for the physician. We demonstrated a methodology, which can accelerate the data collection and provide the rationale for a posteriori classification of MG severity based on a large population of patients.</p>
        <p>Other ranking of the test might be intuitive, for example, how to define or compare difficulty in standing up. It may involve tedious motion due to muscle weakness, arthritis, or obesity. Currently, the duration of cheek deformation is not counted, but our methodology may eventually provide a precise measurement. Based on the new data set that our method provides, one should investigate further if the MG-CE classification, as well as all other categorical measures in MG, should be revisited to consider the new metrics that our algorithm can provide. In particular, the dynamic component of muscle weakness, which is a hallmark of MG and an important factor in quality of life, is not captured well by existing clinical outcome measures and not at all in routine clinical practice [<xref ref-type="bibr" rid="ref46">46</xref>].</p>
        <p>Our study exposed limitations in aspects of neuromuscular examination. The ability to deform the cheek does not say much about the ability to hold pressure and for how long. The cheek deformation exercise did prove to be the most difficult for achieving proper digitalization. The scoring of this exercise in the original medical protocol appeared particularly limited. We have refined mouth deformation monitoring under laboratory conditions with our Inteleclinic system to better apply computer learning techniques. Moreover, the counting exercises can be used to assess respiration function, but the number achieved does not fully equate to the severity of respiratory insufficiency.</p>
        <p>Another challenge for our evaluation is that patients can compensate for some level of weakness and reduce the apparent severity assessed by the examination. For example, the ability to precisely compute the trajectory of the patient’s hip movement during the sit-to-stand exercise may identify if there is compensation by one leg supporting the movement more than the other. This situation could be particularly difficult for a human examiner to identify. Overall, our algorithms should give unbiased results and remove any potential subjectivity from the medical examination.</p>
        <p>The accuracy of every computer algorithm must be constantly interrogated. Every metric should, in principle, come with an error estimate, which is not frequently the case in the current solutions, including that of the human examiner. One key component to ensure such quality of results is to control the condition of the acquisition of video and sound during the telemedicine session. With voice analysis, we would need to ensure proper calibration of the microphone at the patient’s home, as well as check during the sound registration that the patient speaks with a loudness within acceptable bounds. The latter can be done automatically in order to provide guidance during the examination. Similarly, the AI and computer vision aspects of the data acquisition require the patient’s distance from the camera and the light condition to always be consistent with the exercise requested. This is technically feasible because the telehealth system can compute in real time the dimension in pixels of any ROI and the quality of segmentation in order to correct any obvious mistake in the data acquisition. For example, the AI model of <xref rid="figure1" ref-type="fig">Figure 1</xref>B that tracks the sit-to-stand exercise fails if the patient’s head leaves the video frame. This kind of problem can be immediately reported to the examiner during the test. Another example is that the single-breath counting test may poorly define the initial state, speed, and loudness of speech, as counting greatly varies between patients and has an impact on breath performance evaluation.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>Systematic digitalization and control of quality of the MG-CE are advantageous and would allow trained medical assistants to perform standardized examinations, allowing the physician to concentrate on patient questions and education instead of managing the logistics of the test. We also assessed our hardware-software “Inteleclinic” solution for telehealth consultation, which appears to be able to enhance data quality (described in a provisional patent; number 63305420; Garbey M and Joerger G, 2020). Our methods and technology would be particularly applicable to clinical trials, which are limited in requiring a large number of examiners who all perform assessments in slightly different manners. A trial could substitute present operations with a central telemedicine facility. We envision that our telehealth approach can be applied to other neuromuscular diseases beyond MG and will provide objective, reproducible, and quantitative health care assessments that go beyond the present capabilities.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">ADAPT</term>
          <def>
            <p>Adapting Disease Specific Outcome Measures Pilot Trial</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">MG</term>
          <def>
            <p>myasthenia gravis</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">MG-CE</term>
          <def>
            <p>Myasthenia Gravis Core Examination</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">ROI</term>
          <def>
            <p>region of interest</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The work to gather clinical data was supported in part by the MGNet, a member of the Rare Disease Clinical Research Network Consortium (RDCRN) (NIH U54 NS115054). Funding support for the Data Management Coordinating Center is provided by the National Center for Advancing Translational Sciences (NCATS) and the National Institute of Neurological Disorders and Stroke (NINDS). </p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Feldman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Szerencsy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Austrian</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kothari</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Heo</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Barzideh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hickey</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Snapp</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Aminian</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Testa</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Giving Your Electronic Health Record a Checkup After COVID-19: A Practical Framework for Reviewing Clinical Decision Support in Light of the Telemedicine Expansion</article-title>
          <source>JMIR Med Inform</source>
          <year>2021</year>
          <month>01</month>
          <day>27</day>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>e21712</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2021/1/e21712/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/21712</pub-id>
          <pub-id pub-id-type="medline">33400683</pub-id>
          <pub-id pub-id-type="pii">v9i1e21712</pub-id>
          <pub-id pub-id-type="pmcid">PMC7842852</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Faget</surname>
              <given-names>KY</given-names>
            </name>
          </person-group>
          <article-title>The role of telehealth in decentralized clinical trials</article-title>
          <source>Journal of Health Care Compliance</source>
          <year>2021</year>
          <fpage>1</fpage>
          <lpage>5</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.foley.com/en/insights/publications/2021/05/role-telehealth-decentralized-clinical-trials"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giannotta</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Petrelli</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Pini</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Telemedicine applied to neuromuscular disorders: focus on the COVID-19 pandemic era</article-title>
          <source>Acta Myol</source>
          <year>2022</year>
          <month>03</month>
          <volume>41</volume>
          <issue>1</issue>
          <fpage>30</fpage>
          <lpage>36</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35465343"/>
          </comment>
          <pub-id pub-id-type="doi">10.36185/2532-1900-066</pub-id>
          <pub-id pub-id-type="medline">35465343</pub-id>
          <pub-id pub-id-type="pmcid">PMC9004335</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Spina</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Trojsi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Tozza</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Iovino</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Iodice</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Passaniti</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Abbadessa</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Bonavita</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Leocani</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tedeschi</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Manganelli</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Lavorgna</surname>
              <given-names>L</given-names>
            </name>
            <collab>Digital Technologies‚ WebSocial Media Study Group of the Italian Society of Neurology (SIN)</collab>
          </person-group>
          <article-title>How to manage with telemedicine people with neuromuscular diseases?</article-title>
          <source>Neurol Sci</source>
          <year>2021</year>
          <month>09</month>
          <day>25</day>
          <volume>42</volume>
          <issue>9</issue>
          <fpage>3553</fpage>
          <lpage>3559</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34173087"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10072-021-05396-8</pub-id>
          <pub-id pub-id-type="medline">34173087</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10072-021-05396-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC8232560</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gmunder</surname>
              <given-names>KN</given-names>
            </name>
            <name name-style="western">
              <surname>Ruiz</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Franceschi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Suarez</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Factors to effective telemedicine visits during the COVID-19 pandemic: Cohort study</article-title>
          <source>JMIR Med Inform</source>
          <year>2021</year>
          <month>08</month>
          <day>27</day>
          <volume>9</volume>
          <issue>8</issue>
          <fpage>e27977</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2021/8/e27977/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/27977</pub-id>
          <pub-id pub-id-type="medline">34254936</pub-id>
          <pub-id pub-id-type="pii">v9i8e27977</pub-id>
          <pub-id pub-id-type="pmcid">PMC8404776</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alhajri</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Simsekler</surname>
              <given-names>MCE</given-names>
            </name>
            <name name-style="western">
              <surname>Alfalasi</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Alhashmi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>AlGhatrif</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Balalaa</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Al Ali</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Almaashari</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Al Memari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Al Hosani</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Al Zaabi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Almazroui</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Alhashemi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Baltatu</surname>
              <given-names>OC</given-names>
            </name>
          </person-group>
          <article-title>Physicians' attitudes toward telemedicine consultations during the COVID-19 pandemic: Cross-sectional study</article-title>
          <source>JMIR Med Inform</source>
          <year>2021</year>
          <month>06</month>
          <day>01</day>
          <volume>9</volume>
          <issue>6</issue>
          <fpage>e29251</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2021/6/e29251/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/29251</pub-id>
          <pub-id pub-id-type="medline">34001497</pub-id>
          <pub-id pub-id-type="pii">v9i6e29251</pub-id>
          <pub-id pub-id-type="pmcid">PMC8171285</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guidon</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Muppidi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nowak</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Guptill</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Hehir</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Ruzhansky</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Burton</surname>
              <given-names>LB</given-names>
            </name>
            <name name-style="western">
              <surname>Post</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cutter</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Conwit</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mejia</surname>
              <given-names>NI</given-names>
            </name>
            <name name-style="western">
              <surname>Kaminski</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>JF</given-names>
            </name>
          </person-group>
          <article-title>Telemedicine visits in myasthenia gravis: Expert guidance and the Myasthenia Gravis Core Exam (MG-CE)</article-title>
          <source>Muscle Nerve</source>
          <year>2021</year>
          <month>09</month>
          <day>07</day>
          <volume>64</volume>
          <issue>3</issue>
          <fpage>270</fpage>
          <lpage>276</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/33959997"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mus.27260</pub-id>
          <pub-id pub-id-type="medline">33959997</pub-id>
          <pub-id pub-id-type="pmcid">PMC9057373</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Mentiplay</surname>
              <given-names>BF</given-names>
            </name>
            <name name-style="western">
              <surname>Hough</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Pua</surname>
              <given-names>YH</given-names>
            </name>
          </person-group>
          <article-title>Three-dimensional cameras and skeleton pose tracking for physical function assessment: A review of uses, validity, current developments and Kinect alternatives</article-title>
          <source>Gait Posture</source>
          <year>2019</year>
          <month>02</month>
          <volume>68</volume>
          <fpage>193</fpage>
          <lpage>200</lpage>
          <pub-id pub-id-type="doi">10.1016/j.gaitpost.2018.11.029</pub-id>
          <pub-id pub-id-type="medline">30500731</pub-id>
          <pub-id pub-id-type="pii">S0966-6362(18)31191-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bazarevsky</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Grishchenko</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Raveendran</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Grundmann</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>BlazePose: On-device Real-time Body Pose tracking</article-title>
          <source>arXiv</source>
          <access-date>2023-03-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2006.10204">https://arxiv.org/abs/2006.10204</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Learned-Miller</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>FDDB: A Benchmark for Face Detection in Unconstrained Settings</article-title>
          <source>University of Massachusetts</source>
          <access-date>2023-03-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://vis-www.cs.umass.edu/fddb/fddb.pdf">http://vis-www.cs.umass.edu/fddb/fddb.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kabakus</surname>
              <given-names>AT</given-names>
            </name>
          </person-group>
          <article-title>An experimental performance comparison of widely used face detection tools</article-title>
          <source>ADCAIJ: Advances in Distributed Computing and Artificial Intelligence Journal</source>
          <year>2019</year>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>5</fpage>
          <lpage>12</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://revistas.usal.es/cinco/index.php/2255-2863/article/view/ADCAIJ201983512"/>
          </comment>
          <pub-id pub-id-type="doi">10.14201/ADCAIJ201983512</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="web">
          <article-title>opencv</article-title>
          <source>GitHub</source>
          <access-date>2023-03-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_eye.xml">https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_eye.xml</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lienhart</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Maydt</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>An extended set of Haar-like features for rapid object detection</article-title>
          <source>Proceedings of the International Conference on Image Processing</source>
          <year>2002</year>
          <conf-name>International Conference on Image Processing</conf-name>
          <conf-date>September 22-25, 2002</conf-date>
          <conf-loc>Rochester, NY, USA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icip.2002.1038171</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Face alignment by explicit shape regression</article-title>
          <source>Int J Comput Vis</source>
          <year>2013</year>
          <month>12</month>
          <day>13</day>
          <volume>107</volume>
          <issue>2</issue>
          <fpage>177</fpage>
          <lpage>190</lpage>
          <pub-id pub-id-type="doi">10.1007/s11263-013-0667-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kazemi</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sullivan</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>One millisecond face alignment with an ensemble of regression trees</article-title>
          <year>2014</year>
          <conf-name>IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 23-28, 2014</conf-date>
          <conf-loc>Columbus, OH, USA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2014.241</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sagonas</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Antonakos</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Tzimiropoulos</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zafeiriou</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pantic</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>300 Faces In-The-Wild Challenge: database and results</article-title>
          <source>Image and Vision Computing</source>
          <year>2016</year>
          <month>03</month>
          <volume>47</volume>
          <fpage>3</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.1016/j.imavis.2016.01.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Putterman</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Margin reflex distance (MRD) 1, 2, and 3</article-title>
          <source>Ophthalmic Plast Reconstr Surg</source>
          <year>2012</year>
          <volume>28</volume>
          <issue>4</issue>
          <fpage>308</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1097/IOP.0b013e3182523b7f</pub-id>
          <pub-id pub-id-type="medline">22785597</pub-id>
          <pub-id pub-id-type="pii">00002341-201207000-00024</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Callahan</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Surgically mismanaged ptosis associated with double elevator palsy</article-title>
          <source>Arch Ophthalmol</source>
          <year>1981</year>
          <month>01</month>
          <day>01</day>
          <volume>99</volume>
          <issue>1</issue>
          <fpage>108</fpage>
          <lpage>12</lpage>
          <pub-id pub-id-type="doi">10.1001/archopht.1981.03930010110014</pub-id>
          <pub-id pub-id-type="medline">7458735</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Struck</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Larson</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Surgery for supranuclear monocular elevation deficiency</article-title>
          <source>Strabismus</source>
          <year>2015</year>
          <month>12</month>
          <day>15</day>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>176</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.3109/09273972.2015.1099710</pub-id>
          <pub-id pub-id-type="medline">26669423</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yurdakul</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>Ugurlu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Maden</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Surgical treatment in patients with double elevator palsy</article-title>
          <source>Eur J Ophthalmol</source>
          <year>2009</year>
          <month>01</month>
          <day>24</day>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>697</fpage>
          <lpage>701</lpage>
          <pub-id pub-id-type="doi">10.1177/112067210901900502</pub-id>
          <pub-id pub-id-type="medline">19787584</pub-id>
          <pub-id pub-id-type="pii">FC1DDC90-B9AC-4DC8-BAD5-FFD6D2B29513</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>HJ</given-names>
            </name>
          </person-group>
          <article-title>Human eye tracking and related issues: A review</article-title>
          <source>International Journal of Scientific and Research Publications</source>
          <year>2012</year>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ijsrp.org/research-paper-0912/ijsrp-p0929.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kunka</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kostek</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Non-intrusive infrared-free eye tracking method</article-title>
          <year>2009</year>
          <conf-name>Signal Processing Algorithms, Architectures, Arrangements, and Applications SPA 2009</conf-name>
          <conf-date>September 24-26, 2009</conf-date>
          <conf-loc>Poznan, Poland</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Toennies</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Behrens</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Aurnhammer</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Feasibility of Hough-transform-based iris localisation for real-time-application</article-title>
          <year>2002</year>
          <conf-name>International Conference on Pattern Recognition</conf-name>
          <conf-date>August 11-15, 2002</conf-date>
          <conf-loc>Quebec City, QC, Canada</conf-loc>
          <fpage>1053</fpage>
          <lpage>1056</lpage>
          <pub-id pub-id-type="doi">10.1109/icpr.2002.1048486</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A computer-aided system for ocular myasthenia gravis diagnosis</article-title>
          <source>Tsinghua Sci Technol</source>
          <year>2021</year>
          <month>10</month>
          <volume>26</volume>
          <issue>5</issue>
          <fpage>749</fpage>
          <lpage>758</lpage>
          <pub-id pub-id-type="doi">10.26599/TST.2021.9010025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scharstein</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Szeliski</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>High-accuracy stereo depth maps using structured light</article-title>
          <year>2003</year>
          <conf-name>IEEE Computer Society Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 18-20, 2003</conf-date>
          <conf-loc>Madison, WI, USA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2003.1211354</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Trucco</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Verri</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Introductory Techniques for 3-D Computer Vision</source>
          <year>1998</year>
          <publisher-loc>Englewood Cliffs, NJ, USA</publisher-loc>
          <publisher-name>Prentice Hall</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Geng</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Structured-light 3D surface imaging: a tutorial</article-title>
          <source>Adv Opt Photon</source>
          <year>2011</year>
          <month>03</month>
          <day>31</day>
          <volume>3</volume>
          <issue>2</issue>
          <fpage>128</fpage>
          <pub-id pub-id-type="doi">10.1364/aop.3.000128</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siena</surname>
              <given-names>FL</given-names>
            </name>
            <name name-style="western">
              <surname>Byrom</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Watts</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Breedon</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Utilising the Intel RealSense Camera for measuring health outcomes in clinical research</article-title>
          <source>J Med Syst</source>
          <year>2018</year>
          <month>02</month>
          <day>05</day>
          <volume>42</volume>
          <issue>3</issue>
          <fpage>53</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/29404692"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10916-018-0905-x</pub-id>
          <pub-id pub-id-type="medline">29404692</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10916-018-0905-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC5799357</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hassan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aronovich</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kotzen</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mohan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tal-Singer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Simonelli</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Detecting shortness of breath remotely and accurately using smartphones and vocal biomarkers</article-title>
          <source>European Respiratory Journal</source>
          <year>2020</year>
          <volume>56</volume>
          <fpage>4715</fpage>
          <pub-id pub-id-type="doi">10.1183/13993003.congress-2020.4715</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Enderby</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Disorders of communication: dysarthria</article-title>
          <source>Handb Clin Neurol</source>
          <year>2013</year>
          <volume>110</volume>
          <fpage>273</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.1016/B978-0-444-52901-5.00022-8</pub-id>
          <pub-id pub-id-type="medline">23312647</pub-id>
          <pub-id pub-id-type="pii">B978-0-444-52901-5.00022-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alam</surname>
              <given-names>MZ</given-names>
            </name>
            <name name-style="western">
              <surname>Simonetti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Brillantino</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tayler</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Grainge</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Siribaddana</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Nouraei</surname>
              <given-names>SAR</given-names>
            </name>
            <name name-style="western">
              <surname>Batchelor</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rahman</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Mancuzo</surname>
              <given-names>EV</given-names>
            </name>
            <name name-style="western">
              <surname>Holloway</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Holloway</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Rezwan</surname>
              <given-names>FI</given-names>
            </name>
          </person-group>
          <article-title>Predicting pulmonary function from the analysis of voice: A machine learning approach</article-title>
          <source>Front Digit Health</source>
          <year>2022</year>
          <month>2</month>
          <day>8</day>
          <volume>4</volume>
          <fpage>750226</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35211691"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2022.750226</pub-id>
          <pub-id pub-id-type="medline">35211691</pub-id>
          <pub-id pub-id-type="pmcid">PMC8861188</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ijitona</surname>
              <given-names>TB</given-names>
            </name>
            <name name-style="western">
              <surname>Soraghan</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Lowit</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Di-Caterina</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Yue</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Automatic detection of speech disorder in dysarthria using extended speech feature extraction and neural networks classification</article-title>
          <year>2017</year>
          <conf-name>IET 3rd International Conference on Intelligent Signal Processing (ISP 2017)</conf-name>
          <conf-date>December 04-05, 2017</conf-date>
          <conf-loc>London, UK</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1049/cp.2017.0360</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Spangler</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Vinodchandran</surname>
              <given-names>NV</given-names>
            </name>
            <name name-style="western">
              <surname>Samal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Green</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>Fractal features for automatic detection of dysarthria</article-title>
          <year>2017</year>
          <conf-name>IEEE EMBS International Conference on Biomedical &amp; Health Informatics (BHI)</conf-name>
          <conf-date>February 16-19, 2017</conf-date>
          <conf-loc>Orlando, FL, USA</conf-loc>
          <fpage>437</fpage>
          <lpage>440</lpage>
          <pub-id pub-id-type="doi">10.1109/bhi.2017.7897299</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mefferd</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bagnato</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>A first investigation of tongue, lip, and jaw movements in persons with dysarthria due to multiple sclerosis</article-title>
          <source>Mult Scler Relat Disord</source>
          <year>2019</year>
          <month>01</month>
          <volume>27</volume>
          <fpage>188</fpage>
          <lpage>194</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30399501"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.msard.2018.10.116</pub-id>
          <pub-id pub-id-type="medline">30399501</pub-id>
          <pub-id pub-id-type="pii">S2211-0348(18)30482-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC6333529</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Warden</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition</article-title>
          <source>arXiv</source>
          <year>2018</year>
          <access-date>2023-03-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1804.03209">https://arxiv.org/abs/1804.03209</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boudraa</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Salzenstein</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Teager–Kaiser energy methods for signal and image analysis: A review</article-title>
          <source>Digital Signal Processing</source>
          <year>2018</year>
          <month>07</month>
          <volume>78</volume>
          <fpage>338</fpage>
          <lpage>375</lpage>
          <pub-id pub-id-type="doi">10.1016/j.dsp.2018.03.010</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pan</surname>
              <given-names>YN</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>XL</given-names>
            </name>
          </person-group>
          <article-title>Spectral entropy: A complementary index for rolling element bearing performance degradation assessment</article-title>
          <source>Proceedings of the Institution of Mechanical Engineers, Part C: Journal of Mechanical Engineering Science</source>
          <year>2008</year>
          <month>12</month>
          <day>11</day>
          <volume>223</volume>
          <issue>5</issue>
          <fpage>1223</fpage>
          <lpage>1231</lpage>
          <pub-id pub-id-type="doi">10.1243/09544062JMES1224</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Parey</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A review of gear fault diagnosis using various condition indicators</article-title>
          <source>Procedia Engineering</source>
          <year>2016</year>
          <volume>144</volume>
          <fpage>253</fpage>
          <lpage>263</lpage>
          <pub-id pub-id-type="doi">10.1016/j.proeng.2016.05.131</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shen</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Hung</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>LS</given-names>
            </name>
          </person-group>
          <article-title>Robust entropy-based endpoint detection for speech recognition in noisy environments</article-title>
          <source>Proceedings of the 5th International Conference on Spoken Language Processing</source>
          <year>1998</year>
          <conf-name>5th International Conference on Spoken Language Processing</conf-name>
          <conf-date>November 30-December 4, 1998</conf-date>
          <conf-loc>Sydney, Australia</conf-loc>
          <pub-id pub-id-type="doi">10.21437/ICSLP.1998-527</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vakkuri</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yli-Hankala</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Talja</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mustola</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tolvanen-Laakso</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sampson</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Viertiö-Oja</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Time-frequency balanced spectral entropy as a measure of anesthetic drug effect in central nervous system during sevoflurane, propofol, and thiopental anesthesia</article-title>
          <source>Acta Anaesthesiol Scand</source>
          <year>2004</year>
          <month>02</month>
          <volume>48</volume>
          <issue>2</issue>
          <fpage>145</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1111/j.0001-5172.2004.00323.x</pub-id>
          <pub-id pub-id-type="medline">14995935</pub-id>
          <pub-id pub-id-type="pii">323</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Sataloff</surname>
              <given-names>RT</given-names>
            </name>
          </person-group>
          <article-title>Respiratory function and voice: The role for airflow measures</article-title>
          <source>J Voice</source>
          <year>2022</year>
          <month>07</month>
          <volume>36</volume>
          <issue>4</issue>
          <fpage>542</fpage>
          <lpage>553</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2020.07.019</pub-id>
          <pub-id pub-id-type="medline">32981809</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(20)30268-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Horaud</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hansard</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Evangelidis</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ménier</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>An overview of depth cameras and range scanners based on time-of-flight technologies</article-title>
          <source>Machine Vision and Applications</source>
          <year>2016</year>
          <month>6</month>
          <day>16</day>
          <volume>27</volume>
          <issue>7</issue>
          <fpage>1005</fpage>
          <lpage>1020</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2012.06772"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s00138-016-0784-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garbey</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Merla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pavlidis</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Contact-free measurement of cardiac pulse based on the analysis of thermal imagery</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2007</year>
          <month>08</month>
          <volume>54</volume>
          <issue>8</issue>
          <fpage>1418</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1109/TBME.2007.891930</pub-id>
          <pub-id pub-id-type="medline">17694862</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McPherson</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Aban</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Duda</surname>
              <given-names>PW</given-names>
            </name>
            <name name-style="western">
              <surname>Farzaneh-Far</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wolfe</surname>
              <given-names>GI</given-names>
            </name>
            <name name-style="western">
              <surname>Kaminski</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Cutter</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>I</given-names>
            </name>
            <collab>on behalf of the MGTX Study Group</collab>
          </person-group>
          <article-title>Correlation of Quantitative Myasthenia Gravis and Myasthenia Gravis Activities of Daily Living scales in the MGTX study</article-title>
          <source>Muscle Nerve</source>
          <year>2020</year>
          <month>08</month>
          <day>04</day>
          <volume>62</volume>
          <issue>2</issue>
          <fpage>261</fpage>
          <lpage>266</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32369631"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/mus.26910</pub-id>
          <pub-id pub-id-type="medline">32369631</pub-id>
          <pub-id pub-id-type="pmcid">PMC7496446</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cleanthous</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mork</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Regnault</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cano</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kaminski</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Morel</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Development of the Myasthenia Gravis (MG) Symptoms PRO: a case study of a patient-centred outcome measure in rare disease</article-title>
          <source>Orphanet J Rare Dis</source>
          <year>2021</year>
          <month>10</month>
          <day>30</day>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>457</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ojrd.biomedcentral.com/articles/10.1186/s13023-021-02064-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13023-021-02064-0</pub-id>
          <pub-id pub-id-type="medline">34717694</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13023-021-02064-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC8556940</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Benatar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Cutter</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Kaminski</surname>
              <given-names>HJ</given-names>
            </name>
          </person-group>
          <article-title>The best and worst of times in therapy development for myasthenia gravis</article-title>
          <source>Muscle Nerve</source>
          <year>2023</year>
          <month>01</month>
          <day>12</day>
          <volume>67</volume>
          <issue>1</issue>
          <fpage>12</fpage>
          <lpage>16</lpage>
          <pub-id pub-id-type="doi">10.1002/mus.27742</pub-id>
          <pub-id pub-id-type="medline">36321730</pub-id>
          <pub-id pub-id-type="pmcid">PMC9780175</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
