<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Neurotech</journal-id>
      <journal-title>JMIR Neurotechnology</journal-title>
      <issn pub-type="epub">2817-092X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v2i1e45828</article-id>
      <article-id pub-id-type="pmid"/>
      <article-id pub-id-type="doi">10.2196/45828</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Connect Brain, a Mobile App for Studying Depth Perception in Angiography Visualization: Gamification Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Kubben</surname>
            <given-names>Pieter</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Blumberg</surname>
            <given-names>Fran</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Preim</surname>
            <given-names>Bernhard</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Titov</surname>
            <given-names>Andrey</given-names>
          </name>
          <degrees>BCompSc, MCompSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Gina Cody School of Computer Science and Engineering</institution>
            <institution>Concordia University</institution>
            <addr-line>1455 Boul. de Maisonneuve Ouest</addr-line>
            <addr-line>Montreal, QC, H3G 1M8</addr-line>
            <country>Canada</country>
            <phone>1 514 848 2424</phone>
            <email>andrey.titov.1@ens.etsmtl.ca</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5042-2736</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Drouin</surname>
            <given-names>Simon</given-names>
          </name>
          <degrees>BEng, MSc, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7265-8747</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Kersten-Oertel</surname>
            <given-names>Marta</given-names>
          </name>
          <degrees>BA, BSc, MSc, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9492-8402</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Software and Information Technology Engineering Department</institution>
        <institution>École de Technologie Supérieure</institution>
        <addr-line>Montreal, QC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Gina Cody School of Computer Science and Engineering</institution>
        <institution>Concordia University</institution>
        <addr-line>Montreal, QC</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Andrey Titov <email>andrey.titov.1@ens.etsmtl.ca</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2023</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>20</day>
        <month>10</month>
        <year>2023</year>
      </pub-date>
      <volume>2</volume>
      <elocation-id>e45828</elocation-id>
      <history>
        <date date-type="received">
          <day>19</day>
          <month>1</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>10</day>
          <month>2</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>3</day>
          <month>4</month>
          <year>2023</year>
        </date>
        <date date-type="accepted">
          <day>20</day>
          <month>9</month>
          <year>2023</year>
        </date>
      </history>
      <copyright-statement>©Andrey Titov, Simon Drouin, Marta Kersten-Oertel. Originally published in JMIR Neurotechnology (https://neuro.jmir.org), 20.10.2023.</copyright-statement>
      <copyright-year>2023</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Neurotechnology, is properly cited. The complete bibliographic information, a link to the original publication on https://neuro.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://neuro.jmir.org/2023/1/e45828" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>One of the bottlenecks of visualization research is the lack of volunteers for studies that evaluate new methods and paradigms. The increased availability of web-based marketplaces, combined with the possibility of implementing volume rendering, a computationally expensive method, on mobile devices, has opened the door for using gamification in the context of medical image visualization studies.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We aimed to describe a gamified study that we conducted with the goal of comparing several cerebrovascular visualization techniques and to evaluate whether gamification is a valid paradigm for conducting user studies in the domain of medical imaging.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>The study was implemented in the form of a mobile game, <italic>Connect Brain</italic>, which was developed and distributed on both Android (Google LLC) and iOS (Apple Inc) platforms. Connect Brain features 2 minigames: one asks the player to make decisions about the depth of different vessels, and the other asks the player to determine whether 2 vessels are connected.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The gamification paradigm, which allowed us to collect many data samples (5267 and 1810 for the depth comparison and vessel connectivity tasks, respectively) from many participants (N=111), yielded similar results regarding the effectiveness of visualization techniques to those of smaller in-laboratory studies.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The results of our study suggest that the gamification paradigm not only is a viable alternative to traditional in-laboratory user studies but could also present some advantages.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>medical image visualization</kwd>
        <kwd>volume visualization</kwd>
        <kwd>depth cues</kwd>
        <kwd>angiography</kwd>
        <kwd>gamification</kwd>
        <kwd>mobile games</kwd>
        <kwd>mobile phone</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>In the field of medical imaging, angiography is used to visualize vascular structures inside the body. This is typically performed by injecting a contrast substance into a patient and imaging the patient via x-ray, magnetic resonance, or computed tomography [<xref ref-type="bibr" rid="ref1">1</xref>]. For 3D x-ray, magnetic resonance, or computed tomography angiography (CTA), the result is a 3D volumetric representation of the scanned patient’s vascular anatomy. This 3D volume can be visualized using methods such as axis-aligned slicing [<xref ref-type="bibr" rid="ref2">2</xref>], volume rendering, and surface rendering [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
        <p>Cerebral angiography specifically depicts the blood vessels of the brain. The goal of this type of angiography is to help radiologists and surgeons understand the cerebral vasculature and detect abnormalities such as stenosis, arteriovenous malformations, and aneurysms [<xref ref-type="bibr" rid="ref4">4</xref>]. However, visualizing angiography data such that they can be spatially well understood presents certain challenges [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. First, the cerebral vasculature is complex, with intricate branching and many overlapping vessels, which hinders the understanding of the data in 3D [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. Second, owing to variations in anatomy from patient to patient, surgeons may not always be able to rely on past experience to understand a new data set [<xref ref-type="bibr" rid="ref1">1</xref>]. Third, depending on the environment (eg, the operating room), not all visualization methods might be suitable for rendering the data. For example, stereoscopic viewing requires specialized equipment (eg, a stereoscopic display or augmented reality glasses), which is not always available. Perspective rendering may also be inconvenient to use when displaying the data, as radiologists and surgeons may want to perform measurements on the angiographic image [<xref ref-type="bibr" rid="ref4">4</xref>]; therefore, orthographic projection is most commonly used for 3D medical image visualization [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref4">4</xref>].</p>
      </sec>
      <sec>
        <title>Motivation</title>
        <p>To improve the depth perception and spatial understanding of vascular volumes, numerous perceptually driven vessel visualization methods have been developed [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref10">10</xref>]. An overview of the most related studies and their results is presented in <xref ref-type="table" rid="table1">Table 1</xref>. The studies were chosen based on whether they contained algorithms that could be implemented with direct volume rendering (DVR). In addition, we focused exclusively on static visualizations, as in some contexts (such as the rendering of virtual vessels in augmented reality during a surgical intervention), it is not possible to have dynamic transformations. Thus, to limit the number of conditions and achieve more uniformity among the conditions, we focused only on static visualizations.</p>
        <p>In all these works, user studies for determining the effectiveness of different visualization techniques were conducted in a laboratory environment under the supervision of a researcher [<xref ref-type="bibr" rid="ref12">12</xref>]. This type of laboratory study has a number of disadvantages: the lack of diversity between the participants (who are often young college students) [<xref ref-type="bibr" rid="ref12">12</xref>] and a limited pool of participants or, conversely, a high monetary cost for studies that have many participants [<xref ref-type="bibr" rid="ref13">13</xref>]. As can be seen in the table, the number of participants per study was typically between 10 and 20. To overcome these issues, alternative user study paradigms such as crowdsourcing and gamification were explored [<xref ref-type="bibr" rid="ref12">12</xref>].</p>
        <p>Although crowdsourcing has previously been used to evaluate medical image visualization techniques [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>], to the best of our knowledge, gamification has not been previously used for psychophysical experiments that study the effectiveness of medical visualization techniques. In our study, we used the gamification paradigm to collect data on the effectiveness of different perceptually driven vascular volume visualization techniques. Specifically, we developed a mobile app, <italic>Connect Brain</italic>, with 2 different games that we distributed on the web. The app was published on Google Play (Google LLC) [<xref ref-type="bibr" rid="ref14">14</xref>] and the App Store (Apple Inc) [<xref ref-type="bibr" rid="ref15">15</xref>]. Using the developed game, we evaluated the possibility of using the gamification paradigm to conduct user studies on medical imaging. Specifically, the developed game had similar research questions and metrics to those in prior laboratory studies (eg, the studies by Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>], Ropinski et al [<xref ref-type="bibr" rid="ref4">4</xref>], and Abhari et al [<xref ref-type="bibr" rid="ref6">6</xref>]) that evaluated the effectiveness of diverse cerebral vessel visualization techniques. We introduced specific gamification elements, such as levels, points, and leaderboards, to engage the participants and made the games available on the App Store [<xref ref-type="bibr" rid="ref15">15</xref>] and Google Play [<xref ref-type="bibr" rid="ref14">14</xref>] to reach a wider participant base. This paper is based on chapter 3 of the first author’s master’s thesis [<xref ref-type="bibr" rid="ref16">16</xref>].</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Related works on depth volume rendering vascular visualization techniques.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="170"/>
            <col width="250"/>
            <col width="120"/>
            <col width="130"/>
            <col width="170"/>
            <col width="160"/>
            <thead>
              <tr valign="top">
                <td>Study</td>
                <td>Visualizations</td>
                <td>Participants, n</td>
                <td>Trials and sample points</td>
                <td>Goals</td>
                <td>Metrics</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Ropinski et al [<xref ref-type="bibr" rid="ref4">4</xref>]</td>
                <td>Phong, stereo, chroma, pseudochroma, overlaid edges, blended edges, perspective edges; edge shading; DoF<sup>a</sup>; and DoF+pseudochroma</td>
                <td>14</td>
                <td>50 × 14 = 700</td>
                <td>Depth comparison</td>
                <td>Correctness, time, and user feedback</td>
              </tr>
              <tr valign="top">
                <td>Abhari et al [<xref ref-type="bibr" rid="ref6">6</xref>]</td>
                <td>No cue and edge</td>
                <td>10</td>
                <td>60 × 10 = 600</td>
                <td>Connectivity</td>
                <td>Correctness, time, and expert feedback</td>
              </tr>
              <tr valign="top">
                <td>Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>]</td>
                <td>No cue, kinetic, stereo, edge, pseudochroma, and fog+combined cues (for novice experiments only)</td>
                <td>2 studies: 13 novices and 6 experts</td>
                <td>160 × 13 = 2080 (novice); 50 × 6 = 300 (expert)</td>
                <td>Depth comparison</td>
                <td>Correctness, time, and user feedback</td>
              </tr>
              <tr valign="top">
                <td>Drouin et al [<xref ref-type="bibr" rid="ref7">7</xref>]</td>
                <td>Shading, pseudochroma, fog, dynamic shading, dynamic pseudochroma, and dynamic fog</td>
                <td>20</td>
                <td>80 × 20 = 1600</td>
                <td>Depth comparison and targeting or reaching</td>
                <td>Correctness, time, pointer-target distance, and user feedback</td>
              </tr>
              <tr valign="top">
                <td>Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>]</td>
                <td>Phong, chroma, pseudochroma, VSS<sup>b</sup> chroma, and VSS pseudochroma</td>
                <td>19</td>
                <td>150 × 19 = 2850</td>
                <td>Depth comparison</td>
                <td>Correctness and time</td>
              </tr>
              <tr valign="top">
                <td>Titov et al [<xref ref-type="bibr" rid="ref11">11</xref>]</td>
                <td>Shading, pseudochroma, fog, dynamic shading, dynamic pseudochroma, and dynamic fog; all cues were visualized with a VR HMD<sup>c</sup></td>
                <td>12</td>
                <td>80 × 12 = 960</td>
                <td>Depth comparison and targeting or reaching</td>
                <td>Correctness, time, pointer-target distance, head movement, and user feedback</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>DoF: depth of field.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>VSS: void space surface.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>VR HMD: virtual reality head-mounted display.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Gamification</title>
        <p>Gamification is similar to crowdsourcing and shares its advantages [<xref ref-type="bibr" rid="ref12">12</xref>]. Crowdsourcing is a method of conducting user studies that distributes a given task to a larger network of participants [<xref ref-type="bibr" rid="ref12">12</xref>]. An example of a platform for crowdsourcing is the Amazon Mechanical Turk (MTurk) [<xref ref-type="bibr" rid="ref17">17</xref>], which has been used in studies on a variety of topics, such as the perceptual effectiveness of line drawings to depict shapes [<xref ref-type="bibr" rid="ref18">18</xref>], natural language processing [<xref ref-type="bibr" rid="ref19">19</xref>], and audio transcription [<xref ref-type="bibr" rid="ref20">20</xref>]. Crowdsourcing enables a larger study population than traditional methods because the task can be distributed on the web. In addition, the participant pool becomes more diverse because the study is no longer limited to a physical environment (eg, a university laboratory). Finally, crowdsourcing is less time consuming for each individual participant and allows a lower per-participant cost [<xref ref-type="bibr" rid="ref17">17</xref>]. This model also has some disadvantages; the main disadvantage being low data quality because researchers do not have much control over the unfolding of the experiment and because participants may be motivated only by monetary gain [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>].</p>
        <p>The main difference between gamification and crowdsourcing is that gamification introduces gaming elements to the study [<xref ref-type="bibr" rid="ref12">12</xref>]. Through gamification, a study is transformed into a game that is fun to play, and the gameplay data are collected and analyzed as the results of the study. The most important advantage of gamification is that users are motivated to perform well, which consequently increases the quality of the collected data compared with crowdsourcing. Further, players are motivated to perform well not because of monetary incentives but because they enjoy playing the game [<xref ref-type="bibr" rid="ref13">13</xref>]. As gamification scales well with a large number of participants (because players download and play the games on their own devices), these types of studies have an even lower runtime cost than crowdsourcing [<xref ref-type="bibr" rid="ref13">13</xref>]. However, there are several disadvantages. First, not every study can be transformed into a game that is fun to play. Furthermore, developing and publishing a game requires more time and effort than creating an experimental task. Finally, for success, the researcher should develop interesting game mechanics that follow the rules of game design [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
        <p>The goal of our work is to determine whether the gamification paradigm is a valid approach to performing user studies, specifically in the context of medical imaging.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>Connect Brain was developed using the Unity engine (Unity Technologies) [<xref ref-type="bibr" rid="ref21">21</xref>] for the Android and iOS platforms. Before starting to play the game, all players had to provide informed consent for their gameplay data to be collected anonymously and used for research purposes. They could do this by manually checking the corresponding box during the initial profile creation. In addition, an email address was provided in case players had any questions regarding the user study.</p>
        <p>A total of 7 different visualizations were implemented in the mobile app: Blinn-Phong shading [<xref ref-type="bibr" rid="ref22">22</xref>], edge enhancement [<xref ref-type="bibr" rid="ref23">23</xref>], aerial perspective (also called fog) [<xref ref-type="bibr" rid="ref5">5</xref>], chromadepth [<xref ref-type="bibr" rid="ref24">24</xref>], pseudochromadepth [<xref ref-type="bibr" rid="ref4">4</xref>], and chromadepth and pseudochromadepth versions of void space surfaces (VSSs) [<xref ref-type="bibr" rid="ref10">10</xref>]. In all visualizations, the medical data set was rendered using real-time DVR. Note that all visualizations are shaded using the Blinn-Phong shading model in addition to the specified method.</p>
      </sec>
      <sec>
        <title>Implemented Visualizations</title>
        <p>In the following section, we describe the details of the vascular volume visualization techniques (<xref rid="figure1" ref-type="fig">Figure 1</xref>) that were implemented in the Connect Brain game.</p>
        <p><italic>Blinn-Phong shading</italic> [<xref ref-type="bibr" rid="ref22">22</xref>] is a photorealistic illumination model that describes how a surface reflects light when illuminated by one or multiple light sources. Similar to Drouin et al [<xref ref-type="bibr" rid="ref7">7</xref>], we used it as the baseline visualization technique. In our implementation, a single-directional light source was used whose direction was parallel to the view direction (<xref rid="figure1" ref-type="fig">Figure 1</xref>A). In terms of color, both the volume and the light source were white.</p>
        <p><italic>Edge enhancement</italic> is used to emphasize the occlusion depth cue, where a viewer determines the relative depth between different objects based on the way they overlap [<xref ref-type="bibr" rid="ref23">23</xref>]. In vessel visualization, the contours of vessels are emphasized, typically by rendering dark lines around the edges of the vessels [<xref ref-type="bibr" rid="ref25">25</xref>] (<xref rid="figure1" ref-type="fig">Figure 1</xref>E). This cue is especially helpful when the transfer function (TF) produces a translucent result. In this case, the highly contrasted black silhouettes occlude the silhouettes of the vessels that are farther away from the viewer, thus providing a better understanding of the depth ordering of vessels.</p>
        <p>Following the work of Drouin and Collins [<xref ref-type="bibr" rid="ref23">23</xref>], in our implementation, edge enhancement was combined with Blinn-Phong shading. To do this, the volume is rendered using Blinn-Phong shading, and each pixel that forms the silhouette is darkened based on its interpolated normal vector. Pixels with a gradient that is almost perpendicular to the viewer are considered part of the silhouette. Drouin and Collins [<xref ref-type="bibr" rid="ref23">23</xref>] described the following formula for calculating the intensity of edge enhancement for a given pixel:</p>
        <disp-formula>
          <graphic xlink:href="neuro_v2i1e45828_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>
          <bold>(1)</bold>
        </p>
        <p>where <italic>α</italic> is the intensity of the edge enhancement factor, <inline-graphic xlink:href="neuro_v2i1e45828_fig5.png" xlink:type="simple" mimetype="image"/>
 is the gradient (normal vector) of the surface, <inline-graphic xlink:href="neuro_v2i1e45828_fig6.png" xlink:type="simple" mimetype="image"/> is the direction of the ray (from the volume toward the viewer), and <italic>stepMin</italic> and <italic>stepMax</italic> are user-defined parameters.</p>
        <p><italic>Aerial perspective</italic> (sometimes referred to as fog) is a monocular depth cue caused by the atmosphere and the way in which light scatters. Specifically, the farther the distance between an object and a viewer, the less contrast there is between the object and the background. With this technique, the vessels that are closer to the viewer appear more saturated and more contrasted, whereas farther vessels fade into the background [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>] (<xref rid="figure1" ref-type="fig">Figure 1</xref>D). By comparing the saturation of 2 vessels, it is possible to deduce which one is closer and which one is farther away.</p>
        <p>To render a data set with an aerial perspective cue, the pixels representing the color should be correctly blended with the background. Rheingans and Ebert [<xref ref-type="bibr" rid="ref26">26</xref>] described the following formula for distance-color blending:</p>
        <p>
          <italic>C = (1 – d) c<sub>o</sub> + d c<sub>b</sub></italic>
          <bold>(2)</bold>
        </p>
        <p>where <italic>d</italic> is the depth of the volume at the current pixel in the range of [0,1], <italic>c<sub>o</sub></italic> is the color of the object, and <italic>c<sub>b</sub></italic> is the color of the background. Preim et al [<xref ref-type="bibr" rid="ref5">5</xref>] noted that the relationship between the depth of the projected vessel and saturation of the pixel does not need to be linear but can rather be exponential (by replacing <italic>d</italic> with an exponential function). To ensure the visualization of the entire volume (such that no vessels are blended completely into the background), Kersten et al [<xref ref-type="bibr" rid="ref27">27</xref>] determined that the best upper bound for <italic>d</italic> was between 0.75 and 0.85. In our implementation, we used the original linear formulation with <italic>d</italic>=0.8.</p>
        <p><italic>Chromadepth</italic>, a technique developed by Steenblik [<xref ref-type="bibr" rid="ref28">28</xref>], encodes depth using color. Specifically, the color of the pixels in depth follows the colors of the visible light spectrum, starting from red; progressing through orange, yellow, green, and cyan; and concluding with blue [<xref ref-type="bibr" rid="ref24">24</xref>]. Thus, for a vascular volume, the closest vessels are red, the farthest vessels are blue, and vessels in between have a color that is linearly interpolated between these values (<xref rid="figure1" ref-type="fig">Figure 1</xref>B). Bailey and Clark [<xref ref-type="bibr" rid="ref24">24</xref>] described the chromadepth TF as a 1D texture containing all colors (from red to blue), where <italic>s</italic> is defined as the sampling parameter. <italic>D<sub>1</sub></italic> and <italic>D<sub>2</sub></italic> are parameters defined by the viewer such that <italic>D<sub>1</sub>≥0</italic>, <italic>D<sub>2</sub>≤1</italic>, and <italic>D<sub>1</sub>&lt;D<sub>2</sub></italic>, and for any depth <italic>d</italic> where <italic>d ∈ [0,1]</italic>, TF is defined as follows:</p>
        <p>if <italic>d&lt;D<sub>1</sub></italic>, then the color of the pixel is red, and if <italic>d&gt;D<sub>2</sub></italic>, then the color of the pixel is blue; otherwise, <inline-graphic xlink:href="neuro_v2i1e45828_fig7.png" xlink:type="simple" mimetype="image"/> <bold>(3)</bold></p>
        <p><xref rid="figure2" ref-type="fig">Figure 2</xref>A shows the TF used in our implementation for chromadepth as well as a sample volume shaded in this manner.</p>
        <p><italic>Pseudochromadepth</italic>, which incorporates only 2 colors (red and blue) instead of the full color spectrum, was used by Ropinski et al [<xref ref-type="bibr" rid="ref4">4</xref>] to deal with the large number of hues presented in a chromadepth image, which can distract the viewer from the understanding of the depth. Red and blue colors are used (<xref rid="figure1" ref-type="fig">Figure 1</xref>C) because of the visual phenomena of chromostereopsis [<xref ref-type="bibr" rid="ref29">29</xref>], which is caused by the light of different colors refracting into different parts of the retina in the eye depending on the wavelength. Chromostereopsis can be used to make red objects appear closer in depth than blue objects.</p>
        <p>When using pseudochromadepth for vasculature, the closest vessels are red; the farthest vessels are blue; and for any intermediate depth, the color of the pixel is calculated by interpolating between red and blue. Thus, using the pseudochromadepth depth cue, a depth comparison between 2 shaded objects can be simplified to a simple comparison of the hue, with warmer hues representing closer objects and colder hues representing farther objects. The pseudochromadepth cue was implemented in the same way as chromadepth, with the only difference being that the 1D rainbow-like texture was replaced by one where the color is linearly interpolated between red and blue, as shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>B.</p>
        <p><italic>VSS</italic>, a technique used in vessel visualization, was developed by Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>] (<xref rid="figure1" ref-type="fig">Figures 1</xref>F and 1G). Unlike many other vessel visualization techniques that are based on shading the vessels in a certain manner, VSS concentrates on shading the area around the vessels; the background is colored to indicate the relative depth of the surrounding vessels. Therefore, to understand the relative depth of a certain vessel, one must look at the color of the background that surrounds the vessel. The motivation behind VSS is that in more traditional depth rendering methods, there is a lot of unused empty space. Therefore, instead of being limited by the area that vessels occupy on the screen, the entire screen can be used, allowing the vessel pixels to represent any other information that may be deemed necessary.</p>
        <p>To determine the color of each pixel, a weighted average of the depths of the surrounding border pixels is calculated. To do this, a rendered image of a vessel structure in the form of a depth map on which the filled pixels (representing the volume) can be distinguished from the empty pixels (representing the background) is required. The Suzuki and Abe [<xref ref-type="bibr" rid="ref30">30</xref>] border-following algorithm is then executed on the depth map, creating a hierarchy of the borders of the depth map. This hierarchy indicates what border pixels contribute to what part of the background. Subsequently, the interpolated depth for each background pixel is calculated using inverse distance weighting [<xref ref-type="bibr" rid="ref31">31</xref>]:</p>
        <disp-formula>
          <graphic xlink:href="neuro_v2i1e45828_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>
          <bold>(4)</bold>
        </p>
        <p>where <italic>Depth</italic> is the calculated depth of the background pixel, <italic>p<sub>i</sub></italic> is the <italic>i</italic>th border pixel whose depth is used in the weighted average calculation, <italic>N</italic> is the total number of border pixels that affect the depth of <italic>p<sub>b</sub></italic>, <italic>w (p<sub>i</sub>)</italic> is the weight of the border pixel <italic>p<sub>i</sub></italic>, and <italic>d (p<sub>i</sub>)</italic> is the depth of the border pixel <italic>p<sub>i</sub></italic>.</p>
        <p>The weight <italic>w (p<sub>i</sub>)</italic> of a border pixel <italic>p<sub>i</sub></italic> is calculated in the following manner:</p>
        <disp-formula>
          <graphic xlink:href="neuro_v2i1e45828_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </disp-formula>
        <p>
          <bold>(5)</bold>
        </p>
        <p>where <italic>p<sub>b</sub></italic> is the background pixel for which the depth calculation is performed, <italic>p<sub>i</sub></italic> is the <italic>i</italic>th border pixel whose depth is used in the weighted average calculation, <italic>m (p<sub>b</sub></italic>, <italic>p<sub>i</sub>)</italic> is the magnitude of the vector between the position of the pixel <italic>p<sub>b</sub></italic> and <italic>p<sub>i</sub></italic>, and <italic>s</italic> is a user-defined smoothing parameter that results in closer border pixels giving exponentially more weight.</p>
        <p>After calculating the depth of every background pixel, a TF is applied to the depths, transforming them into a color. Typically, chromadepth (<xref rid="figure1" ref-type="fig">Figure 1</xref>F) and pseudochromadepth (<xref rid="figure1" ref-type="fig">Figure 1</xref>G) are used [<xref ref-type="bibr" rid="ref10">10</xref>]. In addition, VSS implements an approximated version of global illumination in the form of screen space directional occlusion (SSDO) [<xref ref-type="bibr" rid="ref32">32</xref>]. SSDO darkens some regions of the generated VSS that may be occluded from the light emitted by neighboring parts of the VSS and performs an indirect light bounce. Finally, isolines are generated on the surface of the VSS in the form of black lines to improve the understanding of the generated shape by the VSS.</p>
        <p>Owing to the hardware limitations of mobile devices, we used screen space ambient occlusion [<xref ref-type="bibr" rid="ref33">33</xref>] instead of SSDO, which does not include indirect bounce.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>All the implemented vessel visualization techniques: (A) shading (Blinn-Phong), (B) chromadepth, (C) pseudochromadepth, (D) aerial perspective, (E) edge enhancement, (F) void space surface (VSS) chromadepth, and (G) VSS pseudochromadepth.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e45828_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>(A) Chromadepth and (B) pseudochromadepth with 1D transfer functions indicating near to far color mapping.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e45828_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>The user study was approved by the Natural Sciences and Engineering Research Council of Concordia University (certification 30016074).</p>
      </sec>
      <sec>
        <title>DVR on the Mobile Device</title>
        <p>To visualize the volumes, the DVR technique described by Drouin et al [<xref ref-type="bibr" rid="ref23">23</xref>], which is based on a well-known 2-pass rendering algorithm described by Kruger et al [<xref ref-type="bibr" rid="ref34">34</xref>], was used. This technique describes a real-time ray casting algorithm that consists of 2 rendering passes. In the first pass, the front and back faces of a colored cube representing the bounding box of the volume are rendered into 2 different textures. The red, green, and blue colors encode the start and end positions (as 3D coordinates) of the ray for each pixel. In the second pass, for each pixel, a ray is sent through the volume, and the opacity is accumulated while sampling the volume using trilinear interpolation. The ray stops, and the distance traveled by the ray is recorded into a third texture. Next, a compute shader scans the third texture to determine the smallest and highest nonzero depths of the texture such that the visible interval of the volume inside the 3D texture is known. Finally, in the second pass, the final image of the volume is rendered using the recorded pixel depths, which are adjusted using the minimum and maximum values calculated previously so that the entire range of depth values (from 0 to 1) lies within the visible part of the volume. A TF maps the adjusted depth values to the red, green, blue, and alpha colors for each pixel. This TF is encoded as a 1D texture that is passed to the shader.</p>
        <p>As mobile device graphics processing units are typically slower than their desktop equivalents, additional optimizations were made to allow for real-time rendering. First, the ray casting algorithm was simplified so that instead of accumulating opacity at each ray step until full opacity was reached, the ray stopped immediately when the sampled value in the volume reached a given threshold, similar to the early ray termination described by Levoy [<xref ref-type="bibr" rid="ref35">35</xref>]. Second, the ray casting algorithm was modified to reduce the frequency at which the volume was sampled. To achieve this, the 3D Chamfer distance approach described by Zuiderveld et al [<xref ref-type="bibr" rid="ref36">36</xref>] was used. This method speeds up ray casting without compromising the quality of the rendered image by determining the distance to the closest nonzero voxel for every voxel and storing it in a 3D texture. This distance corresponds to the number of voxels that must be traversed to create a path in 3D space, assuming a 26-cell cubic neighborhood. Here, a small threshold value was defined to distinguish the <italic>empty</italic> voxels from the nonempty voxels. When performing ray casting, the value from the Chamfer distance 3D texture, which indicates the distance that the ray can safely travel without missing any interesting voxels, is used. Thus, the empty areas of the volume are traversed faster. It should be noted that although the algorithm does not compromise the quality of the volume, it requires more space to store the additional volume.</p>
        <p>Finally, to save the battery life of the mobile device and have a smoother user interface, when the volume is not being rotated, it is rendered once to a texture and then displayed in future frames. In addition, when the volume is rotated, it is temporarily downscaled during ray casting, the smaller volume is rendered to a temporary frame buffer, and then the image obtained from this frame buffer is upscaled using linear interpolation. The intensity of the downscaling is directly proportional to the speed of the rotation of the volume, making the downsampling less perceptible to the viewer.</p>
        <p>Using these optimizations, real-time rendering was achieved on the mobile devices tested for all cues except VSSs. Despite attempts to improve the calculation time of VSS, rendering times of only a few seconds per frame were achieved. As a result, a static version of VSS that cannot be interacted with was used in Connect Brain.</p>
      </sec>
      <sec>
        <title>Connect Brain Gameplay</title>
        <p>Connect Brain consists of two minigames: (1) the <italic>Near-Far Game</italic>, a game in which players compare the relative depth between the indicated vessels, and (2) the <italic>Blood Circulation Game</italic>, a game in which players must understand the connectivity between different points in the vascular volume (<xref rid="figure3" ref-type="fig">Figure 3</xref>, where the phone frame was adapted from Wikimedia [<xref ref-type="bibr" rid="ref37">37</xref>]; the original uploader of the frame was MDXDave at German Wikipedia, CC BY-SA 3.0 [<xref ref-type="bibr" rid="ref38">38</xref>]). Both minigames are split into a tutorial level that teaches the player the basics of the minigame and 11 levels that can be played in any order after the completion of the tutorial. Each level is defined by 4 parameters: the CTA data set used, threshold used for early ray termination, depth of the near and far clipping planes, and number of points selected on the volume (≥2). Each level in the game consists of 14 rounds in total, with each round showing a single visualization among those that were implemented. A legend was always present to help the players understand the color encodings for each visualization, and the player could also read the description of the visualization by pressing on a question mark icon. To avoid confusing the player and prevent biases, we decided to use the same visualization technique for 2 consecutive rounds before randomly selecting a new visualization. Videos demonstrating the gameplay of these games can be found in the multimedia appendices (see <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> for the Near-Far Game and <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> for the Blood Circulation Game).</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Connect Brain screenshots: (A) gameplay of the Near-Far Game, (B) feedback for the Near-Far Game, (C) gameplay of the Blood Circulation Game, and (D) feedback for the Blood Circulation Game. Phone frame source: adapted from Wikimedia. The original uploader was MDXDave at German Wikipedia, CC BY-SA 3.0.</p>
          </caption>
          <graphic xlink:href="neuro_v2i1e45828_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Near-Far Game</title>
        <p>The <italic>Near-Far Game</italic> focuses on understanding the relative depth between vessels. This game is based on the experimental task described and used by Ropinski et al [<xref ref-type="bibr" rid="ref4">4</xref>], Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>], and Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>]. The typical experimental task involves participants determining the nearest vessel between 2 selected vessels rendered using a given visualization technique. The Near-Far Game in our app uses the same principle but introduces some gameplay elements to make it more fun for players.</p>
        <p>Players are presented with a CTA on which ≥2 points on vessels are indicated. The task of the player is to connect the points from the point closest to them to the point farthest from them using their finger. The points are indicated on the volume using a contrasting color, and to ensure that they are visible, a black and white circle is placed around them (<xref rid="figure3" ref-type="fig">Figure 3</xref>A). This circle also indicates the region where the player can touch the screen to select the point. To further help indicate the positions of the points, arrows appear on the screen, indicating the location of the points during the first second of each round. The selected points and view of the CTA are randomly chosen, meaning that the player cannot simply learn the correct answers. This also makes replaying a level more interesting, as the player will always have new data to view and interact with. Although random, a number of rules are applied to choose the points: (1) they are always clearly visible from the player’s perspective; (2) they have a small minimum depth difference between them; and (3) there is a minimum <italic>xy</italic> pixel position difference between them, which is equal to the diameter of the black-and-white circle × 1.5 to avoid the overlapping of 2 indicator circles.</p>
        <p>By connecting the points in the correct order, the player gains score points; and additional bonus points are provided for doing this quickly. The number of bonus points is calculated by applying a reciprocal function to the round time. However, if the player makes an incorrect decision, the bonus is subtracted from their current score. This gives players an incentive to complete rounds as fast as possible while simultaneously motivating them to make accurate decisions. Further, the score accumulates through the rounds and is saved on a global leaderboard where players can compare their score to others. The score of a player is only visible to other players if it is one of the top 3 scores for the current level, and this setting cannot be changed.</p>
        <p>Some levels have rounds in which &gt;2 points are indicated to the player. In these rounds, the player can connect any number of points at once. The goal in this case is to select all the connected points in the ascending depth order, starting from the closest point in terms of depth (similar to the work of Ritter et al [<xref ref-type="bibr" rid="ref3">3</xref>]). However, if a point with a larger depth is selected before a point with a smaller depth, then the entire selection is considered incorrect, and the player loses the bonus time points. If all the points are connected in the correct order, the player will receive significantly more points than if they connected each pair of points individually. Thus, selecting multiple points at once is a high-risk, high-reward strategy.</p>
        <p>During gameplay, we enable players to rotate the volume as a last resort measure when they get stuck at a certain round. The players can rotate the volume with an offset of up to 45° from the initial position. If <italic>x</italic> and <italic>y</italic> are the rotation in degrees around the <italic>x-</italic> and <italic>y-</italic>axis from the initial position, then the rotation of the volume always follows the formula <inline-graphic xlink:href="neuro_v2i1e45828_fig10.png" xlink:type="simple" mimetype="image"/>
. To discourage rotation (as we wanted players to understand the data using the given visualization technique), we designed the game such that players lose score points for rotating the volume. The amount of points lost is directly proportional to the rotation of the volume in degrees. This feature was added to reduce the frustration of the player and lower the chance that they will completely abandon the game.</p>
        <p>A preliminary in-laboratory study was conducted with 12 participants to test the gameplay aspect of Connect Brain. One of the findings of this preliminary study was that users wanted to know how they were wrong when they made an incorrect decision. Thus, a feedback feature was added; if enabled, at the end of each incorrectly completed round, the volume is rotated by 90° around the x-axis so that the points that are closer to the viewer are positioned on the bottom of this view and the points that are farther are positioned on the top. Vertical lines are then drawn like a ruler to demonstrate the relative depth between the points (<xref rid="figure3" ref-type="fig">Figure 3</xref>B).</p>
      </sec>
      <sec>
        <title>Blood Circulation Game</title>
        <p>The <italic>Blood Circulation Game</italic> focuses on the connectivity between different vessels in the vascular volume. This game is an adaptation of the experiment that was described by Abhari et al [<xref ref-type="bibr" rid="ref6">6</xref>], in which participants were presented with static 2D images and asked to determine whether a path exists between 2 selected points on the visible vessel structure. We built on this experiment by adding motivating gameplay features to it.</p>
        <p>As in the <italic>Near-Far Game</italic>, players are presented with ≥2 points selected on the vascular volume. However, the goal of this game is to determine which points are directly connected, in other words, whether a path exists between the 2 vessels. As each selected point on the 2D image is associated with a specific voxel in 3D, connectivity refers to the path between the 2 voxels inside the 3D volume. When the player finds 2 connected points, they link them using their finger in any order. However, if no 2 points seem to be connected with each other, the player should press the “no connected points” button that is located at the bottom of the screen (<xref rid="figure3" ref-type="fig">Figure 3</xref>C).</p>
        <p>As described in the first game, the initial rotation of the volume at the beginning of each round and the selection of points are performed randomly. This means that we need to compute at runtime whether 2 voxels are connected with each other within the 3D data set. To achieve this, the A* search algorithm [<xref ref-type="bibr" rid="ref39">39</xref>], which determines the path (if it exists) between 2 voxels inside a 3D texture, was used. A* is an informed search algorithm that considers both the distance traversed so far and an estimation (heuristic) of the remaining path, allowing it to perform very quickly and find the optimal path in case the heuristic function is admissible (never overestimates the cost to reach the goal). This algorithm requires a priority queue data structure to function, and we chose the Fibonacci heap [<xref ref-type="bibr" rid="ref40">40</xref>] because of its efficient performance. The threshold used to define the boundaries of the vessels during path finding is the same as that used for ray casting.</p>
        <p>The score system works in the same manner as in <italic>Near-Far Game</italic>, with points awarded for correct decisions about whether a path exists and for fast decision response times. The rotation of the volume also works in the same manner, resulting in a loss of points.</p>
        <p>The Blood Circulation Game also features a feedback system; if the player decides that 2 points are connected, but in fact they are not, the feedback view shows the minimum distance that separates the 2 independent parts of the vessel structure. Conversely, if the player decides that no points are connected with each other, but some of them are, then this view demonstrates the path between the connected points (<xref rid="figure3" ref-type="fig">Figure 3</xref>D).</p>
        <p>Once Connect Brain was made available on the Apple App Store and Google Play, we advertised it not only on various social media channels, such as LinkedIn [<xref ref-type="bibr" rid="ref41">41</xref>], Twitter [<xref ref-type="bibr" rid="ref42">42</xref>], and Facebook [<xref ref-type="bibr" rid="ref43">43</xref>], but also through email lists to encourage users to play.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>At the time of our analysis, a total of 111 participants (men: n=68, 61.3%; women: n=39, 35.1%; nonbinary: n=4, 3.6%) had downloaded and played the mobile game. In addition to the 111 participants who played the game, 21 others downloaded it but did not play. Of the 111 participants, 54 (48.6%) played on Android, and the remainder (n=57, 51.4%) played on iOS. Owing to the restriction on the collection of age data on iOS apps, age was collected only from the participants who used the Android version; the age range of these participants was from 14 to 62 (mean 30, SD 11) years. Among the 111 participants, 50 (45%) had experience with medical visualization, 30 (27%) were familiar with angiography, and 36 (32.4%) had experience with vessel visualization techniques. More precisely, of the 111 participants, 26 (23.4%) had experience in all 3 previously listed domains (we refer to them as experts), and 31 (27.9%) had experience in either 1 or 2 domains (we refer to them as semiexperts). All 111 (100%) users participated in the Near-Far Game, completing, on average, 39 (SD 61) rounds, but only 44 (39.6%) players participated in the Blood Circulation Game, completing, on average, 37 (SD 39) rounds. We hypothesize that the reason why some participants decided to quit the game too early was that they were playing the game in an environment that was not controlled, so they could stop at any moment if they were bored or did not want to continue playing. It is also possible that some players downloaded the game without knowing its purpose and were simply uninterested in playing after downloading. An ANOVA and post hoc Tukey honestly significant difference tests were used to measure and analyze correctness and response time variables. This analysis was performed using the SPSS software (version 26; IBM Corp) [<xref ref-type="bibr" rid="ref44">44</xref>].</p>
        <p>Similar to Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>] and Lawonn et al [<xref ref-type="bibr" rid="ref9">9</xref>], for both games, in addition to correctness and response time, we examined the effect of both the distance between the indicated vessels on the screen (<italic>xy</italic> distance) and the distance in depth between the indicated vessels (<italic>z</italic> distance). Both <italic>xy</italic> and <italic>z</italic> distances were equally divided into 2 categories, <italic>near</italic> or <italic>far</italic>, measured in world coordinates. For the <italic>xy</italic> variable, the ranges are defined in the following manner: near (0.162-0.369) and far (0.369-0.951). For the <italic>z</italic> variable, the ranges are defined as follows: near (0.021-0.104) and far (0.104-0.792; note that <italic>z</italic> distances are distributed unequally because the close and far clipping planes in some levels greatly limit the total depth range of the volume, resulting in a larger number of entries with a small depth distance).</p>
        <p>Owing to a lack of control over the timing and how the game was played (eg, a person might get interrupted during the game, thus increasing the decision time), we removed all extreme outliers, that is, values greater than <italic>Q<sub>3</sub> + 3</italic> × <italic>IQR</italic>, where <italic>Q<sub>3</sub></italic> represents the value at the third quartile and <italic>IQR</italic> is equal to <italic>Q<sub>3</sub> – Q<sub>1</sub></italic>. In addition, we discarded all data completed during the tutorial levels.</p>
      </sec>
      <sec>
        <title>Near-Far Game</title>
        <p>A total of 5367 entries were collected for the Near-Far Game. In cases where multiple points (3 or 4) were connected simultaneously, each individual pair of connected points was treated as an individual entry.</p>
        <sec>
          <title>Correctness</title>
          <p>Correctness was represented by either 1 (correct) or 0 (incorrect) and determined based on whether the connection between points was done in the correct order. The mean correctness and SE for each visualization method are shown in <xref ref-type="table" rid="table2">Table 2</xref>. A 3-way repeated measures ANOVA was used to examine the main effects as well as the interactions of the visualization method, <italic>xy</italic> distance, and <italic>z</italic> distance, as they relate to correctness. The ANOVA showed that the visualization method had a significant effect on correctness (<italic>F</italic><sub>6,5339</sub>=22.404; <italic>P</italic>&lt;.001). A Tukey post hoc test showed that pseudochromadepth (mean 83%, SE 1.5%), aerial perspective (mean 82%, SE 1.5%), and chromadepth (mean 81%, SE 1.5%) allowed for better depth perception than VSS chromadepth (mean 72%, SE 1.6%), VSS pseudochromadepth (mean 72%, SE 1.6%), edge enhancement (mean 66%, SE 1.6%), and shading (mean 65%, SE 1.6%). Although both VSS versions performed better than shading and edge enhancement, only the difference with shading was found to be statistically significant according to the Tukey honestly significant difference test.</p>
          <p>We found a significant main effect of distance on correctness (<italic>F</italic><sub>1,5339</sub>=24.708; <italic>P</italic>&lt;.001). As expected, the near <italic>z</italic> distance (mean 71%, SE 0.9%) resulted in worse correctness compared with the far <italic>z</italic> distance (mean 77%, SE 0.8%). However, we found no main effect of the <italic>xy</italic> distance on correctness (<italic>F</italic><sub>1,5339</sub>=1.329; <italic>P</italic>=.25). Moreover, there was no significant 2-way interaction between <italic>xy</italic> distance and visualization method on correctness of depth ordering (<italic>F</italic><sub>6,5339</sub>=0.627; <italic>P</italic>=.71), between <italic>z</italic> distance and visualization (<italic>F</italic><sub>6,5339</sub>=1.836; <italic>P</italic>=.09), or between the <italic>xy</italic> and <italic>z</italic> distances (<italic>F</italic><sub>1,5339</sub>=0.619; <italic>P</italic>=.43). There was also no significant 3-way interaction between the variables (<italic>F</italic><sub>6,5339</sub>=0.595; <italic>P</italic>=.74).</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Mean correctness and decision time for the Near-Far Game, depending on the visualization that was used<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="270"/>
              <col width="370"/>
              <col width="360"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Correctness (%), mean (SE)</td>
                  <td>Time (s), mean (SE)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Aerial perspective</td>
                  <td>82 (1.5)</td>
                  <td>4.77 (0.117)</td>
                </tr>
                <tr valign="top">
                  <td>Shading</td>
                  <td>65 (1.6)</td>
                  <td>5.29 (0.120)</td>
                </tr>
                <tr valign="top">
                  <td>Chroma</td>
                  <td>81 (1.5)</td>
                  <td>5.03 (0.117)</td>
                </tr>
                <tr valign="top">
                  <td>Edges</td>
                  <td>66 (1.6)</td>
                  <td>4.98 (0.12)</td>
                </tr>
                <tr valign="top">
                  <td>Pseudochroma</td>
                  <td>83 (1.5)</td>
                  <td>4.89 (0.118)</td>
                </tr>
                <tr valign="top">
                  <td>VSS<sup>b</sup> chroma</td>
                  <td>72 (1.6)</td>
                  <td>5.58 (0.122)</td>
                </tr>
                <tr valign="top">
                  <td>VSS pseudochroma</td>
                  <td>72 (1.6)</td>
                  <td>5.44 (0.122)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table2fn1">
                <p><sup>a</sup>Error bars represent the SE.</p>
              </fn>
              <fn id="table2fn2">
                <p><sup>b</sup>VSS: void space surface.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Decision Time</title>
          <p>The decision time for levels with 2 points corresponds to the interval between the moment when the round starts, <italic>T<sub>o</sub></italic>, and the moment when the finger of the player reaches the second point, <italic>T<sub>2</sub></italic>. When &gt;2 indicated vessels (ie, <italic>n</italic>) are connected in the same level, the time for connecting <italic>n – 1</italic> with <italic>n</italic> is calculated as <italic>T<sub>1</sub> + T<sub>n</sub> – T<sub>n–1</sub></italic>. Thus, we consider the time taken to touch the first indicated vessel, which represents the time taken by the player to make decisions about the spatial layout of the vasculature as a whole, plus the time interval to connect the 2 indicated vessels <italic>n – 1</italic> and <italic>n</italic>. The mean decision time and SE for each visualization method are shown in <xref ref-type="table" rid="table2">Table 2</xref>.</p>
          <p>A 3-way repeated measures ANOVA was used to examine the main effects and interactions of visualization methods, <italic>xy</italic> distance, and <italic>z</italic> distance on decision time. The ANOVA showed that the visualization method had a significant effect on response time (<italic>F</italic><sub>6,5339</sub>=6.334; <italic>P</italic>&lt;.001). A post hoc Tukey test showed that aerial perspective (mean 4.77, SE 0.117 s) and pseudochromadepth (mean 4.89, SE 0.12 s) resulted in the fastest decision times and performed better than both VSS chromadepth (mean 5.58, SE 0.12 s) and VSS pseudochromadepth (mean 5.44, SE 0.12 s). However, only aerial perspective performed better than shading (mean 5.29, SE 0.12 s), which had the third worst decision time. Chromadepth (mean 5.03, SE 0.12 s) and edge enhancement (mean 4.98, SE 0.12 s) were faster than VSS chromadepth but not VSS pseudochromadepth.</p>
          <p>There was a significant main effect of <italic>xy</italic> distance (<italic>F</italic><sub>1,5339</sub>=12.630; <italic>P</italic>&lt;.001) on decision time. Far <italic>xy</italic> distances (mean 5.30, SE 0.06 s) resulted in longer decision times than near <italic>xy</italic> distances (mean 4.98, SE 0.06 s). In addition, there was a significant main effect of <italic>z</italic> distance (<italic>F</italic><sub>1,5339</sub>=12.924; <italic>P</italic>&lt;.001) on decision time. Far <italic>z</italic> distances (mean 4.98, SE 0.06 s) resulted in a shorter decision time than near <italic>z</italic> distances (mean 5.30, SE 0.07 s).</p>
          <p>There was no significant 2-way interaction between the visualization method and the <italic>xy</italic> distance (<italic>F</italic><sub>6,5339</sub>=0.476; <italic>P</italic>=.83), the visualization method and the <italic>z</italic> distance (<italic>F</italic><sub>6,5339</sub>=1.190; <italic>P</italic>=.31), or the <italic>xy</italic> distance and the <italic>z</italic> distance (<italic>F</italic><sub>1,5339</sub>=0.063; <italic>P</italic>=.80). There was no 3-way interaction either (<italic>F</italic><sub>6,5339</sub>=1.455; <italic>P</italic>=.19).</p>
        </sec>
      </sec>
      <sec>
        <title>Blood Circulation Game</title>
        <p>The total number of entries collected for the Blood Circulation Game was 1810. A 3-way repeated measures ANOVA was used to examine the main effects as well as the interactions of visualization method, <italic>xy</italic>-distance, and <italic>z</italic>-distance, as they relate to correctness and response time for the Blood Circulation Game.</p>
        <sec>
          <title>Correctness</title>
          <p>Correctness in the Blood Circulation Game corresponds to whether the player correctly identified the indicated vessels as connected (<xref ref-type="table" rid="table3">Table 3</xref>). The ANOVA showed that there was no main effect of visualization technique (<italic>F</italic><sub>6,1782</sub>=1.383; <italic>P</italic>=.22), <italic>xy</italic> distance (<italic>F</italic><sub>1,1782</sub>=0.032; <italic>P</italic>=.86), or <italic>z</italic> distance (<italic>F</italic><sub>1,1782</sub>=0.004; <italic>P</italic>=.95) on correctness. Furthermore, there was no significant 2-way interaction between the visualization method and <italic>xy</italic> distance (<italic>F</italic><sub>6,1782</sub>=0.867; <italic>P</italic>=.52), between the visualization method and <italic>z</italic> distance (<italic>F</italic><sub>6,1782</sub>=1.406; <italic>P</italic>=.35), or between <italic>xy</italic> distance and <italic>z</italic> distance (<italic>F</italic><sub>1,1782</sub>=2.251; <italic>P</italic>=.13). No significant 3-way interaction was found either (<italic>F</italic><sub>6,1782</sub>=1.536; <italic>P</italic>=.16).</p>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Mean correctness and decision time for the Blood Circulation Game, depending on the visualization that was used<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="270"/>
              <col width="370"/>
              <col width="360"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Correctness (%), mean (SE)</td>
                  <td>Time (s), mean (SE)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Aerial perspective</td>
                  <td>80 (2.4)</td>
                  <td>3.46 (0.135)</td>
                </tr>
                <tr valign="top">
                  <td>Shading</td>
                  <td>80 (2.5)</td>
                  <td>3.18 (0.137)</td>
                </tr>
                <tr valign="top">
                  <td>Chroma</td>
                  <td>81 (2.5)</td>
                  <td>3.4 (0.138)</td>
                </tr>
                <tr valign="top">
                  <td>Edges</td>
                  <td>84 (2.4)</td>
                  <td>3.27 (0.136)</td>
                </tr>
                <tr valign="top">
                  <td>Pseudochroma</td>
                  <td>87 (2.4)</td>
                  <td>3.11 (0.133)</td>
                </tr>
                <tr valign="top">
                  <td>VSS<sup>b</sup> chroma</td>
                  <td>81 (2.5)</td>
                  <td>3.52 (0.138)</td>
                </tr>
                <tr valign="top">
                  <td>VSS pseudochroma</td>
                  <td>80 (2.5)</td>
                  <td>3.49 (0.141)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table3fn1">
                <p><sup>a</sup>Values in parentheses represent the SE.</p>
              </fn>
              <fn id="table3fn2">
                <p><sup>b</sup>VSS: void space surface.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Decision Time</title>
          <p>The mean decision time and SE for each visualization method are shown in <xref ref-type="table" rid="table3">Table 3</xref>. ANOVA showed that there was a significant 2-way interaction between the <italic>xy</italic> and <italic>z</italic> distances on decision time (<italic>F</italic><sub>1,1782</sub>=4.583; <italic>P</italic>=.03). The combination of far <italic>xy</italic> and far <italic>z</italic> distances correspondingly resulted in a substantially longer decision time (mean 3.59, SE 0.11 s) than any other combination. There were no significant main effects of visualization method (<italic>F</italic><sub>6,1782</sub>=1.441; <italic>P</italic>=.20), <italic>xy</italic> distance (<italic>F</italic><sub>1,1782</sub>=1.550; <italic>P</italic>=.21), or <italic>z</italic> distance (<italic>F</italic><sub>1,1782</sub>=1.559; <italic>P</italic>=.21) on decision time. No significant 2-way interactions were found for the visualization technique and the <italic>xy</italic> distance (<italic>F</italic><sub>6,1782</sub>=1.409; <italic>P</italic>=.21) or for the visualization technique and the <italic>z</italic> distance (<italic>F</italic><sub>6,1782</sub>=1.044; <italic>P</italic>=.40). Finally, no 3-way interaction was found either (<italic>F</italic><sub>6,1782</sub>=0.708; <italic>P</italic>=.64).</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>In general, we found that our results match those of studies that contain a larger number of participants, which suggests that the gamification paradigm is a viable alternative to conducting studies in the domain of medical imaging and, more precisely, angiography visualization.</p>
      <sec>
        <title>Depth Perception and Connectivity</title>
        <p>The analysis of the gameplay data showed that aerial perspective, chromadepth, and pseudochromadepth allow for the best relative depth perception. These techniques led to the most correct responses and the quickest times, although only aerial perspective resulted in a faster decision time than shading. For vessel connectivity, no cue performed substantially better than the others.</p>
        <p>Similar to the study by Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>], we found that for depth perception, the aerial perspective and pseudochromadepth visualization techniques performed very well in terms of both correctness and decision time. However, unlike Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>] and Ropinski et al [<xref ref-type="bibr" rid="ref4">4</xref>], who found pseudochromadepth to be significantly better than chromadepth, we found no difference between the cues. However, this is in line with the results reported by Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>], who found no difference between these 2 cues.</p>
        <p>As for the VSS cues, we found that they performed slightly worse compared with the results obtained by Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>]. Although VSS chromadepth and VSS pseudochromadepth resulted in a substantially higher accuracy than shading, both performed worse than the non-VSS versions of chromadepth and pseudochromadepth. In terms of decision response time, we found a similar result to that found by Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>]; VSS had longer times than the directly applied visualization methods. This can be expected owing to the indirect nature of this vessel visualization technique. The correctness results may be explained by the fact that the visualized vasculature is complex, and on small devices (eg, smartphones), there is a limited amount of background, which is needed for VSS. In addition, because of the hardware limitations of mobile devices, VSS was the only cue that was not adjusted in real time when the player was rotating the volume. However, despite this constraint, VSS cues still managed to be more effective than shading, so VSS would be preferable in a context where the color of the vessels cannot be changed.</p>
        <p>Edge enhancement was not found to be an effective cue. In terms of depth perception, it resulted in the lowest correct responses, similar to shading. In terms of decision response times, it was substantially better than only VSS chromadepth, and VSS techniques are known to require a significant amount of time to understand. In terms of vessel connectivity understanding, unlike Abhari et al [<xref ref-type="bibr" rid="ref6">6</xref>], edge enhancement did not improve accuracy or decision time. In fact, this visualization technique had no significant impact on either correctness or response time in terms of understanding vessel connectivity. We posit that this is the case because we tended to demonstrate simpler vessel structures in the Blood Circulation Game, which was achieved by using closer clipping planes to avoid having all vessels connected with each other. The negative side effect of this was that accuracy was high across all visualizations, and decision times were generally similar. These similarities in time could be explained by the fact that players rotated the volume using their finger, but even after removing all entries where players rotated the volume, no effect was observed on the decision time.</p>
        <p>In terms of distances between the indicated vessels, as expected, having a far <italic>z</italic> distance between the vessels improves relative depth perception and, surprisingly, decision time, which is different from what was observed by Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>]. The reason behind shorter decision times at long <italic>z</italic> distances could be that with shorter <italic>z</italic> distances, the players had to resort to rotating the volume with their finger to understand the depth using motion parallax. Regarding <italic>xy</italic> distance, although it had no effect on accuracy, it did have an effect on the decision response times, with longer <italic>xy</italic> distances resulting in a longer decision time. This may have been caused by the fact that for longer <italic>xy</italic> distances, players had to perform a longer gesture when connecting the indicated vessels. By contrast, in the Blood Circulation Game, where players had to perform a similar gesture, a long <italic>xy</italic> distance resulted in longer decision times only when it was combined with a long <italic>z</italic> distance, which could mean that the hand gesture does not have a big impact on the decision time. Another reason for this is that players may look back and forth between indicated vessels more often in case of longer distances.</p>
        <p>For the Blood Circulation Game, the combination of long <italic>xy</italic> and long <italic>z</italic> distances resulted in the longest decision times. This may have been because in such a combination, the vessels were the farthest apart from each other, so players had to analyze the data set more carefully to draw any conclusion about the connectivity.</p>
      </sec>
      <sec>
        <title>Crowdsourcing and Gamification</title>
        <p>In this paper, we describe the results of a study that compared the effectiveness of cerebral blood vessel visualization techniques, which was conducted using a mobile game, rather than in a traditional laboratory setting. Similar to previous studies, we found that aerial perspective, chromadepth, and pseudochromadepth allow for the best relative depth perception. In terms of determining the connectivity between 2 vessels, we found that the visualization method did not affect the result.</p>
        <p>What differentiates our study from related works is the gamification paradigm that was used to conduct the study. Rather than having participants perform an experiment in a laboratory, we created a mobile game that was distributed using mobile app distribution platforms. Gamification presented multiple advantages compared with traditional in-laboratory user studies. First, it allowed us to have a high number of participants (111 at the time of analysis) with no additional per-participant cost. Second, the participants were also highly diverse, with 39 (35.1%) out of 111 participants identifying as women and 4 (3.6%) identifying as nonbinary. Third, gamification made it easier for us to recruit experts, as 16 (62%) out of 26 experts downloaded the app either from another country or another province of Canada, whereas among the semiexperts, this proportion was 18 (58%) out of 31. Finally, in cases where the study targets a broader range of participants, including nonexperts, gamification incentivizes the nonexperts to join because they might be interested in the game elements rather than the domain of the study. If we look at the average number of rounds completed by experts and semiexperts combined (mean 63, SD 107), it is approximately the same as that for nonexperts (mean 60, SD 83), which indicates that the 2 groups were approximately equally interested in the game. We hypothesize that experts and semiexperts were primarily interested in continuing to play the game because of the domain of study, whereas nonexperts were interested because of the game elements, such as competing for a high score.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Gamification also presented some important disadvantages, both during the development of the game and with data collection.</p>
        <p>First, transforming the experiment into a game that is fun to play required more development time and additional research to create interesting game mechanics. In our case, the user study could be transformed into a game because it integrated simple visual tasks for both minigames, which were visual comparison (in the Near-Far Game) and path finding (in the Blood Circulation Game). These tasks can both be used for an experiment, but they are also common game principles. However, by themselves, these visual tasks were not interesting enough to make the game fun, so additional game elements had to be added, such as the score system or high-risk, high-reward multiple connection mechanic.</p>
        <p>Second, implementing volume rendering such that it allows real-time rendering on mobile devices required additional optimizations of the rendering code. In addition, ensuring that the game worked on different devices, operating systems, graphics processing units, resolutions, and aspect ratios also required additional development. Even though we tested our game on a variety of Android and iOS devices, we still could not guarantee that our game worked perfectly on all hardware configurations, as we received feedback from 1 (0.9%) of the 111 participants that one of the rendering techniques crashed on their device. In addition, we did not have control over the resolution or aspect ratio of the screen, which might have had an impact on performance. However, to achieve at least some consistency, we scaled the volume such that it was proportional to the vertical resolution of the screen.</p>
        <p>Third, the lack of a controlled environment may have impacted the collected data. As we could not observe how the game was played, we cannot be sure whether players were motivated to try to do their best. At the same time, we think that adding a competitive element to the study in the form of a leaderboard did indeed motivate most players to perform well, which should have resulted in a higher quality of the collected samples. We also had little control over the credibility of the data that users filled in when creating their account and could not create a detailed pretest or posttest questionnaire, which was not possible on iOS owing to privacy concerns and in general could lead to a player abandoning the game before even starting to play.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Despite some of the drawbacks of gamification, using this paradigm allowed this study to collect more data samples than many similar studies [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. Furthermore, it showed that our results were more similar to those of studies with more data samples and participants (2380 for Kersten-Oertel et al [<xref ref-type="bibr" rid="ref1">1</xref>] and 2850 for Kreiser et al [<xref ref-type="bibr" rid="ref10">10</xref>]) than those of studies with fewer samples (700 for Ropinski et al [<xref ref-type="bibr" rid="ref4">4</xref>] and 600 for Abhari et al [<xref ref-type="bibr" rid="ref6">6</xref>]). These results suggest that gamification is a viable paradigm for conducting user studies in the domain of medical imaging. Moreover, as demonstrated by our number of participants and results, if the game is fun to play and motivates the players to perform well in the study, it may lead to a higher number of participants compared with an in-laboratory user study while still maintaining a high quality of the collected data. Another advantage of web distribution-based paradigms, such as gamification, is that they make it possible to perform user studies or help with surgical education in societal situations where meeting in person is not possible [<xref ref-type="bibr" rid="ref45">45</xref>]. Such was the case in this study, which was performed during the lockdown caused by the COVID-19 pandemic. Gamification is a promising technique for collecting large data samples; however, it is important to have fun games that users will continue to play. In the future, we could further improve the game by adding sound and music and examine whether these aspects have a positive impact on the time players spend in the game. 
In addition, we could pay the participants to play our game to determine how having a monetary incentive affects the behavior of the players, as they may enjoy the game more this way [<xref ref-type="bibr" rid="ref46">46</xref>]. Regarding the study itself, in the future, illustrative techniques could be added to compare an even higher number of visualizations. Some good candidates are the hatching and distance-encoded shadows technique described by Ritter et al [<xref ref-type="bibr" rid="ref3">3</xref>]; illustrative shadows, supporting lines, and contours technique described by Lawonn et al [<xref ref-type="bibr" rid="ref9">9</xref>]; and anchors technique described by Lawonn et al [<xref ref-type="bibr" rid="ref8">8</xref>]. Finally, we could compare our gamified user study to crowdsourcing, such as the EvalViz [<xref ref-type="bibr" rid="ref47">47</xref>] wizard.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Gameplay video of the Near-Far Game.</p>
        <media xlink:href="neuro_v2i1e45828_app1.mp4" xlink:title="MP4 File  (MP4 Video), 75413 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Gameplay video of the Blood Circulation Game.</p>
        <media xlink:href="neuro_v2i1e45828_app2.mp4" xlink:title="MP4 File  (MP4 Video), 90143 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">CTA</term>
          <def>
            <p>computed tomography angiography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">DVR</term>
          <def>
            <p>direct volume rendering</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">MTurk</term>
          <def>
            <p>Amazon Mechanical Turk</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">SSDO</term>
          <def>
            <p>screen space directional occlusion</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">TF</term>
          <def>
            <p>transfer function</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">VSS</term>
          <def>
            <p>void space surface</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study was supported by the Natural Sciences and Engineering Research Council of Canada (DG-N06722).</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>All the data used in the user study are available upon request from the corresponding author.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>AT contributed to conceptualization, formal analysis, investigation, methodology, validation, visualization, and the writing of the original draft. SD contributed to investigation; methodology; supervision; and the writing, review, editing of the manuscript. MK-O contributed to conceptualization; funding acquisition; investigation; methodology; supervision; visualization; and the writing, review, and editing of the manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>AT developed the Connect Brain mobile app and its related intellectual property along with Concordia University. The app was developed for the sole purpose of drawing a scientific conclusion about depth perception in angiography visualization. It was distributed free of charge and does not have any in-app advertisements or paid content. All other authors declare no other conflicts of interest.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kersten-Oertel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>An evaluation of depth enhancing perceptual cues for vascular volume visualization in neurosurgery</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2014</year>
          <month>03</month>
          <volume>20</volume>
          <issue>3</issue>
          <fpage>391</fpage>
          <lpage>403</lpage>
          <pub-id pub-id-type="doi">10.1109/TVCG.2013.240</pub-id>
          <pub-id pub-id-type="medline">24434220</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Scheinost</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Vives</surname>
              <given-names>KP</given-names>
            </name>
            <name name-style="western">
              <surname>Spencer</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Staib</surname>
              <given-names>LH</given-names>
            </name>
            <name name-style="western">
              <surname>Papademetris</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Novel interaction techniques for neurosurgical planning and stereotactic navigation</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2008</year>
          <volume>14</volume>
          <issue>6</issue>
          <fpage>1587</fpage>
          <lpage>94</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/18989014"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TVCG.2008.150</pub-id>
          <pub-id pub-id-type="medline">18989014</pub-id>
          <pub-id pub-id-type="pmcid">PMC2633029</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ritter</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hansen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dicken</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Konrad</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Preim</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Peitgen</surname>
              <given-names>HO</given-names>
            </name>
          </person-group>
          <article-title>Real-time illustration of vascular structures</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2006</year>
          <volume>12</volume>
          <issue>5</issue>
          <fpage>877</fpage>
          <lpage>84</lpage>
          <pub-id pub-id-type="doi">10.1109/TVCG.2006.172</pub-id>
          <pub-id pub-id-type="medline">17080812</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ropinski</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Steinicke</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hinrichs</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Visually supporting depth perception in angiography imaging</article-title>
          <source>Proceedings of the 6th International Symposium, SG 2006</source>
          <year>2006</year>
          <conf-name>6th International Symposium, SG 2006</conf-name>
          <conf-date>July 23-25, 2006</conf-date>
          <conf-loc>Vancouver, BC</conf-loc>
          <pub-id pub-id-type="doi">10.1007/11795018_9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Preim</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Baer</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Isenberg</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Ropinski</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A survey of perceptually motivated 3D visualization of medical image data</article-title>
          <source>Comput Graph Forum</source>
          <year>2016</year>
          <month>07</month>
          <day>04</day>
          <volume>35</volume>
          <issue>3</issue>
          <fpage>501</fpage>
          <lpage>25</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/cgf.12927"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/cgf.12927</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abhari</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Baxter</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Eagleson</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Peters</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>de Ribaupierrec</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Perceptual enhancement of arteriovenous malformation in MRI angiography displays</article-title>
          <source>Proceedings of the Medical imaging 2012: Image perception, observer performance, and technology assessment</source>
          <year>2012</year>
          <conf-name>Medical Imaging 2012: Image Perception, Observer Performance, and Technology Assessment</conf-name>
          <conf-date>February 8-9, 2012</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://iacl.ece.jhu.edu/proceedings/spie2012/DATA/8318_8.PDF"/>
          </comment>
          <pub-id pub-id-type="doi">10.1117/12.911687</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Drouin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>DiGiovanni</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Kersten-Oertel</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Interaction driven enhancement of depth perception in angiographic volumes</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2018</year>
          <month>12</month>
          <day>06</day>
          <volume>26</volume>
          <issue>6</issue>
          <fpage>2247</fpage>
          <lpage>57</lpage>
          <pub-id pub-id-type="doi">10.1109/TVCG.2018.2884940</pub-id>
          <pub-id pub-id-type="medline">30530366</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lawonn</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Luz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hansen</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Improving spatial perception of vascular models using supporting anchors and illustrative visualization</article-title>
          <source>Comput Graph</source>
          <year>2017</year>
          <month>04</month>
          <volume>63</volume>
          <fpage>37</fpage>
          <lpage>49</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cag.2017.02.002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lawonn</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Luz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Preim</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hansen</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Illustrative visualization of vascular models for static 2D representations</article-title>
          <source>Proceedings of the 18th International Conference on Medical Image Computing and Computer Assisted Intervention</source>
          <year>2015</year>
          <conf-name>18th International Conference on Medical Image Computing and Computer Assisted Intervention</conf-name>
          <conf-date>October 5-9, 2015</conf-date>
          <conf-loc>Munich, Germany</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-319-24571-3_48</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kreiser</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hermosilla</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ropinski</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Void space surfaces to convey depth in vessel visualizations</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2021</year>
          <month>10</month>
          <day>1</day>
          <volume>27</volume>
          <issue>10</issue>
          <fpage>3913</fpage>
          <lpage>25</lpage>
          <pub-id pub-id-type="doi">10.1109/TVCG.2020.2993992</pub-id>
          <pub-id pub-id-type="medline">32406840</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Titov</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kersten-Oertel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Drouin</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The effect of interactive cues on the perception of angiographic volumes in virtual reality</article-title>
          <source>Comput Methods Biomech Biomed Eng Imaging Vis</source>
          <year>2021</year>
          <month>11</month>
          <day>08</day>
          <volume>10</volume>
          <issue>4</issue>
          <fpage>357</fpage>
          <lpage>65</lpage>
          <pub-id pub-id-type="doi">10.1080/21681163.2021.1999332</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dergousoff</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Mandryk</surname>
              <given-names>RL</given-names>
            </name>
          </person-group>
          <article-title>Mobile gamification for crowdsourcing data collection: leveraging the freemium model</article-title>
          <source>Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems</source>
          <year>2015</year>
          <conf-name>CHI '15: CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>April 18-23, 2015</conf-date>
          <conf-loc>Seoul, Republic of Korea</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2702123.2702296</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mueller</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Gamification as a paradigm for the evaluation of visual analytics systems</article-title>
          <source>Proceedings of the Fifth Workshop on Beyond Time and Errors: Novel Evaluation Methods for Visualization</source>
          <year>2014</year>
          <conf-name>BELIV '14: Novel Evaluation Methods For Visualization 2014</conf-name>
          <conf-date>November 10, 2014</conf-date>
          <conf-loc>Paris, France</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2669557.2669574</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="web">
          <article-title>Connect brain</article-title>
          <source>Google Play</source>
          <access-date>2023-01-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://play.google.com/store/apps/details?id=ca.andreytitov.connectbrain&amp;hl=en_CA&amp;gl=CA">https://play.google.com/store/apps/details?id=ca.andreytitov.connectbrain&amp;hl=en_CA&amp;gl=CA</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="web">
          <article-title>Connect brain</article-title>
          <source>Apple Store</source>
          <access-date>2023-01-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://apps.apple.com/ca/app/connect-brain/id1524359191">https://apps.apple.com/ca/app/connect-brain/id1524359191</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Titov</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Comparing vascular visualization techniques with gamification</article-title>
          <source>Concordia University</source>
          <year>2020</year>
          <access-date>2023-08-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://spectrum.library.concordia.ca/987821/">https://spectrum.library.concordia.ca/987821/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mason</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Suri</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Conducting behavioral research on Amazon's Mechanical Turk</article-title>
          <source>Behav Res Methods</source>
          <year>2012</year>
          <month>03</month>
          <volume>44</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.3758/s13428-011-0124-6</pub-id>
          <pub-id pub-id-type="medline">21717266</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cole</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Sanik</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>DeCarlo</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Finkelstein</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Funkhouser</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rusinkiewicz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>How well do line drawings depict shape?</article-title>
          <source>Proceedings of the ACM SIGGRAPH 2009 papers</source>
          <year>2009</year>
          <conf-name>SIGGRAPH09: Special Interest Group on Computer Graphics and Interactive Techniques Conference</conf-name>
          <conf-date>August 3-7, 2009</conf-date>
          <conf-loc>New Orleans, LA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/1576246.1531334</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Snow</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>O'Connor</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Jurafsky</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>AY</given-names>
            </name>
          </person-group>
          <article-title>Cheap and fast---but is it good?: evaluating non-expert annotations for natural language tasks</article-title>
          <source>Proceedings of the Conference on Empirical Methods in Natural Language Processing</source>
          <year>2008</year>
          <conf-name>EMNLP '08: Proceedings of the Conference on Empirical Methods in Natural Language Processing</conf-name>
          <conf-date>October 25-27, 2008</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <pub-id pub-id-type="doi">10.3115/1613715.1613751</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marge</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rudnicky</surname>
              <given-names>AI</given-names>
            </name>
          </person-group>
          <article-title>Using the Amazon Mechanical Turk for transcription of spoken language</article-title>
          <source>Proceedings of the 2010 IEEE International Conference on Acoustics, Speech and Signal Processing</source>
          <year>2010</year>
          <conf-name>2010 IEEE International Conference on Acoustics, Speech and Signal Processing</conf-name>
          <conf-date>March 14-19, 2010</conf-date>
          <conf-loc>Dallas, TX</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icassp.2010.5494979</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="web">
          <article-title>Unity real-time development platform</article-title>
          <source>Unity Technologies</source>
          <access-date>2023-01-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://unity.com/">https://unity.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blinn</surname>
              <given-names>JF</given-names>
            </name>
          </person-group>
          <article-title>Models of light reflection for computer synthesized pictures</article-title>
          <source>Proceedings of the 4th annual conference on Computer graphics and interactive techniques</source>
          <year>1977</year>
          <conf-name>SIGGRAPH '77: Computer graphics and interactive techniques</conf-name>
          <conf-date>July 20-22, 1977</conf-date>
          <conf-loc>San Jose, CA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/563858.563893</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Drouin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Collins</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>PRISM: an open source framework for the interactive design of GPU volume rendering shaders</article-title>
          <source>PLoS One</source>
          <year>2018</year>
          <month>3</month>
          <day>13</day>
          <volume>13</volume>
          <issue>3</issue>
          <fpage>e0193636</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0193636"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0193636</pub-id>
          <pub-id pub-id-type="medline">29534069</pub-id>
          <pub-id pub-id-type="pii">PONE-D-17-16488</pub-id>
          <pub-id pub-id-type="pmcid">PMC5849289</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bailey</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Using ChromaDepth to obtain inexpensive single-image stereovision for scientific visualization</article-title>
          <source>J Graph Tools</source>
          <year>1998</year>
          <volume>3</volume>
          <issue>3</issue>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1080/10867651.1998.10487491</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lum</surname>
              <given-names>EB</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>KL</given-names>
            </name>
          </person-group>
          <article-title>Hardware-accelerated parallel non-photorealistic volume rendering</article-title>
          <source>Proceedings of the 2nd international symposium on Non-photorealistic animation and rendering</source>
          <year>2002</year>
          <conf-name>NPAR02: Non-Photorealistic Animation and Rendering</conf-name>
          <conf-date>June 3-5, 2002</conf-date>
          <conf-loc>Annecy, France</conf-loc>
          <pub-id pub-id-type="doi">10.1145/508530.508542</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rheingans</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Ebert</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Volume illustration: nonphotorealistic rendering of volume models</article-title>
          <source>IEEE Trans Visual Comput Graph</source>
          <year>2001</year>
          <volume>7</volume>
          <issue>3</issue>
          <fpage>253</fpage>
          <lpage>64</lpage>
          <pub-id pub-id-type="doi">10.1109/2945.942693</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kersten</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Stewart</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Troje</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ellis</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Enhancing depth perception in translucent volumes</article-title>
          <source>IEEE Trans Vis Comput Graph</source>
          <year>2006</year>
          <volume>12</volume>
          <issue>5</issue>
          <fpage>1117</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1109/TVCG.2006.139</pub-id>
          <pub-id pub-id-type="medline">17080842</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Steenblik</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>The chromostereoscopic process: a novel single image stereoscopic process</article-title>
          <source>Proceedings of the True Three-dimensional Imaging Techniques and Display Technologies</source>
          <year>1987</year>
          <conf-name>True Three-dimensional Imaging Techniques and Display Technologies</conf-name>
          <conf-date>January 15-16, 1987</conf-date>
          <conf-loc>Los Angeles, CA</conf-loc>
          <pub-id pub-id-type="doi">10.1117/12.940117</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>May</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Stone</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Chromostereopsis: a multicomponent depth effect?</article-title>
          <source>Displays</source>
          <year>1993</year>
          <month>10</month>
          <volume>14</volume>
          <issue>4</issue>
          <fpage>227</fpage>
          <lpage>34</lpage>
          <pub-id pub-id-type="doi">10.1016/0141-9382(93)90093-k</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Suzuki</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Abe</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Topological structural analysis of digitized binary images by border following</article-title>
          <source>Comput Vis Graph Image Process</source>
          <year>1985</year>
          <month>04</month>
          <volume>30</volume>
          <issue>1</issue>
          <fpage>32</fpage>
          <lpage>46</lpage>
          <pub-id pub-id-type="doi">10.1016/0734-189x(85)90016-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shepard</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A two-dimensional interpolation function for irregularly-spaced data</article-title>
          <source>Proceedings of the 1968 23rd ACM national conference</source>
          <year>1968</year>
          <conf-name>ACM '68: Proceedings of the 1968 23rd ACM national conference</conf-name>
          <conf-date>August 27-29, 1968</conf-date>
          <conf-loc>New York, NY</conf-loc>
          <pub-id pub-id-type="doi">10.1145/800186.810616</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ritschel</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Grosch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Seidel</surname>
              <given-names>H-P</given-names>
            </name>
          </person-group>
          <article-title>Approximating dynamic global illumination in image space</article-title>
          <source>Proceedings of the 2009 symposium on Interactive 3D graphics and games</source>
          <year>2009</year>
          <conf-name>I3D '09: Symposium on Interactive 3D Graphics and Games</conf-name>
          <conf-date>February 27-March 1, 2009</conf-date>
          <conf-loc>Boston, MA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/1507149.1507161</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bavoil</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sainz</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Screen space ambient occlusion</article-title>
          <source>NVIDIA Corporation</source>
          <year>2008</year>
          <month>10</month>
          <access-date>2021-10-12</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/228576448_Screen_Space_Ambient_Occlusion">https://www.researchgate.net/publication/228576448_Screen_Space_Ambient_Occlusion</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kruger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Westermann</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Acceleration techniques for GPU-based volume rendering</article-title>
          <source>Proceedings of the IEEE Visualization</source>
          <year>2003</year>
          <conf-name>IEEE Visualization</conf-name>
          <conf-date>October 19-24, 2003</conf-date>
          <conf-loc>Seattle, WA</conf-loc>
          <pub-id pub-id-type="doi">10.1109/visual.2003.1250384</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Levoy</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Efficient ray tracing of volume data</article-title>
          <source>ACM Trans Graph</source>
          <year>1990</year>
          <month>07</month>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>245</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1145/78964.78965</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zuiderveld</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Koning</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>Viergever</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Acceleration of ray-casting using 3-D distance transforms</article-title>
          <source>Proceedings of the Visualization in Biomedical Computing 1992</source>
          <year>1992</year>
          <conf-name>Proceedings Visualization in Biomedical Computing 1992</conf-name>
          <conf-date>October 13-16, 1992</conf-date>
          <conf-loc>Chapel Hill, NC</conf-loc>
          <pub-id pub-id-type="doi">10.1117/12.131088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="web">
          <article-title>HTC U12+ Mockup.png</article-title>
          <source>Wikimedia</source>
          <access-date>2023-01-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://commons.wikimedia.org/wiki/File:HTC_U12%2B_Mockup.png">https://commons.wikimedia.org/wiki/File:HTC_U12%2B_Mockup.png</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="web">
          <article-title>CC BY-SA 3.0 DEED Attribution-ShareAlike 3.0 Unported</article-title>
          <source>Creative Commons</source>
          <access-date>2023-01-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://creativecommons.org/licenses/by-sa/3.0/">https://creativecommons.org/licenses/by-sa/3.0/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hart</surname>
              <given-names>PE</given-names>
            </name>
            <name name-style="western">
              <surname>Nilsson</surname>
              <given-names>NJ</given-names>
            </name>
            <name name-style="western">
              <surname>Raphael</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A formal basis for the heuristic determination of minimum cost paths</article-title>
          <source>IEEE Trans Syst Sci Cyber</source>
          <year>1968</year>
          <month>07</month>
          <volume>4</volume>
          <issue>2</issue>
          <fpage>100</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1109/tssc.1968.300136</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fredman</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Tarjan</surname>
              <given-names>RE</given-names>
            </name>
          </person-group>
          <article-title>Fibonacci heaps and their uses in improved network optimization algorithms</article-title>
          <source>J ACM</source>
          <year>1987</year>
          <month>07</month>
          <day>1</day>
          <volume>34</volume>
          <issue>3</issue>
          <fpage>596</fpage>
          <lpage>615</lpage>
          <pub-id pub-id-type="doi">10.1145/28869.28874</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <article-title>LinkedIn: log in or sign up</article-title>
          <source>LinkedIn</source>
          <access-date>2023-03-23</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.linkedin.com/">https://www.linkedin.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <article-title>Twitter homepage</article-title>
          <source>Twitter</source>
          <access-date>2023-03-23</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://twitter.com/">https://twitter.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="web">
          <article-title>Facebook - log in or sign up</article-title>
          <source>Facebook</source>
          <access-date>2023-03-23</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.facebook.com/">https://www.facebook.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="web">
          <article-title>IBM SPSS software</article-title>
          <source>IBM</source>
          <access-date>2023-01-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ibm.com/spss">https://www.ibm.com/spss</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guérard-Poirier</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Beniey</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Meloche-Dumas</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lebel-Guay</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Misheva</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Abbas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dhane</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Elraheb</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dubrowski</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Patocskai</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>An educational network for surgical education supported by gamification elements: protocol for a randomized controlled trial</article-title>
          <source>JMIR Res Protoc</source>
          <year>2020</year>
          <month>12</month>
          <day>14</day>
          <volume>9</volume>
          <issue>12</issue>
          <fpage>e21273</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchprotocols.org/2020/12/e21273/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/21273</pub-id>
          <pub-id pub-id-type="medline">33284780</pub-id>
          <pub-id pub-id-type="pii">v9i12e21273</pub-id>
          <pub-id pub-id-type="pmcid">PMC7744140</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Washington</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kalantarian</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tariq</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dunlap</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chrisman</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Varma</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ning</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kline</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stockham</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Paskov</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Voss</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Haber</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wall</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>Validity of online screening for autism: crowdsourcing study comparing paid and unpaid diagnostic tasks</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>05</month>
          <day>23</day>
          <volume>21</volume>
          <issue>5</issue>
          <fpage>e13668</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/5/e13668/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/13668</pub-id>
          <pub-id pub-id-type="medline">31124463</pub-id>
          <pub-id pub-id-type="pii">v21i5e13668</pub-id>
          <pub-id pub-id-type="pmcid">PMC6552453</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meuschke</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Smit</surname>
              <given-names>NN</given-names>
            </name>
            <name name-style="western">
              <surname>Lichtenberg</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Preim</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Lawonn</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>EvalViz – surface visualization evaluation wizard for depth and shape perception tasks</article-title>
          <source>Comput Graph</source>
          <year>2019</year>
          <month>08</month>
          <volume>82</volume>
          <fpage>250</fpage>
          <lpage>63</lpage>
          <pub-id pub-id-type="doi">10.1016/j.cag.2019.05.022</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
