2022 (4)

Step Into My Mind Palace: Exploration of a Collaborative Paragogy Tool in VR.
Sims, R., Chang, B., Bennett, V., Krishnan, A., Aboubakar, A., Coman, G., Bahrami, A., Huang, Z., Clarke, C., & Karnik, A.
In 2022 8th International Conference of the Immersive Learning Research Network (iLRN), pages 1-8, 2022.

@INPROCEEDINGS{9815936,
  author={Sims, Robert and Chang, Barry and Bennett, Verity and Krishnan, Advaith and Aboubakar, Abdalslam and Coman, George and Bahrami, Abdulrazak and Huang, Zehao and Clarke, Christopher and Karnik, Abhijit},
  booktitle={2022 8th International Conference of the Immersive Learning Research Network (iLRN)},
  title={Step Into My Mind Palace: Exploration of a Collaborative Paragogy Tool in VR},
  year={2022},
  pages={1-8},
  doi={10.23919/iLRN55037.2022.9815936}
}

OpenEarable: Open Hardware Earable Sensing Platform.
Röddiger, T., King, T., Roodt, D. R., Clarke, C., & Beigl, M.
In Proceedings of the 1st International Workshop on Earable Computing, of EarComp’22, pages 29–34, New York, NY, USA, 2022. Association for Computing Machinery.

@inproceedings{10.1145/3544793.3563415,
  title = {OpenEarable: Open Hardware Earable Sensing Platform},
  author = {Röddiger, Tobias and King, Tobias and Roodt, Dylan Ray and Clarke, Christopher and Beigl, Michael},
  year = 2022,
  booktitle = {Proceedings of the 1st International Workshop on Earable Computing},
  location = {Cambridge, United Kingdom},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {EarComp’22},
  pages = {29–34},
  doi = {10.1145/3544793.3563415},
  url = {https://doi.org/10.1145/3544793.3563415},
  numpages = 6,
  keywords = {In-Ear Headphones, IMU, Monitoring}
}

Sensing with Earables: A Systematic Literature Review and Taxonomy of Phenomena.
Röddiger, T., Clarke, C., Breitling, P., Schneegans, T., Zhao, H., Gellersen, H., & Beigl, M.
Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., 6(3). September 2022.

@article{10.1145/3550314,
  author = {R\"{o}ddiger, Tobias and Clarke, Christopher and Breitling, Paula and Schneegans, Tim and Zhao, Haibin and Gellersen, Hans and Beigl, Michael},
  title = {Sensing with Earables: A Systematic Literature Review and Taxonomy of Phenomena},
  year = {2022},
  issue_date = {September 2022},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  volume = {6},
  number = {3},
  url = {https://doi.org/10.1145/3550314},
  doi = {10.1145/3550314},
  journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
  month = {sep},
  articleno = {135},
  numpages = {57},
  keywords = {earables, ear wearable, ear-mounted, headphones, earbuds, ear-attached, ear-worn, hearables, earpiece, earphones, ear-based}
}

Earables have emerged as a unique platform for ubiquitous computing by augmenting ear-worn devices with state-of-the-art sensing. This new platform has spurred a wealth of new research exploring what can be detected on a wearable, small form factor. As a sensing platform, the ears are less susceptible to motion artifacts and are located in close proximity to a number of important anatomical structures including the brain, blood vessels, and facial muscles which reveal a wealth of information. They can be easily reached by the hands and the ear canal itself is affected by mouth, face, and head movements. We have conducted a systematic literature review of 271 earable publications from the ACM and IEEE libraries. These were synthesized into an open-ended taxonomy of 47 different phenomena that can be sensed in, on, or around the ear. Through analysis, we identify 13 fundamental phenomena from which all other phenomena can be derived, and discuss the different sensors and sensing principles used to detect them. We comprehensively review the phenomena in four main areas of (i) physiological monitoring and health, (ii) movement and activity, (iii) interaction, and (iv) authentication and identification. This breadth highlights the potential that earables have to offer as a ubiquitous, general-purpose platform.

Advanced Visual Interfaces for Augmented Video.
Coccoli, M., Galluccio, I., Torre, I., Amenduni, F., Cattaneo, A., & Clarke, C.
In Proceedings of the 2022 International Conference on Advanced Visual Interfaces, of AVI 2022, New York, NY, USA, 2022. Association for Computing Machinery.

@inproceedings{10.1145/3531073.3535253,
  author = {Coccoli, Mauro and Galluccio, Ilenia and Torre, Ilaria and Amenduni, Francesca and Cattaneo, Alberto and Clarke, Christopher},
  title = {Advanced Visual Interfaces for Augmented Video},
  year = {2022},
  isbn = {9781450397193},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3531073.3535253},
  doi = {10.1145/3531073.3535253},
  booktitle = {Proceedings of the 2022 International Conference on Advanced Visual Interfaces},
  articleno = {91},
  numpages = {3},
  keywords = {visual feedback, intelligent user interfaces, 360 degree video, hypervideos},
  location = {Frascati, Rome, Italy},
  series = {AVI 2022}
}

The growing use of online videos across a wide range of applications, including education and training, demands new approaches to enhance their utility and the user experience, and to minimise or overcome their limitations. In addition, these new approaches must consider the needs of users with different requirements, abilities, and usage contexts. Advances in human-computer interaction, immersive video, artificial intelligence and adaptive systems can be effectively exploited to this aim, opening up exciting opportunities for enhancing the video medium. The purpose of this workshop is to bring together experts in the fields above and from popular application domains in order to provide a forum for discussing the current state-of-the-art and requirements for specific application domains, in addition to proposing experimental and theoretical approaches.

2021 (3)

Reactive Video: Movement Sonification as Auditory Feedback for Supporting Physical Activity.
Cavdir, D., Clarke, C., Chiu, P., Denoue, L., & Kimber, D.
In New Interfaces for Musical Expression (NIME) 2021. 2021.

@incollection{cavdir2021reactive,
  title={Reactive Video: Movement Sonification as Auditory Feedback for Supporting Physical Activity},
  author={Cavdir, Doga and Clarke, Christopher and Chiu, Patrick and Denoue, Laurent and Kimber, Don},
  booktitle={New Interfaces for Musical Expression (NIME) 2021},
  year={2021}
}

EarRumble: Discreet Hands- and Eyes-Free Input by Voluntary Tensor Tympani Muscle Contraction.
Röddiger, T., Clarke, C., Wolffram, D., Budde, M., & Beigl, M.
In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, of CHI '21, New York, NY, USA, 2021. Association for Computing Machinery.

@inproceedings{10.1145/3411764.3445205,
  author = {R\"{o}ddiger, Tobias and Clarke, Christopher and Wolffram, Daniel and Budde, Matthias and Beigl, Michael},
  title = {EarRumble: Discreet Hands- and Eyes-Free Input by Voluntary Tensor Tympani Muscle Contraction},
  year = {2021},
  isbn = {9781450380966},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3411764.3445205},
  doi = {10.1145/3411764.3445205},
  booktitle = {Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems},
  articleno = {743},
  numpages = {14},
  keywords = {discreet interaction, in-ear barometry, hearables, earables, tensor tympani muscle, subtle gestures},
  location = {Yokohama, Japan},
  series = {CHI '21}
}

We explore how discreet input can be provided using the tensor tympani - a small muscle in the middle ear that some people can voluntarily contract to induce a dull rumbling sound. We investigate the prevalence and ability to control the muscle through an online questionnaire (N=192) in which 43.2% of respondents reported the ability to “ear rumble”. Data collected from participants (N=16) shows how in-ear barometry can be used to detect voluntary tensor tympani contraction in the sealed ear canal. This data was used to train a classifier based on three simple ear rumble “gestures” which achieved 95% accuracy. Finally, we evaluate the use of ear rumbling for interaction, grounded in three manual, dual-task application scenarios (N=8). This highlights the applicability of EarRumble as a low-effort and discreet eyes- and hands-free interaction technique that users found “magical” and “almost telepathic”.

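As an illustration of the kind of pipeline the abstract describes, the sketch below classifies short in-ear pressure windows with a nearest-centroid rule. The features, labels, and helper names are invented for the example and are not taken from the paper.

import numpy as np

def pressure_features(window):
    # Hand-crafted features for one in-ear pressure window (illustrative only;
    # the paper does not specify its feature set).
    window = np.asarray(window, dtype=float)
    return np.array([
        window.min() - window[0],          # depth of the pressure dip
        np.argmin(window) / len(window),   # relative position of the dip
        np.abs(np.diff(window)).sum(),     # total signal variation
    ])

class NearestCentroid:
    # Tiny nearest-centroid classifier over a small set of rumble gestures.
    def fit(self, X, y):
        y = np.asarray(y)
        self.labels_ = sorted(set(y))
        self.centroids_ = np.array([X[y == label].mean(axis=0) for label in self.labels_])
        return self

    def predict(self, X):
        distances = np.linalg.norm(X[:, None, :] - self.centroids_[None, :, :], axis=2)
        return [self.labels_[i] for i in distances.argmin(axis=1)]

# Hypothetical usage, assuming `windows` holds fixed-length pressure windows and
# `labels` the corresponding gesture names:
# X = np.stack([pressure_features(w) for w in windows])
# classifier = NearestCentroid().fit(X, labels)
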
Gaze+Hold: Eyes-Only Direct Manipulation with Continuous Gaze Modulated by Closure of One Eye.
Ramirez Gomez, A. R., Clarke, C., Sidenmark, L., & Gellersen, H.
In ACM Symposium on Eye Tracking Research and Applications, of ETRA '21 Full Papers, New York, NY, USA, 2021. Association for Computing Machinery.

@inproceedings{10.1145/3448017.3457381,
  author = {Ramirez Gomez, Argenis Ramirez and Clarke, Christopher and Sidenmark, Ludwig and Gellersen, Hans},
  title = {Gaze+Hold: Eyes-Only Direct Manipulation with Continuous Gaze Modulated by Closure of One Eye},
  year = {2021},
  isbn = {9781450383448},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3448017.3457381},
  doi = {10.1145/3448017.3457381},
  booktitle = {ACM Symposium on Eye Tracking Research and Applications},
  articleno = {10},
  numpages = {12},
  keywords = {Winks, Eye Tracking, Design, Gaze Pointing, Direct Manipulation, Closing eyelids, Gaze Interaction},
  location = {Virtual Event, Germany},
  series = {ETRA '21 Full Papers}
}

The eyes are coupled in their gaze function and therefore usually treated as a single input channel, limiting the range of interactions. However, people are able to open and close one eye while still gazing with the other. We introduce Gaze+Hold as an eyes-only technique that builds on this ability to leverage the eyes as separate input channels, with one eye modulating the state of interaction while the other provides continuous input. Gaze+Hold enables direct manipulation beyond pointing which we explore through the design of Gaze+Hold techniques for a range of user interface tasks. In a user study, we evaluated performance, usability and user’s spontaneous choice of eye for modulation of input. The results show that users are effective with Gaze+Hold. The choice of dominant versus non-dominant eye had no effect on performance, perceived usability and workload. This is significant for the utility of Gaze+Hold as it affords flexibility for mapping of either eye in different configurations.

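A minimal sketch of the interaction logic the abstract outlines, assuming a hypothetical scene object with pick and move methods: closing one eye switches into a hold state, and the gaze of the open eye then drives continuous manipulation until both eyes are open again. This illustrates the idea only and is not the authors' implementation.

from dataclasses import dataclass

@dataclass
class GazeSample:
    x: float            # gaze point reported for the open (tracking) eye
    y: float
    left_open: bool
    right_open: bool

class GazeHold:
    # Illustrative controller: one eye's closure modulates the interaction state,
    # the other eye's gaze provides continuous input.
    def __init__(self, scene):
        self.scene = scene     # assumed to expose pick(x, y) and move(obj, x, y)
        self.held = None

    def update(self, sample: GazeSample):
        one_eye_closed = sample.left_open != sample.right_open
        if one_eye_closed and self.held is None:
            self.held = self.scene.pick(sample.x, sample.y)   # begin direct manipulation
        elif one_eye_closed and self.held is not None:
            self.scene.move(self.held, sample.x, sample.y)    # gaze drags the held object
        else:
            self.held = None                                  # both eyes open: release
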
2020 (5)

Reactive Video: Adaptive Video Playback Based on User Motion for Supporting Physical Activity.
Clarke, C., Cavdir, D., Chiu, P., Denoue, L., & Kimber, D.
In Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology, of UIST '20, pages 196–208, New York, NY, USA, 2020. Association for Computing Machinery.

@inproceedings{10.1145/3379337.3415591,
  author = {Clarke, Christopher and Cavdir, Doga and Chiu, Patrick and Denoue, Laurent and Kimber, Don},
  title = {Reactive Video: Adaptive Video Playback Based on User Motion for Supporting Physical Activity},
  year = {2020},
  isbn = {9781450375146},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3379337.3415591},
  doi = {10.1145/3379337.3415591},
  booktitle = {Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology},
  pages = {196–208},
  numpages = {13},
  keywords = {physical activity, direct manipulation, full body, probabilistic},
  location = {Virtual Event, USA},
  series = {UIST '20},
  url_Link = {https://eprints.lancs.ac.uk/id/eprint/147601/1/ReactiveVideo_UIST2020_PrePrint.pdf}
}

Videos are a convenient platform to begin, maintain, or improve a fitness program or physical activity. Traditional video systems allow users to manipulate videos through specific user interface actions such as button clicks or mouse drags, but have no model of what the user is doing and are unable to adapt in useful ways. We present adaptive video playback, which seamlessly synchronises video playback with the user's movements, building upon the principle of direct manipulation video navigation. We implement adaptive video playback in Reactive Video, a vision-based system which supports users learning or practising a physical skill. The use of pre-existing videos removes the need to create bespoke content or specially authored videos, and the system can provide real-time guidance and feedback to better support users when learning new movements. Adaptive video playback using a discrete Bayes and particle filter are evaluated on a data set collected of participants performing tai chi and radio exercises. Results show that both approaches can accurately adapt to the user's movements, however reversing playback can be problematic.

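The abstract names a discrete Bayes filter over playback position. The sketch below shows one update step of such a filter under assumed models (a uniform advance of up to a few frames, and a caller-supplied pose-similarity likelihood); it is illustrative and not the paper's implementation.

import numpy as np

def bayes_filter_step(belief, likelihood, max_step=3):
    # One update of a discrete Bayes filter over the video frame index.
    #   belief:     current probability over frames, shape (n_frames,)
    #   likelihood: p(observed user pose | frame), shape (n_frames,), e.g. from
    #               similarity between the user's pose and the pose in each frame
    #   max_step:   how far the playhead may advance per update (assumed model)
    n = len(belief)
    predicted = np.zeros(n)
    for step in range(max_step + 1):               # predict: stay or advance
        predicted[step:] += belief[:n - step] / (max_step + 1)
    posterior = predicted * likelihood             # correct with the observation
    return posterior / posterior.sum()

# The playback position then tracks the most probable frame:
# frame_index = int(np.argmax(belief))
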
Motion Coupling of Earable Devices in Camera View.
Clarke, C., Ehrich, P., & Gellersen, H.
In 19th International Conference on Mobile and Ubiquitous Multimedia, of MUM 2020, pages 13–17, New York, NY, USA, 2020. Association for Computing Machinery.

@inproceedings{10.1145/3428361.3428470,
  author = {Clarke, Christopher and Ehrich, Peter and Gellersen, Hans},
  title = {Motion Coupling of Earable Devices in Camera View},
  year = {2020},
  isbn = {9781450388702},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3428361.3428470},
  doi = {10.1145/3428361.3428470},
  booktitle = {19th International Conference on Mobile and Ubiquitous Multimedia},
  pages = {13–17},
  numpages = {5},
  keywords = {Spontaneous device association, earable, motion coupling},
  location = {Essen, Germany},
  series = {MUM 2020}
}

Earables, earphones augmented with inertial sensors and real-time data accessibility, provide the opportunity for private audio channels in public settings. One of the main challenges of achieving this goal is to correctly associate which device belongs to which user without prior information. In this paper, we explore how motion of an earable, as measured by the on-board accelerometer, can be correlated against detected faces from a webcam to accurately match which user is wearing the device. We conduct a data collection and explore which type of user movement can be accurately detected using this approach, and investigate how varying the speed of the movement affects detection rates. Our results show that the approach achieves greater detection results for faster movements, and that it can differentiate the same movement across different participants with a detection rate of 86%, increasing to 92% when differentiating a movement against others.

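A rough sketch of the correlation idea from the abstract: compare the earable's motion signal against the motion of each detected face over the same time window and associate the device with the best match. The signal choice, alignment, and threshold are assumptions made for illustration, not details from the paper.

import numpy as np

def match_earable_to_face(earable_motion, face_tracks, threshold=0.7):
    # earable_motion: 1-D motion magnitude per video frame, derived from the
    #                 on-board accelerometer (assumed already resampled to the
    #                 camera frame rate and the same length as the face tracks)
    # face_tracks:    dict of face id -> (n, 2) array of face-centre positions
    best_id, best_r = None, threshold
    for face_id, centres in face_tracks.items():
        speed = np.linalg.norm(np.diff(centres, axis=0), axis=1)   # per-frame face speed
        r = np.corrcoef(earable_motion[1:], speed)[0, 1]           # Pearson correlation
        if r > best_r:
            best_id, best_r = face_id, r
    return best_id   # None if no face moves consistently with the earable
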
BimodalGaze: Seamlessly Refined Pointing with Gaze and Filtered Gestural Head Movement.
Sidenmark, L., Mardanbegi, D., Gomez, A. R., Clarke, C., & Gellersen, H.
In ACM Symposium on Eye Tracking Research and Applications, of ETRA '20 Full Papers, New York, NY, USA, 2020. Association for Computing Machinery.

@inproceedings{10.1145/3379155.3391312,
  author = {Sidenmark, Ludwig and Mardanbegi, Diako and Gomez, Argenis Ramirez and Clarke, Christopher and Gellersen, Hans},
  title = {BimodalGaze: Seamlessly Refined Pointing with Gaze and Filtered Gestural Head Movement},
  year = {2020},
  isbn = {9781450371339},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3379155.3391312},
  doi = {10.1145/3379155.3391312},
  booktitle = {ACM Symposium on Eye Tracking Research and Applications},
  articleno = {8},
  numpages = {9},
  keywords = {Gaze interaction, Eye tracking, Refinement, Virtual reality, Eye-head coordination},
  location = {Stuttgart, Germany},
  series = {ETRA '20 Full Papers}
}

Eye gaze is a fast and ergonomic modality for pointing but limited in precision and accuracy. In this work, we introduce BimodalGaze, a novel technique for seamless head-based refinement of a gaze cursor. The technique leverages eye-head coordination insights to separate natural from gestural head movement. This allows users to quickly shift their gaze to targets over larger fields of view with naturally combined eye-head movement, and to refine the cursor position with gestural head movement. In contrast to an existing baseline, head refinement is invoked automatically, and only if a target is not already acquired by the initial gaze shift. Study results show that users reliably achieve fine-grained target selection, but we observed a higher rate of initial selection errors affecting overall performance. An in-depth analysis of user performance provides insight into the classification of natural versus gestural head movement, for improvement of BimodalGaze and other potential applications.

Outline Pursuits: Gaze-Assisted Selection of Occluded Objects in Virtual Reality.
Sidenmark, L., Clarke, C., Zhang, X., Phu, J., & Gellersen, H.
In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, of CHI '20, pages 1–13, New York, NY, USA, 2020. Association for Computing Machinery.

@inproceedings{10.1145/3313831.3376438,
  author = {Sidenmark, Ludwig and Clarke, Christopher and Zhang, Xuesong and Phu, Jenny and Gellersen, Hans},
  title = {Outline Pursuits: Gaze-Assisted Selection of Occluded Objects in Virtual Reality},
  year = {2020},
  isbn = {9781450367080},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3313831.3376438},
  doi = {10.1145/3313831.3376438},
  booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
  pages = {1–13},
  numpages = {13},
  keywords = {eye tracking, occlusion, smooth pursuits, virtual reality},
  location = {Honolulu, HI, USA},
  series = {CHI '20}
}

In 3D environments, objects can be difficult to select when they overlap, as this affects available target area and increases selection ambiguity. We introduce Outline Pursuits which extends a primary pointing modality for gaze-assisted selection of occluded objects. Candidate targets within a pointing cone are presented with an outline that is traversed by a moving stimulus. This affords completion of the selection by gaze attention to the intended target's outline motion, detected by matching the user's smooth pursuit eye movement. We demonstrate two techniques implemented based on the concept, one with a controller as the primary pointer, and one in which Outline Pursuits are combined with head pointing for hands-free selection. Compared with conventional raycasting, the techniques require less movement for selection as users do not need to reposition themselves for a better line of sight, and selection time and accuracy are less affected when targets become highly occluded.

Dynamic motion coupling of body movement for input control.
Clarke, C.
Lancaster University (United Kingdom), 2020.

@book{clarke2020dynamic,
  title={Dynamic motion coupling of body movement for input control},
  author={Clarke, Christopher},
  year={2020},
  publisher={Lancaster University (United Kingdom)}
}

2019 (1)

Monocular Gaze Depth Estimation Using the Vestibulo-Ocular Reflex.
Mardanbegi, D., Clarke, C., & Gellersen, H.
In Proceedings of the 11th ACM Symposium on Eye Tracking Research & Applications, of ETRA '19, New York, NY, USA, 2019. Association for Computing Machinery.

@inproceedings{10.1145/3314111.3319822,
  author = {Mardanbegi, Diako and Clarke, Christopher and Gellersen, Hans},
  title = {Monocular Gaze Depth Estimation Using the Vestibulo-Ocular Reflex},
  year = {2019},
  isbn = {9781450367097},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3314111.3319822},
  doi = {10.1145/3314111.3319822},
  booktitle = {Proceedings of the 11th ACM Symposium on Eye Tracking Research \& Applications},
  articleno = {20},
  numpages = {9},
  keywords = {eye movement, eye tracking, 3D gaze estimation, fixation depth, VOR, gaze depth estimation},
  location = {Denver, Colorado},
  series = {ETRA '19}
}

Gaze depth estimation presents a challenge for eye tracking in 3D. This work investigates a novel approach to the problem based on eye movement mediated by the vestibulo-ocular reflex (VOR). VOR stabilises gaze on a target during head movement, with eye movement in the opposite direction, and the VOR gain increases the closer the fixated target is to the viewer. We present a theoretical analysis of the relationship between VOR gain and depth which we investigate with empirical data collected in a user study (N=10). We show that VOR gain can be captured using pupil centres, and propose and evaluate a practical method for gaze depth estimation based on a generic function of VOR gain and two-point depth calibration. The results show that VOR gain is comparable with vergence in capturing depth while only requiring one eye, and provide insight into open challenges in harnessing VOR gain as a robust measure.

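The abstract mentions a generic function of VOR gain with two-point depth calibration. Purely as an illustration, the sketch below assumes an inverse-depth gain model g(d) = b + a/d, fits a and b from two calibration fixations at known depths, and inverts the model for new gain measurements; the actual function used in the paper may differ.

def calibrate_vor_depth(d1, g1, d2, g2):
    # Two-point calibration for an assumed gain model g(d) = b + a / d.
    # (d1, g1), (d2, g2): VOR gains measured while fixating targets at known depths.
    a = (g1 - g2) / (1.0 / d1 - 1.0 / d2)
    b = g1 - a / d1

    def estimate_depth(gain):
        return a / (gain - b)      # invert the assumed model

    return estimate_depth

# Hypothetical usage with made-up calibration values (depths in metres):
# estimate = calibrate_vor_depth(d1=0.3, g1=1.20, d2=1.0, g2=1.06)
# estimate(1.10)   # -> 0.6
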
2017 (4)

MatchPoint: Spontaneous Spatial Coupling of Body Movement for Touchless Pointing.
Clarke, C., & Gellersen, H.
In Proceedings of the 30th Annual ACM Symposium on User Interface Software and Technology, of UIST '17, pages 179–192, New York, NY, USA, 2017. Association for Computing Machinery.

@inproceedings{10.1145/3126594.3126626,
  author = {Clarke, Christopher and Gellersen, Hans},
  title = {MatchPoint: Spontaneous Spatial Coupling of Body Movement for Touchless Pointing},
  year = {2017},
  isbn = {9781450349819},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3126594.3126626},
  doi = {10.1145/3126594.3126626},
  booktitle = {Proceedings of the 30th Annual ACM Symposium on User Interface Software and Technology},
  pages = {179–192},
  numpages = {14},
  keywords = {vision-based interfaces, computer vision, gesture input, pointing, input techniques, touchless input, user input, bodily interaction, motion-matching},
  location = {Qu\'{e}bec City, QC, Canada},
  series = {UIST '17}
}

Pointing is a fundamental interaction technique where user movement is translated to spatial input on a display. Conventionally, this is based on a rigid configuration of a display coupled with a pointing device that determines the types of movement that can be sensed, and the specific ways users can affect pointer input. Spontaneous spatial coupling is a novel input technique that instead allows any body movement, or movement of tangible objects, to be appropriated for touchless pointing on an ad hoc basis. Pointer acquisition is facilitated by the display presenting graphical objects in motion, to which users can synchronise to define a temporary spatial coupling with the body part or tangible object they used in the process. The technique can be deployed using minimal hardware, as demonstrated by MatchPoint, a generic computer vision-based implementation of the technique that requires only a webcam. We explore the design space of spontaneous spatial coupling, demonstrate the versatility of the technique with application examples, and evaluate MatchPoint performance using a multi-directional pointing task.

Remote Control by Body Movement in Synchrony with Orbiting Widgets: An Evaluation of TraceMatch.
Clarke, C., Bellino, A., Esteves, A., & Gellersen, H.
Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., 1(3). September 2017.

@article{10.1145/3130910,
  author = {Clarke, Christopher and Bellino, Alessio and Esteves, Augusto and Gellersen, Hans},
  title = {Remote Control by Body Movement in Synchrony with Orbiting Widgets: An Evaluation of TraceMatch},
  year = {2017},
  issue_date = {September 2017},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  volume = {1},
  number = {3},
  url = {https://doi.org/10.1145/3130910},
  doi = {10.1145/3130910},
  journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
  month = sep,
  articleno = {45},
  numpages = {22},
  keywords = {Gesture input, Movement correlation, Motion matching, Computer vision, Input techniques, Remote control, User input, Motion correlation, Path mimicry, User evaluation, Vision-based interfaces}
}

In this work we consider how users can use body movement for remote control with minimal effort and maximum flexibility. TraceMatch is a novel technique where the interface displays available controls as circular widgets with orbiting targets, and where users can trigger a control by mimicking the displayed motion. The technique uses computer vision to detect circular motion as a uniform type of input, but is highly appropriable as users can produce matching motion with any part of their body. We present three studies that investigate input performance with different parts of the body, user preferences, and spontaneous choice of movements for input in realistic application scenarios. The results show that users can provide effective input with their head, hands and while holding objects, that multiple controls can be effectively distinguished by the difference in presented phase and direction of movement, and that users choose and switch modes of input seamlessly.

Motion Correlation: Selecting Objects by Matching Their Movement.
Velloso, E., Carter, M., Newn, J., Esteves, A., Clarke, C., & Gellersen, H.
ACM Trans. Comput.-Hum. Interact., 24(3). April 2017.

@article{10.1145/3064937,
  author = {Velloso, Eduardo and Carter, Marcus and Newn, Joshua and Esteves, Augusto and Clarke, Christopher and Gellersen, Hans},
  title = {Motion Correlation: Selecting Objects by Matching Their Movement},
  year = {2017},
  issue_date = {July 2017},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  volume = {24},
  number = {3},
  issn = {1073-0516},
  url = {https://doi.org/10.1145/3064937},
  doi = {10.1145/3064937},
  journal = {ACM Trans. Comput.-Hum. Interact.},
  month = apr,
  articleno = {22},
  numpages = {35},
  keywords = {motion tracking, natural user interfaces, gesture interfaces, gaze interaction, Motion correlation, interaction techniques, eye tracking}
}

Selection is a canonical task in user interfaces, commonly supported by presenting objects for acquisition by pointing. In this article, we consider motion correlation as an alternative for selection. The principle is to represent available objects by motion in the interface, have users identify a target by mimicking its specific motion, and use the correlation between the system’s output with the user’s input to determine the selection. The resulting interaction has compelling properties, as users are guided by motion feedback, and only need to copy a presented motion. Motion correlation has been explored in earlier work but only recently begun to feature in holistic interface designs. We provide a first comprehensive review of the principle, and present an analysis of five previously published works, in which motion correlation underpinned the design of novel gaze and gesture interfaces for diverse application contexts. We derive guidelines for motion correlation algorithms, motion feedback, choice of modalities, overall design of motion correlation interfaces, and identify opportunities and challenges identified for future research and design.

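One common way to operationalise the principle the abstract describes is to correlate the user's recent input trajectory with the displayed trajectory of every moving object and select the best match above a threshold. The windowing, per-axis Pearson correlation, and threshold value below are illustrative choices rather than the article's guidelines.

import numpy as np

def select_by_motion_correlation(input_xy, targets_xy, threshold=0.8):
    # input_xy:   (n, 2) array of recent input positions (gaze, hand, ...)
    # targets_xy: dict of target id -> (n, 2) array of that target's displayed positions
    best_id, best_score = None, threshold
    for target_id, trajectory in targets_xy.items():
        rx = np.corrcoef(input_xy[:, 0], trajectory[:, 0])[0, 1]
        ry = np.corrcoef(input_xy[:, 1], trajectory[:, 1])[0, 1]
        score = min(rx, ry)              # require both axes to match
        if score > best_score:
            best_id, best_score = target_id, score
    return best_id   # None if no target's motion matches the input confidently
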
AURORA: autonomous real-time on-board video analytics.
Angelov, P., Sadeghi Tehran, P., & Clarke, C.
Neural Computing and Applications, 28(5): 855–865. 2017.

@article{angelov2017aurora,
  title={AURORA: autonomous real-time on-board video analytics},
  author={Angelov, Plamen and Sadeghi Tehran, Pouria and Clarke, Christopher},
  journal={Neural Computing and Applications},
  volume={28},
  number={5},
  pages={855--865},
  year={2017}
}

2016 (1)

TraceMatch: A Computer Vision Technique for User Input by Tracing of Animated Controls.
Clarke, C., Bellino, A., Esteves, A., Velloso, E., & Gellersen, H.
In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing, of UbiComp '16, pages 298–303, New York, NY, USA, 2016. Association for Computing Machinery.

@inproceedings{10.1145/2971648.2971714,
  author = {Clarke, Christopher and Bellino, Alessio and Esteves, Augusto and Velloso, Eduardo and Gellersen, Hans},
  title = {TraceMatch: A Computer Vision Technique for User Input by Tracing of Animated Controls},
  year = {2016},
  isbn = {9781450344616},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/2971648.2971714},
  doi = {10.1145/2971648.2971714},
  booktitle = {Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
  pages = {298–303},
  numpages = {6},
  keywords = {vision-based interfaces, path mimicry, remote control, input techniques, motion matching, computer vision, ubiquitous computing, gesture input, user input},
  location = {Heidelberg, Germany},
  series = {UbiComp '16}
}

Recent works have explored the concept of movement correlation interfaces, in which moving objects can be selected by matching the movement of the input device to that of the desired object. Previous techniques relied on a single modality (e.g. gaze or mid-air gestures) and specific hardware to issue commands. TraceMatch is a computer vision technique that enables input by movement correlation while abstracting from any particular input modality. The technique relies only on a conventional webcam to enable users to produce matching gestures with any given body parts, even whilst holding objects. We describe an implementation of the technique for acquisition of orbiting targets, evaluate algorithm performance for different target sizes and frequencies, and demonstrate use of the technique for remote control of graphical as well as physical objects with different body parts.

2015 (1)

Self-Defining Memory Cues: Creative Expression and Emotional Meaning.
Sas, C., Challioner, S., Clarke, C., Wilson, R., Coman, A., Clinch, S., Harding, M., & Davies, N.
In Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems, of CHI EA '15, pages 2013–2018, New York, NY, USA, 2015. Association for Computing Machinery.

@inproceedings{10.1145/2702613.2732842,
  author = {Sas, Corina and Challioner, Scott and Clarke, Christopher and Wilson, Ross and Coman, Alina and Clinch, Sarah and Harding, Mike and Davies, Nigel},
  title = {Self-Defining Memory Cues: Creative Expression and Emotional Meaning},
  year = {2015},
  isbn = {9781450331463},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/2702613.2732842},
  doi = {10.1145/2702613.2732842},
  booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
  pages = {2013–2018},
  numpages = {6},
  keywords = {emotions, meaningful daily events, doodling, creativity, episodic memory recall, self-generated cues},
  location = {Seoul, Republic of Korea},
  series = {CHI EA '15}
}

This paper explores how people generate cues for capturing personal meaningful daily events, which can be used for later recall. Such understanding can be explored to inform the design and development of personal informatics systems, aimed to support reflection and increased self-awareness. We describe a diary study with six participants and discuss initial findings showing the qualities of daily meaningful events, the value of different types of cues and their distinct contents for supporting episodic recall.

2014 (2)

A real-time approach for autonomous detection and tracking of moving objects from UAV.
Sadeghi-Tehran, P., Clarke, C., & Angelov, P.
In 2014 IEEE Symposium on Evolving and Autonomous Learning Systems (EALS), pages 43–49, 2014. IEEE.

@inproceedings{sadeghi2014real,
  title={A real-time approach for autonomous detection and tracking of moving objects from UAV},
  author={Sadeghi-Tehran, Pouria and Clarke, Christopher and Angelov, Plamen},
  booktitle={2014 IEEE Symposium on Evolving and Autonomous Learning Systems (EALS)},
  pages={43--49},
  year={2014},
  organization={IEEE}
}

Sariva: Smartphone app for real-time intelligent video analytics.
Clarke, C., & Angelov, P.
Journal of Automation, Mobile Robotics and Intelligent Systems, 15–19. 2014.

@article{clarke2014sariva,
  title={Sariva: Smartphone app for real-time intelligent video analytics},
  author={Clarke, Christopher and Angelov, Plamen},
  journal={Journal of Automation, Mobile Robotics and Intelligent Systems},
  pages={15--19},
  year={2014}
}