HCI publication list
Generated by bibbase.org from https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/hci.txt
You can create a new website with this list, or embed it in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/hci.txt?dl=0&jsonp=1"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/hci.txt?dl=0&jsonp=1");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/show?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/hci.txt?dl=0&jsonp=1"></iframe>

For more details see the documentation.
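The PHP snippet simply downloads the rendered list over HTTP and prints it, so the same payload can be fetched with any HTTP client. Below is a minimal Python sketch that mirrors it; it assumes the third-party requests package and is only an illustration, not part of BibBase's documented API.

# Minimal sketch: fetch the rendered BibBase payload server-side,
# mirroring the PHP snippet above (assumes the `requests` package).
import requests

BIBBASE_URL = ("https://bibbase.org/show"
               "?bib=https://kuis-ai-publications.s3.eu-central-1.amazonaws.com/hci.txt?dl=0"
               "&jsonp=1")

response = requests.get(BIBBASE_URL, timeout=30)
response.raise_for_status()      # fail loudly on HTTP errors
print(response.text)             # same payload the PHP snippet embeds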
2022 (1)
Use of Affective Visual Information for Summarization of Human-Centric Videos.
Köprü, B.; and Erzin, E.
CoRR, 1-14. 2022.
@article{DBLP:journals/corr/abs-2107-03783,
    author = {K{\"{o}}pr{\"{u}}, Berkay and Erzin, Engin},
    title = "Use of Affective Visual Information for Summarization of Human-Centric Videos",
    journal = "CoRR",
    year = "2022",
    abstract = "The increasing volume of user-generated human-centric video content and its applications, such as video retrieval and browsing, require compact representations addressed by the video summarization literature. Current supervised studies formulate video summarization as a sequence-to-sequence learning problem, and the existing solutions often neglect the surge of the human-centric view, which inherently contains affective content. In this study, we investigate the affective-information enriched supervised video summarization task for human-centric videos. First, we train a visual input-driven state-of-the-art continuous emotion recognition model (CER-NET) on the RECOLA dataset to estimate activation and valence attributes. Then, we integrate the estimated emotional attributes and their high-level embeddings from the CER-NET with the visual information to define the proposed affective video summarization (AVSUM) architectures. In addition, we investigate the use of attention to improve the AVSUM architectures and propose two new architectures based on temporal attention (TA-AVSUM) and spatial attention (SA-AVSUM). We conduct video summarization experiments on the TvSum and COGNIMUSE datasets. The proposed temporal attention-based TA-AVSUM architecture attains competitive video summarization performances with strong improvements for the human-centric videos compared to the state-of-the-art in terms of F-score, self-defined face recall, and rank correlation metrics.",
    keywords = "CV,HCI",
    pages = "1-14",
    url = "https://ieeexplore.ieee.org/document/9954146",
    doi = "10.1109/TAFFC.2022.3222882",
    publisher = "IEEE"
}
Abstract: The increasing volume of user-generated human-centric video content and its applications, such as video retrieval and browsing, require compact representations addressed by the video summarization literature. Current supervised studies formulate video summarization as a sequence-to-sequence learning problem, and the existing solutions often neglect the surge of the human-centric view, which inherently contains affective content. In this study, we investigate the affective-information enriched supervised video summarization task for human-centric videos. First, we train a visual input-driven state-of-the-art continuous emotion recognition model (CER-NET) on the RECOLA dataset to estimate activation and valence attributes. Then, we integrate the estimated emotional attributes and their high-level embeddings from the CER-NET with the visual information to define the proposed affective video summarization (AVSUM) architectures. In addition, we investigate the use of attention to improve the AVSUM architectures and propose two new architectures based on temporal attention (TA-AVSUM) and spatial attention (SA-AVSUM). We conduct video summarization experiments on the TvSum and COGNIMUSE datasets. The proposed temporal attention-based TA-AVSUM architecture attains competitive video summarization performances with strong improvements for the human-centric videos compared to the state-of-the-art in terms of F-score, self-defined face recall, and rank correlation metrics.
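The temporal-attention variant described above (TA-AVSUM) re-weights per-frame visual features, fused with the estimated emotion attributes, before scoring each frame for inclusion in the summary. The sketch below illustrates that general idea in PyTorch with assumed feature dimensions and layer sizes; it is an illustrative sketch, not the authors' implementation.

# Illustrative sketch of temporal attention for frame-importance scoring.
# Dimensions and layer sizes are assumptions; this is not the TA-AVSUM code.
import torch
import torch.nn as nn

class TemporalAttentionScorer(nn.Module):
    def __init__(self, feat_dim=1024, emo_dim=2, hidden=256, heads=4):
        super().__init__()
        # Fuse per-frame visual features with estimated emotion attributes
        # (e.g., activation and valence), as the abstract describes.
        self.proj = nn.Linear(feat_dim + emo_dim, hidden)
        self.attn = nn.MultiheadAttention(hidden, num_heads=heads, batch_first=True)
        self.score = nn.Linear(hidden, 1)

    def forward(self, visual, emotion):
        # visual: (batch, frames, feat_dim); emotion: (batch, frames, emo_dim)
        fused = self.proj(torch.cat([visual, emotion], dim=-1))
        attended, _ = self.attn(fused, fused, fused)             # self-attention over time
        return torch.sigmoid(self.score(attended)).squeeze(-1)   # per-frame scores in [0, 1]

# Example: 120 frames of 1024-d visual features plus 2 emotion attributes per frame.
scores = TemporalAttentionScorer()(torch.randn(1, 120, 1024), torch.randn(1, 120, 2))
print(scores.shape)  # torch.Size([1, 120])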
2020 (3)
Kart-ON: Affordable Early Programming Education with Shared Smartphones and Easy-to-Find Materials.
Sabuncuoğlu, A.; and Sezgin, M.
In Proceedings of the 25th International Conference on Intelligent User Interfaces Companion (IUI '20), pages 116–117, New York, NY, USA, 2020. Association for Computing Machinery.
@inproceedings{10.1145/3379336.3381472,
    author = "Sabuncuo\u{g}lu, Alpay and Sezgin, Metin",
    title = "Kart-ON: Affordable Early Programming Education with Shared Smartphones and Easy-to-Find Materials",
    year = "2020",
    isbn = "9781450375139",
    publisher = "Association for Computing Machinery",
    address = "New York, NY, USA",
    url = "https://doi.org/10.1145/3379336.3381472",
    doi = "10.1145/3379336.3381472",
    abstract = "Programming education has become an integral part of the primary school curriculum. However, most programming practices rely heavily on computers and electronics which causes inequalities across contexts with different socioeconomic levels. This demo introduces a new and convenient way of using tangibles for coding in classrooms. Our programming environment, Kart-ON, is designed as an affordable means to increase collaboration among students and decrease dependency on screen-based interfaces. Kart-ON is a tangible programming language that uses everyday objects such as paper, pen, fabrics as programming objects and employs a mobile phone as the compiler. Our preliminary studies with children (n=16, mage=12) show that Kart-ON boosts active and collaborative student participation in the tangible programming task, which is especially valuable in crowded classrooms with limited access to computational devices.",
    booktitle = "Proceedings of the 25th International Conference on Intelligent User Interfaces Companion",
    pages = "116–117",
    keywords = "Affordable tangible programming,Collaborative classroom,HCI",
    series = "IUI '20"
}
Abstract: Programming education has become an integral part of the primary school curriculum. However, most programming practices rely heavily on computers and electronics which causes inequalities across contexts with different socioeconomic levels. This demo introduces a new and convenient way of using tangibles for coding in classrooms. Our programming environment, Kart-ON, is designed as an affordable means to increase collaboration among students and decrease dependency on screen-based interfaces. Kart-ON is a tangible programming language that uses everyday objects such as paper, pen, fabrics as programming objects and employs a mobile phone as the compiler. Our preliminary studies with children (n=16, mean age=12) show that Kart-ON boosts active and collaborative student participation in the tangible programming task, which is especially valuable in crowded classrooms with limited access to computational devices.
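As the abstract explains, tokens made from everyday objects are recognized by the phone camera and "compiled" into a runnable program. The toy sketch below illustrates that token-to-command mapping; the token names and commands are invented for illustration and this is not Kart-ON's actual implementation.

# Hypothetical sketch of a tangible-programming interpreter: token labels
# recognized from the camera are mapped to commands and run in order.
# Token names and commands are invented; this is not Kart-ON's code.
from typing import Callable, Dict, List

COMMANDS: Dict[str, Callable[[List[str]], None]] = {
    "forward": lambda trace: trace.append("move forward"),
    "turn":    lambda trace: trace.append("turn right"),
    "beep":    lambda trace: trace.append("play sound"),
}

def run_program(recognized_tokens: List[str]) -> List[str]:
    """Execute recognized tokens in order, ignoring unknown ones."""
    trace: List[str] = []
    for token in recognized_tokens:
        COMMANDS.get(token, lambda t: None)(trace)
    return trace

print(run_program(["forward", "turn", "forward", "beep"]))
# ['move forward', 'turn right', 'move forward', 'play sound']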
Data-driven vibrotactile rendering of digital buttons on touchscreens.
Sadia, B.; Emgin, S. E.; Sezgin, T. M.; and Basdogan, C.
International Journal of Human-Computer Studies, 135: 102363. Mar 2020.
@article{Sadia_2020,
    author = "Sadia, Bushra and Emgin, Senem Ezgi and Sezgin, T. Metin and Basdogan, Cagatay",
    title = "Data-driven vibrotactile rendering of digital buttons on touchscreens",
    volume = "135",
    issn = "1071-5819",
    url = "http://dx.doi.org/10.1016/j.ijhcs.2019.09.005",
    doi = "10.1016/j.ijhcs.2019.09.005",
    journal = "International Journal of Human-Computer Studies",
    publisher = "Elsevier BV",
    year = "2020",
    month = "Mar",
    pages = "102363",
    keywords = "HCI"
}
A Review of Surface Haptics: Enabling Tactile Effects on Touch Surfaces.
Basdogan, C.; Giraud, F.; Levesque, V.; and Choi, S.
IEEE Transactions on Haptics, 13(3). 2020.
@article{Basdogan_2020,
    author = "Basdogan, Cagatay and Giraud, Frederic and Levesque, Vincent and Choi, Seungmoon",
    title = "A Review of Surface Haptics: Enabling Tactile Effects on Touch Surfaces",
    volume = "13",
    issn = "2334-0134",
    url = "http://dx.doi.org/10.1109/TOH.2020.2990712",
    doi = "10.1109/toh.2020.2990712",
    number = "3",
    journal = "IEEE Transactions on Haptics",
    publisher = "Institute of Electrical and Electronics Engineers (IEEE)",
    year = "2020",
    keywords = "HCI"
}
2018 (3)
Multimodal prediction of head nods in dyadic conversations.
Türker, B. B.; Sezgin, T. M.; Yemez, Y.; and Erzin, E.
2018 26th Signal Processing and Communications Applications Conference (SIU), 1-4. 2018.
@article{Trker2018MultimodalPO,
    author = {T{\"u}rker, Bekir Berker and Sezgin, T. M. and Yemez, Y. and Erzin, E.},
    title = "Multimodal prediction of head nods in dyadic conversations",
    journal = "2018 26th Signal Processing and Communications Applications Conference (SIU)",
    year = "2018",
    pages = "1-4",
    keywords = "HCI"
}
Audio-Visual Prediction of Head-Nod and Turn-Taking Events in Dyadic Interactions.
Türker, B. B.; Erzin, E.; Yemez, Y.; and Sezgin, T. M.
In INTERSPEECH, 2018.
@inproceedings{Trker2018AudioVisualPO,
    author = {T{\"u}rker, Bekir Berker and Erzin, E. and Yemez, Y. and Sezgin, T. M.},
    title = "Audio-Visual Prediction of Head-Nod and Turn-Taking Events in Dyadic Interactions",
    booktitle = "INTERSPEECH",
    year = "2018",
    keywords = "HCI"
}
Multifaceted Engagement in Social Interaction with a Machine: The JOKER Project.
Devillers, L.; Rosset, S.; Duplessis, G. D.; Bechade, L.; Yemez, Y.; Türker, B. B.; Sezgin, T. M.; Erzin, E.; Haddad, K.; Dupont, S.; Deléglise, P.; Estève, Y.; Lailler, C.; Gilmartin, E.; and Campbell, N.
2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), 697-701. 2018.
@article{Devillers2018MultifacetedEI,
    author = {Devillers, L. and Rosset, S. and Duplessis, G. D. and Bechade, Lucile and Yemez, Y. and T{\"u}rker, Bekir Berker and Sezgin, T. M. and Erzin, E. and Haddad, K. and Dupont, S. and Del{\'e}glise, P. and Est{\`e}ve, Y. and Lailler, C. and Gilmartin, E. and Campbell, N.},
    title = "Multifaceted Engagement in Social Interaction with a Machine: The JOKER Project",
    journal = "2018 13th IEEE International Conference on Automatic Face \& Gesture Recognition (FG 2018)",
    year = "2018",
    pages = "697-701",
    keywords = "HCI,MSP"
}
2017 (2)
Characterizing user behavior for speech and sketch-based video retrieval interfaces.
Altiok, O. C.; and Sezgin, T. M.
In CAe@Expressive, 2017.
@inproceedings{Altiok2017CharacterizingUB,
    author = "Altiok, Ozan Can and Sezgin, T. M.",
    title = "Characterizing user behavior for speech and sketch-based video retrieval interfaces",
    booktitle = "CAe@Expressive",
    year = "2017",
    keywords = "HCI"
}
Visualization Literacy at Elementary School.
Alper, B.; Riche, N.; Chevalier, F.; Boy, J.; and Sezgin, T. M.
Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems. 2017.
@article{Alper2017VisualizationLA,
    author = "Alper, B. and Riche, N. and Chevalier, F. and Boy, Jeremy and Sezgin, T. M.",
    title = "Visualization Literacy at Elementary School",
    journal = "Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems",
    year = "2017",
    keywords = "HCI"
}
2016 (2)
Semantic Sketch-Based Video Retrieval with Autocompletion.
Tanase, C.; Giangreco, I.; Rossetto, L.; Schuldt, H.; Seddati, O.; Dupont, S.; Altiok, O. C.; and Sezgin, T. M.
Companion Publication of the 21st International Conference on Intelligent User Interfaces. 2016.
@article{Tanase2016SemanticSV,
    author = "Tanase, C. and Giangreco, Ivan and Rossetto, L. and Schuldt, H. and Seddati, Omar and Dupont, S. and Altiok, Ozan Can and Sezgin, T. M.",
    title = "Semantic Sketch-Based Video Retrieval with Autocompletion",
    journal = "Companion Publication of the 21st International Conference on Intelligent User Interfaces",
    year = "2016",
    keywords = "HCI"
}
Gaze-Based Biometric Authentication: Hand-Eye Coordination Patterns as a Biometric Trait.
Çığ, Ç.; and Sezgin, T. M.
In Expressive, 2016.
@inproceedings{2016GazeBasedBA,
    author = "{\c{C}}{\i}{\u{g}}, {\c{C}}a{\u{g}}la and Sezgin, T. M.",
    title = "Gaze-Based Biometric Authentication: Hand-Eye Coordination Patterns as a Biometric Trait",
    booktitle = "Expressive",
    year = "2016",
    keywords = "HCI"
}