var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fdaniele.dimitri&jsonp=1&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fdaniele.dimitri&jsonp=1\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fdaniele.dimitri&jsonp=1\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2025\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n On-Your Marks, Ready? Exploring the User Experience of a VR Application for Runners with Cognitive-Behavioral Influences.\n \n \n \n \n\n\n \n Hernandez, F. C.; Schneider, J.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n In pages 331–341, December 2025. \n \n\n\n\n
\n\n\n\n \n \n \"On-YourPaper\n  \n \n \n \"On-Your paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hernandez_-your_2025,\n\ttitle = {On-{Your} {Marks}, {Ready}? {Exploring} the {User} {Experience} of a {VR} {Application} for {Runners} with {Cognitive}-{Behavioral} {Influences}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-989-758-746-7},\n\tshorttitle = {On-{Your} {Marks}, {Ready}?},\n\turl = {https://www.scitepress.org/Link.aspx?doi=10.5220/0013271300003932},\n\tabstract = {Digital Library},\n\turldate = {2025-12-11},\n\tauthor = {Hernandez, Fernando Cardenas and Schneider, Jan and Di Mitri, Daniele and Drachsler, Hendrik},\n\tmonth = dec,\n\tyear = {2025},\n\tpages = {331--341},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/TILXEHVP/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Digital Library\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Effect of AI‐Based Systems on Mathematics Achievement in Rural Context: A Quantitative Study.\n \n \n \n \n\n\n \n Khazanchi, R.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n Journal of Computer Assisted Learning, 41(1): e13098. February 2025.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{khazanchi_effect_2025,\n\ttitle = {The {Effect} of {AI}‐{Based} {Systems} on {Mathematics} {Achievement} in {Rural} {Context}: {A} {Quantitative} {Study}},\n\tvolume = {41},\n\tissn = {0266-4909, 1365-2729},\n\tshorttitle = {The {Effect} of {AI}‐{Based} {Systems} on {Mathematics} {Achievement} in {Rural} {Context}: {A} {Quantitative} {Study}},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.13098},\n\tdoi = {10.1111/jcal.13098},\n\tabstract = {ABSTRACT\n            \n              Background\n              Despite educational advances, poor mathematics achievement persists among K‐12 students, particularly in rural areas with limited resources and skilled teachers. Artificial Intelligence (AI) based systems have increasingly been adopted to support the diverse learning needs of students and have been shown to enhance mathematics achievement through personalized learning experiences by adapting to student's needs. Existing studies have documented the positive impact of AI‐based systems on mathematics achievement. However, there is a lack of research that has studied AI‐based systems on students' mathematics achievement and engagement in rural settings. The present study addresses the existing literature gap by focusing on the AI‐based Edmentum Exact Path, analyzing its effectiveness in enhancing mathematics achievement and engagement among 8th‐grade students in rural schools in the Southern United States. 
By examining both cognitive and affective engagement, along with students' mathematics achievement, this research explores the role of AI‐based systems in student learning.\n            \n            \n              Objective\n              This study aims to explore the effectiveness of Edmentum Exact Path, an AI‐based system, in improving 8th‐grade students' mathematics achievement and affective and cognitive engagement in the Southern United States.\n            \n            \n              Methods\n              \n                This study utilized a quasi‐experimental design involving 78 students from socioeconomically disadvantaged backgrounds, comparing those receiving supplemental Edmentum Exact Path‐led instruction and traditional teacher‐led methods to a control group receiving only the teacher‐led instructions. Mathematics achievement was measured through pre‐tests and post‐tests, while student engagement was assessed using the 5‐point Student Engagement Instrument (SEI). Statistical analysis was performed using\n                t\n                ‐tests and ANOVA.\n              \n            \n            \n              Results and Conclusions\n              The findings revealed statistically significant improvements in mathematics achievement for both the experimental and control groups. However, the group using teacher‐led instruction showed a statistically significant improvement in affective engagement, while no statistically significant differences were observed in cognitive engagement between groups.\n            \n            \n              Implications\n              The findings suggest that integrating AI‐based systems like Edmentum Exact Path may enhance affective engagement in mathematics among students from rural areas. However, the impact on cognitive engagement remains unclear, indicating a need for further research. 
These results support the potential for integrating AI‐based systems to enhance mathematics achievement in rural settings.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2025-11-19},\n\tjournal = {Journal of Computer Assisted Learning},\n\tauthor = {Khazanchi, Rashmi and Di Mitri, Daniele and Drachsler, Hendrik},\n\tmonth = feb,\n\tyear = {2025},\n\tpages = {e13098},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/TZLWEC6I/file/view}\n}\n\n\n\n
\n
\n\n\n
\n ABSTRACT Background Despite educational advances, poor mathematics achievement persists among K‐12 students, particularly in rural areas with limited resources and skilled teachers. Artificial Intelligence (AI) based systems have increasingly been adopted to support the diverse learning needs of students and have been shown to enhance mathematics achievement through personalized learning experiences by adapting to student's needs. Existing studies have documented the positive impact of AI‐based systems on mathematics achievement. However, there is a lack of research that has studied AI‐based systems on students' mathematics achievement and engagement in rural settings. The present study addresses the existing literature gap by focusing on the AI‐based Edmentum Exact Path, analyzing its effectiveness in enhancing mathematics achievement and engagement among 8th‐grade students in rural schools in the Southern United States. By examining both cognitive and affective engagement, along with students' mathematics achievement, this research explores the role of AI‐based systems in student learning. Objective This study aims to explore the effectiveness of Edmentum Exact Path, an AI‐based system, in improving 8th‐grade students' mathematics achievement and affective and cognitive engagement in the Southern United States. Methods This study utilized a quasi‐experimental design involving 78 students from socioeconomically disadvantaged backgrounds, comparing those receiving supplemental Edmentum Exact Path‐led instruction and traditional teacher‐led methods to a control group receiving only the teacher‐led instructions. Mathematics achievement was measured through pre‐tests and post‐tests, while student engagement was assessed using the 5‐point Student Engagement Instrument (SEI). Statistical analysis was performed using t ‐tests and ANOVA. 
Results and Conclusions The findings revealed statistically significant improvements in mathematics achievement for both the experimental and control groups. However, the group using teacher‐led instruction showed a statistically significant improvement in affective engagement, while no statistically significant differences were observed in cognitive engagement between groups. Implications The findings suggest that integrating AI‐based systems like Edmentum Exact Path may enhance affective engagement in mathematics among students from rural areas. However, the impact on cognitive engagement remains unclear, indicating a need for further research. These results support the potential for integrating AI‐based systems to enhance mathematics achievement in rural settings.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From Nervous to Noteworthy: Evaluating SPEAKS, an Educational Software for Speech Content.\n \n \n \n \n\n\n \n Mouhammad, N.; Schneider, J.; Klemke, R.; and Di Mitri, D.\n\n\n \n\n\n\n In European Conference on e-Learning, volume 24, pages 271–280, October 2025. \n \n\n\n\n
\n\n\n\n \n \n \"FromPaper\n  \n \n \n \"From paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{mouhammad_nervous_2025,\n\ttitle = {From {Nervous} to {Noteworthy}: {Evaluating} {SPEAKS}, an {Educational} {Software} for {Speech} {Content}},\n\tvolume = {24},\n\tcopyright = {All rights reserved},\n\tshorttitle = {From {Nervous} to {Noteworthy}},\n\turl = {https://papers.academic-conferences.org/index.php/ecel/article/view/4104},\n\tdoi = {10.34190/ecel.24.1.4104},\n\tabstract = {Public speaking is a critical competency for professionals, yet many higher education graduates lack sufficient training in this area. While educational institutions offer public speaking courses, these often cannot provide the level of individualized support students need as this would be too resource-intensive. Although various software tools exist to bridge this gap by supporting students with aspects such as non-verbal communication and slide design, there is a notable lack of tools that assist with the preparation of speech content—a key component of effective public speaking. To address this gap, we developed SPEAKS (Speech content Preparation for Effective and Authentic Knowledge Sharing), an educational application that guides students through preparing speech content while teaching them how to do so effectively. SPEAKS uses a scripted, chat-based interaction with a humorous and empathetic cockatoo character, who leads users step by step through seven stages of speech preparation. These include defining audience knowledge, setting learning goals for the audience, crafting engaging introductions, and ensuring coherence between the introduction and conclusion. The tool is grounded in public speaking pedagogy and expert interviews with 13 public speaking instructors, which informed both the instructional content and the design principles. We conducted a user study with 17 participants to evaluate SPEAKS in terms of perceived usability, perceived usefulness, perceived learning, and its impact on public speaking-related confidence. 
The results show that SPEAKS is easy to learn and use, and participants found its guidance-based support helpful for preparing speech content. Importantly, users reported a significant increase in their confidence to deliver a good speech after using the tool. Qualitative feedback further confirmed the tool’s usability and highlighted its strengths in supporting structured content development. Participants also identified areas for improvement, which can inform future iterations of the application. Overall, the findings suggest that guidance-based software like SPEAKS can effectively supplement traditional public speaking training by addressing the often-overlooked aspect of content preparation, especially in contexts where in-person resources are limited.},\n\tlanguage = {en},\n\turldate = {2025-10-19},\n\tbooktitle = {European {Conference} on e-{Learning}},\n\tauthor = {Mouhammad, Nina and Schneider, Jan and Klemke, Roland and Di Mitri, Daniele},\n\tmonth = oct,\n\tyear = {2025},\n\tpages = {271--280},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/IG3EW4IC/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Public speaking is a critical competency for professionals, yet many higher education graduates lack sufficient training in this area. While educational institutions offer public speaking courses, these often cannot provide the level of individualized support students need as this would be too resource-intensive. Although various software tools exist to bridge this gap by supporting students with aspects such as non-verbal communication and slide design, there is a notable lack of tools that assist with the preparation of speech content—a key component of effective public speaking. To address this gap, we developed SPEAKS (Speech content Preparation for Effective and Authentic Knowledge Sharing), an educational application that guides students through preparing speech content while teaching them how to do so effectively. SPEAKS uses a scripted, chat-based interaction with a humorous and empathetic cockatoo character, who leads users step by step through seven stages of speech preparation. These include defining audience knowledge, setting learning goals for the audience, crafting engaging introductions, and ensuring coherence between the introduction and conclusion. The tool is grounded in public speaking pedagogy and expert interviews with 13 public speaking instructors, which informed both the instructional content and the design principles. We conducted a user study with 17 participants to evaluate SPEAKS in terms of perceived usability, perceived usefulness, perceived learning, and its impact on public speaking-related confidence. The results show that SPEAKS is easy to learn and use, and participants found its guidance-based support helpful for preparing speech content. Importantly, users reported a significant increase in their confidence to deliver a good speech after using the tool. Qualitative feedback further confirmed the tool’s usability and highlighted its strengths in supporting structured content development. 
Participants also identified areas for improvement, which can inform future iterations of the application. Overall, the findings suggest that guidance-based software like SPEAKS can effectively supplement traditional public speaking training by addressing the often-overlooked aspect of content preparation, especially in contexts where in-person resources are limited.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Enhancing presentation skills: key technical features of automated feedback systems - a systematic feature analysis.\n \n \n \n \n\n\n \n Hummel, S.; Schneider, J.; Mouhammad, N.; Klemke, R.; and Di Mitri, D.\n\n\n \n\n\n\n International Journal of Technology Enhanced Learning, 17(6): 1–25. 2025.\n \n\n\n\n
\n\n\n\n \n \n \"EnhancingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hummel_enhancing_2025,\n\ttitle = {Enhancing presentation skills: key technical features of automated feedback systems - a systematic feature analysis},\n\tvolume = {17},\n\tissn = {1753-5255, 1753-5263},\n\tshorttitle = {Enhancing presentation skills},\n\turl = {http://www.inderscience.com/link.php?id=148593},\n\tdoi = {10.1504/IJTEL.2025.148593},\n\tlanguage = {en},\n\tnumber = {6},\n\turldate = {2025-10-21},\n\tjournal = {International Journal of Technology Enhanced Learning},\n\tauthor = {Hummel, Stefan and Schneider, Jan and Mouhammad, Nina and Klemke, Roland and Di Mitri, Daniele},\n\tyear = {2025},\n\tpages = {1--25},\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Through the Telescope: A Systematic Review of Intelligent Tutoring Systems and Their Applications in Psychomotor Skill Learning.\n \n \n \n \n\n\n \n Romano, G.; Schneider, J.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n International Journal of Artificial Intelligence in Education. November 2025.\n \n\n\n\n
\n\n\n\n \n \n \"ThroughPaper\n  \n \n \n \"Through paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{romano_through_2025,\n\ttitle = {Through the {Telescope}: {A} {Systematic} {Review} of {Intelligent} {Tutoring} {Systems} and {Their} {Applications} in {Psychomotor} {Skill} {Learning}},\n\tcopyright = {Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License},\n\tissn = {1560-4306},\n\tshorttitle = {Through the {Telescope}},\n\turl = {https://doi.org/10.1007/s40593-025-00526-1},\n\tdoi = {10.1007/s40593-025-00526-1},\n\tabstract = {Intelligent Tutoring Systems (ITS) for psychomotor skills provide an accessible, scalable, and efficient solution, compared to tutors. Despite successes in the past, the research progress seems stagnating. Part of the reasons can be due to developing ITS for a very specific skill or task. In consequence, skills and the applications of those in their entirety are not presented.},\n\tlanguage = {en},\n\turldate = {2025-11-19},\n\tjournal = {International Journal of Artificial Intelligence in Education},\n\tauthor = {Romano, Gianluca and Schneider, Jan and Di Mitri, Daniele and Drachsler, Hendrik},\n\tmonth = nov,\n\tyear = {2025},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/8VUIA2FD/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Intelligent Tutoring Systems (ITS) for psychomotor skills provide an accessible, scalable, and efficient solution, compared to tutors. Despite successes in the past, the research progress seems stagnating. Part of the reasons can be due to developing ITS for a very specific skill or task. In consequence, skills and the applications of those in their entirety are not presented.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluating WEBPOSE, a Posture Feedback System for Oral Presentations.\n \n \n \n \n\n\n \n Hummel, S.; Alomari, M.; Schneider, J.; Mouhammad, N.; Klemke, R.; and Di Mitri, D.\n\n\n \n\n\n\n European Conference on e-Learning, 24(1): 162–169. October 2025.\n \n\n\n\n
\n\n\n\n \n \n \"EvaluatingPaper\n  \n \n \n \"Evaluating paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hummel_evaluating_2025,\n\ttitle = {Evaluating {WEBPOSE}, a {Posture} {Feedback} {System} for {Oral} {Presentations}},\n\tvolume = {24},\n\tcopyright = {Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License},\n\tissn = {2048-8645},\n\turl = {https://papers.academic-conferences.org/index.php/ecel/article/view/4285},\n\tdoi = {10.34190/ecel.24.1.4285},\n\tabstract = {Effective oral communication is a crucial skill in academic and professional contexts. However, practising and refining these skills is challenging without structured guidance and feedback. This paper presents a user evaluation of WEBPOSE, a web-based Oral Presentation Automated Feedback (OPAF) system that provides immediate feedback on posture to improve non-verbal communication skills. In this study, WEBPOSE was tested with sixteen researchers specialising in educational technology. Using a mixed-methods approach, which included a Technology Acceptance Model (TAM) questionnaire, open-ended questions, and observational data, we investigated the perceived user experience, usability, and usefulness of the system. The results indicate that WEBPOSE was generally perceived as user-friendly and beneficial for fostering self-awareness around body language and presentation timing. Moreover, user feedback also highlighted non-functional and functional points of improvement for WEBPOSE, such as improving the visualisations of the system status, and the display of the immediate feedback. This paper concludes with design implications for improving user guidance, feedback mechanisms, and the integration of structured rehearsal stages. 
These insights aim to inform the future development of scalable, user-centred OPAF systems that can effectively support the development of presentation skills.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2025-10-19},\n\tjournal = {European Conference on e-Learning},\n\tauthor = {Hummel, Stefan and Alomari, Mohamad and Schneider, Jan and Mouhammad, Nina and Klemke, Roland and Di Mitri, Daniele},\n\tmonth = oct,\n\tyear = {2025},\n\tpages = {162--169},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/EUAKIIR4/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Effective oral communication is a crucial skill in academic and professional contexts. However, practising and refining these skills is challenging without structured guidance and feedback. This paper presents a user evaluation of WEBPOSE, a web-based Oral Presentation Automated Feedback (OPAF) system that provides immediate feedback on posture to improve non-verbal communication skills. In this study, WEBPOSE was tested with sixteen researchers specialising in educational technology. Using a mixed-methods approach, which included a Technology Acceptance Model (TAM) questionnaire, open-ended questions, and observational data, we investigated the perceived user experience, usability, and usefulness of the system. The results indicate that WEBPOSE was generally perceived as user-friendly and beneficial for fostering self-awareness around body language and presentation timing. Moreover, user feedback also highlighted non-functional and functional points of improvement for WEBPOSE, such as improving the visualisations of the system status, and the display of the immediate feedback. This paper concludes with design implications for improving user guidance, feedback mechanisms, and the integration of structured rehearsal stages. These insights aim to inform the future development of scalable, user-centred OPAF systems that can effectively support the development of presentation skills.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2024\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n I don't have time! But keep me in the loop: Co-designing requirements for a learning analytics cockpit with teachers.\n \n \n \n \n\n\n \n Karademir, O.; Di Mitri, D.; Schneider, J.; Jivet, I.; Allmang, J.; Gombert, S.; Kubsch, M.; Neumann, K.; and Drachsler, H.\n\n\n \n\n\n\n Journal of Computer Assisted Learning, 40(6): 2681–2699. December 2024.\n \n\n\n\n
\n\n\n\n \n \n \"IPaper\n  \n \n \n \"I paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{karademir_i_2024,\n\ttitle = {I don't have time! {But} keep me in the loop: {Co}-designing requirements for a learning analytics cockpit with teachers},\n\tvolume = {40},\n\tcopyright = {All rights reserved},\n\tissn = {0266-4909, 1365-2729},\n\tshorttitle = {I don't have time! {But} keep me in the loop},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12997},\n\tdoi = {10.1111/jcal.12997},\n\tabstract = {Abstract\n            \n              Background\n              Teacher dashboards can help secondary school teachers manage online learning activities and inform instructional decisions by visualising information about class learning. However, when designing teacher dashboards, it is not trivial to choose which information to display, because not all of the vast amount of information retrieved from digital learning environments is useful for teaching. Information elicited from formative assessment (FA), though, is a strong predictor for student performance and can be a useful data source for effective teacher dashboards. Especially in the secondary education context, FA and feedback on FA, have been extensively studied and shown to positively affect student learning outcomes. 
Moreover, secondary teachers struggle to make sense of the information displayed in dashboards and decide on pedagogical actions, such as providing feedback to students.\n            \n            \n              Objectives\n              To facilitate the provision of feedback for secondary school teachers via a teacher dashboard, this study identifies requirements for designing a Learning Analytics Cockpit (LA Cockpit), that is, (1) a teacher dashboard that provides teachers with visualisations of results from formative assessment (FA) and (2) a feedback system that supports teachers in providing feedback to students.\n            \n            \n              Methods\n              This study was conducted in the context of STEM classes and is based on semi‐structured co‐design interviews with German secondary school teachers. In these interviews, we first explored challenges teachers encountered in monitoring students' learning and providing feedback. Second, in the ideation phase, teachers were asked to define features an LA Cockpit for FA should have. Finally, in the evaluation phase, we provided teachers with a design template for an LA Cockpit, the LAC\\_Template, which was built upon our previous work and feedback theory, and asked them to evaluate and improve it. Further design requirements were derived based on the evaluation of the LAC\\_Template and teachers' suggestions for improvement.\n            \n            \n              Results\n              We derived 16 requirements for designing an LA Cockpit for FA in secondary schools. Findings from the interviews indicated that the feedback system of an LA Cockpit should address teachers' time limitations in giving students individualised feedback. It should therefore be designed to minimise the steps required to deliver feedback. To reduce workload, teachers requested an automated reminder to send feedback, but with the ability to adjust feedback to the learning context. 
Such a semi‐automated feedback system can help teachers support students individually but also underline the importance of actively involving teachers in the feedback loop and giving them control when using such technologies in secondary school practice. A challenge for future teacher dashboard designs could be to find a balance between technology and teacher control that utilises the strengths of both in a beneficial combination.\n            \n          , \n            Lay Description\n            \n              What is already known about this topic\n              \n                \n                  \n                    Despite the potential of teacher dashboards to aid instruction, their designs often result in teachers struggling to derive insights from dashboards\n                  \n                  \n                    Most teacher dashboards are designed to display student information but not to facilitate the provision of feedback from teachers to students\n                  \n                  \n                    Results from formative assessment (FA) serve as a strong predictor for student performance, making them a valuable data source for teacher dashboards\n                  \n                \n              \n            \n            \n              What this paper adds\n              \n                \n                  \n                    This paper provides guidelines to develop a Learning Analytics Cockpit, functioning as a teacher dashboard with an integrated feedback system for FA\n                  \n                  \n                    A Learning Analytics Cockpit has the potential to mitigate teachers' lack of time in providing individualised feedback to students\n                  \n                  \n                    While automating feedback can reduce workload, teachers prefer active involvement over complete automation.\n                  \n                \n              \n            \n            \n              
Implications for practice and/or policy\n              \n                \n                  \n                    We show how teacher dashboards can go beyond just visualising students' information and enhanced to enable the provision of feedback\n                  \n                  \n                    When incorporating automated features into teacher dashboards, it is crucial to actively involve the role of teachers to ensure beneficial cooperation between teachers and technology},\n\tlanguage = {en},\n\tnumber = {6},\n\turldate = {2025-10-16},\n\tjournal = {Journal of Computer Assisted Learning},\n\tauthor = {Karademir, Onur and Di Mitri, Daniele and Schneider, Jan and Jivet, Ioana and Allmang, Jörn and Gombert, Sebastian and Kubsch, Marcus and Neumann, Knut and Drachsler, Hendrik},\n\tmonth = dec,\n\tyear = {2024},\n\tpages = {2681--2699},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/9W8KYWZX/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Abstract Background Teacher dashboards can help secondary school teachers manage online learning activities and inform instructional decisions by visualising information about class learning. However, when designing teacher dashboards, it is not trivial to choose which information to display, because not all of the vast amount of information retrieved from digital learning environments is useful for teaching. Information elicited from formative assessment (FA), though, is a strong predictor for student performance and can be a useful data source for effective teacher dashboards. Especially in the secondary education context, FA and feedback on FA, have been extensively studied and shown to positively affect student learning outcomes. Moreover, secondary teachers struggle to make sense of the information displayed in dashboards and decide on pedagogical actions, such as providing feedback to students. Objectives To facilitate the provision of feedback for secondary school teachers via a teacher dashboard, this study identifies requirements for designing a Learning Analytics Cockpit (LA Cockpit), that is, (1) a teacher dashboard that provides teachers with visualisations of results from formative assessment (FA) and (2) a feedback system that supports teachers in providing feedback to students. Methods This study was conducted in the context of STEM classes and is based on semi‐structured co‐design interviews with German secondary school teachers. In these interviews, we first explored challenges teachers encountered in monitoring students' learning and providing feedback. Second, in the ideation phase, teachers were asked to define features an LA Cockpit for FA should have. Finally, in the evaluation phase, we provided teachers with a design template for an LA Cockpit, the LAC_Template, which was built upon our previous work and feedback theory, and asked them to evaluate and improve it. 
Further design requirements were derived based on the evaluation of the LAC_Template and teachers' suggestions for improvement. Results We derived 16 requirements for designing an LA Cockpit for FA in secondary schools. Findings from the interviews indicated that the feedback system of an LA Cockpit should address teachers' time limitations in giving students individualised feedback. It should therefore be designed to minimise the steps required to deliver feedback. To reduce workload, teachers requested an automated reminder to send feedback, but with the ability to adjust feedback to the learning context. Such a semi‐automated feedback system can help teachers support students individually but also underline the importance of actively involving teachers in the feedback loop and giving them control when using such technologies in secondary school practice. A challenge for future teacher dashboard designs could be to find a balance between technology and teacher control that utilises the strengths of both in a beneficial combination. , Lay Description What is already known about this topic Despite the potential of teacher dashboards to aid instruction, their designs often result in teachers struggling to derive insights from dashboards Most teacher dashboards are designed to display student information but not to facilitate the provision of feedback from teachers to students Results from formative assessment (FA) serve as a strong predictor for student performance, making them a valuable data source for teacher dashboards What this paper adds This paper provides guidelines to develop a Learning Analytics Cockpit, functioning as a teacher dashboard with an integrated feedback system for FA A Learning Analytics Cockpit has the potential to mitigate teachers' lack of time in providing individualised feedback to students While automating feedback can reduce workload, teachers prefer active involvement over complete automation. 
Implications for practice and/or policy We show how teacher dashboards can go beyond just visualising students' information and enhanced to enable the provision of feedback When incorporating automated features into teacher dashboards, it is crucial to actively involve the role of teachers to ensure beneficial cooperation between teachers and technology\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Achieving Tailored Feedback by Means of a Teacher Dashboard? Insights into Teachers’ Feedback Practices.\n \n \n \n \n\n\n \n Borgards, L.; Karademir, O.; Strauß, S.; Di Mitri, D.; Kubsch, M.; Brobeil, M.; Grimm, A.; Gombert, S.; Neumann, K.; Drachsler, H.; Scheffel, M.; and Rummel, N.\n\n\n \n\n\n\n In Technology Enhanced Learning for Inclusive and Equitable Quality Education: 19th European Conference on Technology Enhanced Learning, EC-TEL 2024, Krems, Austria, September 16–20, 2024, Proceedings, Part II, pages 75–80, Berlin, Heidelberg, September 2024. Springer-Verlag\n \n\n\n\n
\n\n\n\n \n \n \"AchievingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{borgards_achieving_2024,\n\taddress = {Berlin, Heidelberg},\n\ttitle = {Achieving {Tailored} {Feedback} by {Means} of a {Teacher} {Dashboard}? {Insights} into {Teachers}’ {Feedback} {Practices}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-72311-7},\n\tshorttitle = {Achieving {Tailored} {Feedback} by {Means} of a {Teacher} {Dashboard}?},\n\turl = {https://doi.org/10.1007/978-3-031-72312-4_8},\n\tdoi = {10.1007/978-3-031-72312-4_8},\n\tabstract = {Providing feedback is a crucial element of teaching and powerful for improving student learning. Yet, monitoring and assessing individual students’ performance to provide tailored feedback can be challenging, especially in large classes. Teacher dashboards can provide teachers with information about individual students and support them in identifying student needs and providing feedback aligned with students’ performance. We conducted a field study in German secondary schools (N = 7 teachers, n = 225 students) to investigate which dashboard information teachers based their feedback on, and to what extent teachers used the information to tailor their feedback to students’ needs. Our study adds empirical evidence that teachers can utilise teacher dashboards to provide students on different performance levels with differential feedback.},\n\turldate = {2024-10-31},\n\tbooktitle = {Technology {Enhanced} {Learning} for {Inclusive} and {Equitable} {Quality} {Education}: 19th {European} {Conference} on {Technology} {Enhanced} {Learning}, {EC}-{TEL} 2024, {Krems}, {Austria}, {September} 16–20, 2024, {Proceedings}, {Part} {II}},\n\tpublisher = {Springer-Verlag},\n\tauthor = {Borgards, Lena and Karademir, Onur and Strauß, Sebastian and Di Mitri, Daniele and Kubsch, Marcus and Brobeil, Markus and Grimm, Adrian and Gombert, Sebastian and Neumann, Knut and Drachsler, Hendrik and Scheffel, Maren and Rummel, Nikol},\n\tmonth = sep,\n\tyear = {2024},\n\tpages = {75--80},\n}\n\n\n\n
\n
\n\n\n
\n Providing feedback is a crucial element of teaching and powerful for improving student learning. Yet, monitoring and assessing individual students’ performance to provide tailored feedback can be challenging, especially in large classes. Teacher dashboards can provide teachers with information about individual students and support them in identifying student needs and providing feedback aligned with students’ performance. We conducted a field study in German secondary schools (N = 7 teachers, n = 225 students) to investigate which dashboard information teachers based their feedback on, and to what extent teachers used the information to tailor their feedback to students’ needs. Our study adds empirical evidence that teachers can utilise teacher dashboards to provide students on different performance levels with differential feedback.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Preserving Privacy in Multimodal Learning Analytics with Visual Animation of Kinematic Data.\n \n \n \n \n\n\n \n Di Mitri, D.; Epp, A.; and Schneider, J.\n\n\n \n\n\n\n In Casalino, G.; Di Fuccio, R.; Fulantelli, G.; Raviolo, P.; Rivoltella, P. C.; Taibi, D.; and Toto, G. A., editor(s), Higher Education Learning Methodologies and Technologies Online, volume 2076, pages 678–698. Springer Nature Switzerland, Cham, 2024.\n Series Title: Communications in Computer and Information Science\n\n\n\n
\n\n\n\n \n \n \"PreservingPaper\n  \n \n \n \"Preserving paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{casalino_preserving_2024,\n\taddress = {Cham},\n\ttitle = {Preserving {Privacy} in {Multimodal} {Learning} {Analytics} with {Visual} {Animation} of {Kinematic} {Data}},\n\tvolume = {2076},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-67350-4 978-3-031-67351-1},\n\turl = {https://link.springer.com/10.1007/978-3-031-67351-1_45},\n\tlanguage = {en},\n\turldate = {2024-10-31},\n\tbooktitle = {Higher {Education} {Learning} {Methodologies} and {Technologies} {Online}},\n\tpublisher = {Springer Nature Switzerland},\n\tauthor = {Di Mitri, Daniele and Epp, Aleksandr and Schneider, Jan},\n\teditor = {Casalino, Gabriella and Di Fuccio, Raffaele and Fulantelli, Giovanni and Raviolo, Paolo and Rivoltella, Pier Cesare and Taibi, Davide and Toto, Giusi Antonia},\n\tyear = {2024},\n\tdoi = {10.1007/978-3-031-67351-1_45},\n\tnote = {Series Title: Communications in Computer and Information Science},\n\tpages = {678--698},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/V7D3MAQW/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal and immersive systems for skills development and education.\n \n \n \n \n\n\n \n Di Mitri, D.; Limbu, B.; Schneider, J.; Iren, D.; Giannakos, M.; and Klemke, R.\n\n\n \n\n\n\n British Journal of Educational Technology, 55(4): 1456–1464. 2024.\n _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/bjet.13483\n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n \n \"Multimodal paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{di_mitri_multimodal_2024,\n\ttitle = {Multimodal and immersive systems for skills development and education},\n\tvolume = {55},\n\tcopyright = {All rights reserved},\n\tissn = {1467-8535},\n\turl = {https://onlinelibrary.wiley.com/doi/abs/10.1111/bjet.13483},\n\tdoi = {10.1111/bjet.13483},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2024-10-31},\n\tjournal = {British Journal of Educational Technology},\n\tauthor = {Di Mitri, Daniele and Limbu, Bibeg and Schneider, Jan and Iren, Deniz and Giannakos, Michail and Klemke, Roland},\n\tyear = {2024},\n\tnote = {\\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/bjet.13483},\n\tpages = {1456--1464},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/M4ELTM8G/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n What Indicators Can I Serve You with? An Evaluation of a Research-Driven Learning Analytics Indicator Repository.\n \n \n \n \n\n\n \n Ahmad, A.; Schneider, J.; Weidlich, J.; Di Mitri, D.; Yau, J. Y.; Schiffner, D.; and Drachsler, H.\n\n\n \n\n\n\n In pages 58–68, October 2024. \n \n\n\n\n
\n\n\n\n \n \n \"WhatPaper\n  \n \n \n \"What paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ahmad_what_2024,\n\ttitle = {What {Indicators} {Can} {I} {Serve} {You} with? {An} {Evaluation} of a {Research}-{Driven} {Learning} {Analytics} {Indicator} {Repository}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-989-758-562-3},\n\tshorttitle = {What {Indicators} {Can} {I} {Serve} {You} with?},\n\turl = {https://www.scitepress.org/Link.aspx?doi=10.5220/0010995800003182},\n\tdoi = {10.5220/0010995800003182},\n\tabstract = {Digital Library},\n\turldate = {2024-10-18},\n\tauthor = {Ahmad, Atezaz and Schneider, Jan and Weidlich, Joshua and Di Mitri, Daniele and Yau, Jane Yin-Kim and Schiffner, Daniel and Drachsler, Hendrik},\n\tmonth = oct,\n\tyear = {2024},\n\tpages = {58--68},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/S5UZ9KD6/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Digital Library\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Human-centric Approach to Explain Evolving Data: A Case Study on Education.\n \n \n \n \n\n\n \n Casalino, G.; Castellano, G.; Di Mitri, D.; Kaczmarek-Majer, K.; and Zaza, G.\n\n\n \n\n\n\n In 2024 IEEE International Conference on Evolving and Adaptive Intelligent Systems (EAIS), pages 1–8, 2024. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{casalino_human-centric_2024,\n\ttitle = {A {Human}-centric {Approach} to {Explain} {Evolving} {Data}: {A} {Case} {Study} on {Education}},\n\tcopyright = {All rights reserved},\n\tshorttitle = {A {Human}-centric {Approach} to {Explain} {Evolving} {Data}},\n\turl = {https://ieeexplore.ieee.org/abstract/document/10569098/},\n\tdoi = {10.1109/EAIS58494.2024.10569098},\n\turldate = {2024-09-14},\n\tbooktitle = {2024 {IEEE} {International} {Conference} on {Evolving} and {Adaptive} {Intelligent} {Systems} ({EAIS})},\n\tpublisher = {IEEE},\n\tauthor = {Casalino, Gabriella and Castellano, Giovanna and Di Mitri, Daniele and Kaczmarek-Majer, Katarzyna and Zaza, Gianluca},\n\tyear = {2024},\n\tpages = {1--8},\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Beyond hard workout: A multimodal framework for personalised running training with immersive technologies.\n \n \n \n \n\n\n \n Cardenas Hernandez, F. P.; Schneider, J.; Di Mitri, D.; Jivet, I.; and Drachsler, H.\n\n\n \n\n\n\n British Journal of Educational Technology, 55(4): 1528–1559. 2024.\n \n\n\n\n
\n\n\n\n \n \n \"BeyondPaper\n  \n \n \n \"Beyond paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{cardenashernandez_beyond_2024,\n\ttitle = {Beyond hard workout: {A} multimodal framework for personalised running training with immersive technologies},\n\tvolume = {55},\n\tcopyright = {All rights reserved},\n\tissn = {0007-1013, 1467-8535},\n\tshorttitle = {Beyond hard workout},\n\turl = {https://bera-journals.onlinelibrary.wiley.com/doi/10.1111/bjet.13445},\n\tdoi = {10.1111/bjet.13445},\n\tabstract = {Abstract\n            \n              \n              Training to run is not straightforward since without proper personalised supervision and planning, people will not improve their performance and will increase the risk of injuries. This study aims to identify the different factors that influence running training programmes, examining the benefits, challenges or limitations of personalised plans. Moreover, this study explores how multimodal, immersive and artificial intelligence technologies can support personalised training. We conducted an exploratory sequential mixed research consisting of interviews with 11 running coaches from different countries and a survey of 12 running coaches. Based on the interviews and survey analysis, we identified and extracted relevant factors of the training process. We identified four relevant aspects for running training: physical, technical, mental and body awareness. Using these aspects as a reference, we derived a framework using a bottom‐up approach. This framework proposes multimodal, immersive and artificial intelligence technologies to facilitate personalised running training. 
It also allows coaches to personally guide their athletes on each aspect.\n            \n            \n              \n              \n                \n                  \n                    Practitioner notes\n                  \n                  \n                    What is already known about this topic\n                    \n                      \n                        Running is a popular sport that provides health benefits and is practised by many people around the world.\n                      \n                      \n                        Training is a process that enables athletes to improve their development in various aspects of their sport; in the case of running, it helps them to increase their speed and endurance.\n                      \n                      \n                        Personalised training supports the needs and abilities of athletes, by helping them to achieve their potential through individualised activities or programmes.\n                      \n                      \n                        Sports science research indicates that personalised training can be improved by applying technology to tackle its challenges and limitations.\n                      \n                    \n                  \n                  \n                    What this paper adds\n                    \n                      \n                        We show that personalising the training requires not only focusing on the runners' physical condition but also on their mental, technical and body awareness aspects, where each of them has a different adaptation to training.\n                      \n                      \n                        We show that multimodal and immersive technologies offer suitable and portable ways to measure and target the mental and body awareness aspects during running training.\n                      \n                    \n                  \n                  \n                    Implications for practice 
and/or policy\n                    \n                      \n                        This paper presents a list of factors, measures and devices that coaches can use to plan and design their training sessions in a more personalised manner.\n                      \n                      \n                        This study can serve as a foundation for future research that aims to identify and target the various factors that influence the learning and training of sports.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2024-09-14},\n\tjournal = {British Journal of Educational Technology},\n\tauthor = {Cardenas Hernandez, Fernando Pedro and Schneider, Jan and Di Mitri, Daniele and Jivet, Ioana and Drachsler, Hendrik},\n\tyear = {2024},\n\tpages = {1528--1559},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/MMGDLL22/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Abstract Training to run is not straightforward since without proper personalised supervision and planning, people will not improve their performance and will increase the risk of injuries. This study aims to identify the different factors that influence running training programmes, examining the benefits, challenges or limitations of personalised plans. Moreover, this study explores how multimodal, immersive and artificial intelligence technologies can support personalised training. We conducted an exploratory sequential mixed research consisting of interviews with 11 running coaches from different countries and a survey of 12 running coaches. Based on the interviews and survey analysis, we identified and extracted relevant factors of the training process. We identified four relevant aspects for running training: physical, technical, mental and body awareness. Using these aspects as a reference, we derived a framework using a bottom‐up approach. This framework proposes multimodal, immersive and artificial intelligence technologies to facilitate personalised running training. It also allows coaches to personally guide their athletes on each aspect. Practitioner notes What is already known about this topic Running is a popular sport that provides health benefits and is practised by many people around the world. Training is a process that enables athletes to improve their development in various aspects of their sport; in the case of running, it helps them to increase their speed and endurance. Personalised training supports the needs and abilities of athletes, by helping them to achieve their potential through individualised activities or programmes. Sports science research indicates that personalised training can be improved by applying technology to tackle its challenges and limitations. 
What this paper adds We show that personalising the training requires not only focusing on the runners' physical condition but also on their mental, technical and body awareness aspects, where each of them has a different adaptation to training. We show that multimodal and immersive technologies offer suitable and portable ways to measure and target the mental and body awareness aspects during running training. Implications for practice and/or policy This paper presents a list of factors, measures and devices that coaches can use to plan and design their training sessions in a more personalised manner. This study can serve as a foundation for future research that aims to identify and target the various factors that influence the learning and training of sports.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Predicting Item Difficulty and Item Response Time with Scalar-mixed Transformer Encoder Models and Rational Network Regression Heads.\n \n \n \n \n\n\n \n Gombert, S.; Menzel, L.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n In Kochmar, E.; Bexte, M.; Burstein, J.; Horbach, A.; Laarmann-Quante, R.; Tack, A.; Yaneva, V.; and Yuan, Z., editor(s), Proceedings of the 19th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2024), pages 483–492, Mexico City, Mexico, June 2024. Association for Computational Linguistics\n \n\n\n\n
\n\n\n\n \n \n \"PredictingPaper\n  \n \n \n \"Predicting paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{gombert_predicting_2024,\n\taddress = {Mexico City, Mexico},\n\ttitle = {Predicting {Item} {Difficulty} and {Item} {Response} {Time} with {Scalar}-mixed {Transformer} {Encoder} {Models} and {Rational} {Network} {Regression} {Heads}},\n\tcopyright = {All rights reserved},\n\turl = {https://aclanthology.org/2024.bea-1.40},\n\tabstract = {This paper describes a contribution to the BEA 2024 Shared Task on Automated Prediction of Item Difficulty and Response Time. The participants in this shared task are to develop models for predicting the difficulty and response time of multiple-choice items in the medical field. These items were taken from the United States Medical Licensing Examination® (USMLE®), a high-stakes medical exam. For this purpose, we evaluated multiple BERT-like pre-trained transformer encoder models, which we combined with Scalar Mixing and two custom 2-layer classification heads using learnable Rational Activations as an activation function, each for predicting one of the two variables of interest in a multi-task setup. Our best models placed first out of 43 for predicting item difficulty and fifth out of 34 for predicting Item Response Time.},\n\turldate = {2024-07-02},\n\tbooktitle = {Proceedings of the 19th {Workshop} on {Innovative} {Use} of {NLP} for {Building} {Educational} {Applications} ({BEA} 2024)},\n\tpublisher = {Association for Computational Linguistics},\n\tauthor = {Gombert, Sebastian and Menzel, Lukas and Di Mitri, Daniele and Drachsler, Hendrik},\n\teditor = {Kochmar, Ekaterina and Bexte, Marie and Burstein, Jill and Horbach, Andrea and Laarmann-Quante, Ronja and Tack, Anaïs and Yaneva, Victoria and Yuan, Zheng},\n\tmonth = jun,\n\tyear = {2024},\n\tpages = {483--492},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/RWATRJM8/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n This paper describes a contribution to the BEA 2024 Shared Task on Automated Prediction of Item Difficulty and Response Time. The participants in this shared task are to develop models for predicting the difficulty and response time of multiple-choice items in the medical field. These items were taken from the United States Medical Licensing Examination® (USMLE®), a high-stakes medical exam. For this purpose, we evaluated multiple BERT-like pre-trained transformer encoder models, which we combined with Scalar Mixing and two custom 2-layer classification heads using learnable Rational Activations as an activation function, each for predicting one of the two variables of interest in a multi-task setup. Our best models placed first out of 43 for predicting item difficulty and fifth out of 34 for predicting Item Response Time.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From the Automated Assessment of Student Essay Content to Highly Informative Feedback: a Case Study.\n \n \n \n \n\n\n \n Gombert, S.; Fink, A.; Giorgashvili, T.; Jivet, I.; Di Mitri, D.; Yau, J.; Frey, A.; and Drachsler, H.\n\n\n \n\n\n\n International Journal of Artificial Intelligence in Education. January 2024.\n \n\n\n\n
\n\n\n\n \n \n \"FromPaper\n  \n \n \n \"From paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{gombert_automated_2024,\n\ttitle = {From the {Automated} {Assessment} of {Student} {Essay} {Content} to {Highly} {Informative} {Feedback}: a {Case} {Study}},\n\tcopyright = {All rights reserved},\n\tissn = {1560-4292, 1560-4306},\n\tshorttitle = {From the {Automated} {Assessment} of {Student} {Essay} {Content} to {Highly} {Informative} {Feedback}},\n\turl = {https://link.springer.com/10.1007/s40593-023-00387-6},\n\tdoi = {10.1007/s40593-023-00387-6},\n\tabstract = {Abstract\n            \n              Various studies empirically proved the value of highly informative feedback for enhancing learner success. However, digital educational technology has yet to catch up as automated feedback is often provided shallowly. This paper presents a case study on implementing a pipeline that provides German-speaking university students enrolled in an introductory-level educational psychology lecture with content-specific feedback for a lecture assignment. In the assignment, students have to discuss the usefulness and educational grounding (i.e., connection to working memory, metacognition or motivation) of ten learning tips presented in a video within essays. Through our system, students received feedback on the correctness of their solutions and content areas they needed to improve. For this purpose, we implemented a natural language processing pipeline with two steps: (1) segmenting the essays and (2) predicting codes from the resulting segments used to generate feedback texts. As training data for the model in each processing step, we used 689 manually labelled essays submitted by the previous student cohort. We then evaluated approaches based on\n              GBERT\n              ,\n              T5,\n              and bag-of-words baselines for scoring them. Both pipeline steps, especially the transformer-based models, demonstrated high performance. In the final step, we evaluated the feedback using a randomised controlled trial. 
The control group received\n              feedback as usual (essential feedback),\n              while the treatment group received\n              highly informative feedback\n              based on the natural language processing pipeline\n              .\n              We then used a six items long survey to test the perception of feedback. We conducted an ordinary least squares analysis to model these items as dependent variables, which showed that highly informative feedback had positive effects on helpfulness and reflection.},\n\tlanguage = {en},\n\turldate = {2024-09-14},\n\tjournal = {International Journal of Artificial Intelligence in Education},\n\tauthor = {Gombert, Sebastian and Fink, Aron and Giorgashvili, Tornike and Jivet, Ioana and Di Mitri, Daniele and Yau, Jane and Frey, Andreas and Drachsler, Hendrik},\n\tmonth = jan,\n\tyear = {2024},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/5CA99EIM/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Abstract Various studies empirically proved the value of highly informative feedback for enhancing learner success. However, digital educational technology has yet to catch up as automated feedback is often provided shallowly. This paper presents a case study on implementing a pipeline that provides German-speaking university students enrolled in an introductory-level educational psychology lecture with content-specific feedback for a lecture assignment. In the assignment, students have to discuss the usefulness and educational grounding (i.e., connection to working memory, metacognition or motivation) of ten learning tips presented in a video within essays. Through our system, students received feedback on the correctness of their solutions and content areas they needed to improve. For this purpose, we implemented a natural language processing pipeline with two steps: (1) segmenting the essays and (2) predicting codes from the resulting segments used to generate feedback texts. As training data for the model in each processing step, we used 689 manually labelled essays submitted by the previous student cohort. We then evaluated approaches based on GBERT , T5, and bag-of-words baselines for scoring them. Both pipeline steps, especially the transformer-based models, demonstrated high performance. In the final step, we evaluated the feedback using a randomised controlled trial. The control group received feedback as usual (essential feedback), while the treatment group received highly informative feedback based on the natural language processing pipeline . We then used a six items long survey to test the perception of feedback. We conducted an ordinary least squares analysis to model these items as dependent variables, which showed that highly informative feedback had positive effects on helpfulness and reflection.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Following the Impact Chain of the LA Cockpit: An Intervention Study Investigating a Teacher Dashboard’s Effect on Student Learning.\n \n \n \n \n\n\n \n Karademir, O.; Borgards, L.; Di Mitri, D.; Strauß, S.; Kubsch, M.; Brobeil, M.; Grimm, A.; Gombert, S.; Rummel, N.; and Neumann, K.\n\n\n \n\n\n\n Journal of Learning Analytics, 11(2): 215–228. 2024.\n \n\n\n\n
\n\n\n\n \n \n \"FollowingPaper\n  \n \n \n \"Following paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{karademir_following_2024,\n\ttitle = {Following the {Impact} {Chain} of the {LA} {Cockpit}: {An} {Intervention} {Study} {Investigating} a {Teacher} {Dashboard}’s {Effect} on {Student} {Learning}},\n\tvolume = {11},\n\tcopyright = {All rights reserved},\n\tshorttitle = {Following the {Impact} {Chain} of the {LA} {Cockpit}},\n\turl = {https://learning-analytics.info/index.php/JLA/article/view/8399},\n\tnumber = {2},\n\turldate = {2024-09-14},\n\tjournal = {Journal of Learning Analytics},\n\tauthor = {Karademir, Onur and Borgards, Lena and Di Mitri, Daniele and Strauß, Sebastian and Kubsch, Marcus and Brobeil, Markus and Grimm, Adrian and Gombert, Sebastian and Rummel, Nikol and Neumann, Knut},\n\tyear = {2024},\n\tpages = {215--228},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/LMT8R3VC/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2023\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Using Accessible Motion Capture in Educational Games for Sign language Learning.\n \n \n \n \n\n\n \n Tobias, J. L.; and Di Mitri, D.\n\n\n \n\n\n\n In Viberg, O.; Jivet, I.; Muñoz-Merino, P.; Perifanou, M.; and Papathoma, T., editor(s), Responsive and Sustainable Educational Futures, volume 14200, pages 762–767, Cham, 2023. Springer Nature Switzerland\n Series Title: Lecture Notes in Computer Science\n\n\n\n
\n\n\n\n \n \n \"UsingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{viberg_using_2023,\n\taddress = {Cham},\n\ttitle = {Using {Accessible} {Motion} {Capture} in {Educational} {Games} for {Sign} language {Learning}},\n\tvolume = {14200},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-42681-0},\n\turl = {https://link.springer.com/10.1007/978-3-031-42682-7_74},\n\tdoi = {10.1007/978-3-031-42682-7_74},\n\tlanguage = {en},\n\turldate = {2024-09-14},\n\tbooktitle = {Responsive and {Sustainable} {Educational} {Futures}},\n\tpublisher = {Springer Nature Switzerland},\n\tauthor = {Tobias, Joshua Leon and Di Mitri, Daniele},\n\teditor = {Viberg, Olga and Jivet, Ioana and Muñoz-Merino, Pedro J. and Perifanou, Maria and Papathoma, Tina},\n\tyear = {2023},\n\tdoi = {10.1007/978-3-031-42682-7_74},\n\tnote = {Series Title: Lecture Notes in Computer Science},\n\tpages = {762--767},\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Measuring Efficacy of ALEKS as a Supportive Instructional Tool in K-12 Math Classroom with Underachieving Students.\n \n \n \n \n\n\n \n Khazanchi, R.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n Journal of Computers in Mathematics and Science Teaching, 42(2): 155–176. 2023.\n Publisher: Association for the Advancement of Computing in Education (AACE)\n\n\n\n
\n\n\n\n \n \n \"MeasuringPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{khazanchi_measuring_2023,\n\ttitle = {Measuring {Efficacy} of {ALEKS} as a {Supportive} {Instructional} {Tool} in {K}-12 {Math} {Classroom} with {Underachieving} {Students}},\n\tvolume = {42},\n\tcopyright = {All rights reserved},\n\turl = {https://www.learntechlib.org/p/221775/},\n\tnumber = {2},\n\turldate = {2024-09-14},\n\tjournal = {Journal of Computers in Mathematics and Science Teaching},\n\tauthor = {Khazanchi, Rashmi and Di Mitri, Daniele and Drachsler, Hendrik},\n\tyear = {2023},\n\tnote = {Publisher: Association for the Advancement of Computing in Education (AACE)},\n\tpages = {155--176},\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Privacy-preserving multimodal learning analytics using visual animations of kinematic data.\n \n \n \n \n\n\n \n Epp, A.; Schneider, J.; and Di Mitri, D.\n\n\n \n\n\n\n BOOKOF,214. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Privacy-preservingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{epp_privacy-preserving_2023,\n\ttitle = {Privacy-preserving multimodal learning analytics using visual animations of kinematic data},\n\tcopyright = {All rights reserved},\n\turl = {https://www.researchgate.net/profile/Francesca-Finestrone/publication/378268879_HELMeTO_2023_Book-of-abstracts/links/65d08fd601325d4652114308/HELMeTO-2023-Book-of-abstracts.pdf#page=243},\n\turldate = {2024-09-14},\n\tjournal = {BOOKOF},\n\tauthor = {Epp, Aleksandr and Schneider, Jan and Di Mitri, Daniele},\n\tyear = {2023},\n\tpages = {214},\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Students' expectations of Learning Analytics across Europe.\n \n \n \n \n\n\n \n Wollny, S.; Di Mitri, D.; Jivet, I.; Muñoz‐Merino, P.; Scheffel, M.; Schneider, J.; Tsai, Y.; Whitelock‐Wainwright, A.; Gašević, D.; and Drachsler, H.\n\n\n \n\n\n\n Journal of Computer Assisted Learning, 39(4): 1325–1338. August 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Students'Paper\n  \n \n \n \"Students' paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{wollny_students_2023,\n\ttitle = {Students' expectations of {Learning} {Analytics} across {Europe}},\n\tvolume = {39},\n\tcopyright = {All rights reserved},\n\tissn = {0266-4909, 1365-2729},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12802},\n\tdoi = {10.1111/jcal.12802},\n\tabstract = {Abstract\n            \n              Background\n              Learning Analytics (LA) is an emerging field concerned with measuring, collecting, and analysing data about learners and their contexts to gain insights into learning processes. As the technology of Learning Analytics is evolving, many systems are being implemented. In this context, it is essential to understand stakeholders' expectations of LA across Higher Education Institutions (HEIs) for large‐scale implementations that take their needs into account.\n            \n            \n              Objectives\n              This study aims to contribute to knowledge about individual LA expectations of European higher education students. It may facilitate the strategy of stakeholder buy‐in, the transfer of LA insights across HEIs, and the development of international best practices and guidelines.\n            \n            \n              Methods\n              To this end, the study employs a ‘Student Expectations of Learning Analytics Questionnaire’ (SELAQ) survey of 417 students at the Goethe University Frankfurt (Germany) Based on this data, Multiple Linear Regressions are applied to determine how these students position themselves compared to students from Madrid (Spain), Edinburgh (United Kingdom) and the Netherlands, where SELAQ had already been implemented at HEIs.\n            \n            \n              Results and Conclusions\n              The results show that students' expectations at Goethe University Frankfurt are rather homogeneous regarding ‘LA Ethics and Privacy’ and ‘LA Service Features’. 
Furthermore, we found that European students generally show a consistent pattern of expectations of LA with a high degree of similarity across the HEIs examined. European HEIs face challenges more similar than anticipated. The HEI experience with implementing LA can be more easily transferred to other HEIs, suggesting standardized LA rather than tailor‐made solutions designed from scratch.\n            \n          , \n            Lay Description\n            \n              What is currently known about this topic\n              \n                \n                  \n                    Implementing Learning Analytics is a promising but challenging task.\n                  \n                  \n                    SELAQ is a questionnaire that can be used to determine students' expectations of Learning Analytics.\n                  \n                  \n                    Several institutions in Europe applied SELAQ to implement Learning Analytics that meet the needs of their students.\n                  \n                \n              \n            \n            \n              What this paper adds to this\n              \n                \n                  \n                    This study applies SELAQ for the first time at a German university and compares the results to studies from the UK, Spain, and the Netherlands.\n                  \n                  \n                    The participating students at the German university show relatively homogeneous expectations of Learning Analytics.\n                  \n                  \n                    The expectations towards Learning Analytics of students across Europe are comparable.\n                  \n                \n              \n            \n            \n              The implications of study findings for practitioners\n              \n                \n                  \n                    Learning Analytics systems should be shared across institutions rather than being implemented 
separately as tailored solutions.\n                  \n                  \n                    More standardized or modular Learning Analytics systems are reasonable.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2024-09-14},\n\tjournal = {Journal of Computer Assisted Learning},\n\tauthor = {Wollny, Sebastian and Di Mitri, Daniele and Jivet, Ioana and Muñoz‐Merino, Pedro and Scheffel, Maren and Schneider, Jan and Tsai, Yi‐Shan and Whitelock‐Wainwright, Alexander and Gašević, Dragan and Drachsler, Hendrik},\n\tmonth = aug,\n\tyear = {2023},\n\tpages = {1325--1338},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/IUJBMXXF/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Abstract Background Learning Analytics (LA) is an emerging field concerned with measuring, collecting, and analysing data about learners and their contexts to gain insights into learning processes. As the technology of Learning Analytics is evolving, many systems are being implemented. In this context, it is essential to understand stakeholders' expectations of LA across Higher Education Institutions (HEIs) for large‐scale implementations that take their needs into account. Objectives This study aims to contribute to knowledge about individual LA expectations of European higher education students. It may facilitate the strategy of stakeholder buy‐in, the transfer of LA insights across HEIs, and the development of international best practices and guidelines. Methods To this end, the study employs a ‘Student Expectations of Learning Analytics Questionnaire’ (SELAQ) survey of 417 students at the Goethe University Frankfurt (Germany) Based on this data, Multiple Linear Regressions are applied to determine how these students position themselves compared to students from Madrid (Spain), Edinburgh (United Kingdom) and the Netherlands, where SELAQ had already been implemented at HEIs. Results and Conclusions The results show that students' expectations at Goethe University Frankfurt are rather homogeneous regarding ‘LA Ethics and Privacy’ and ‘LA Service Features’. Furthermore, we found that European students generally show a consistent pattern of expectations of LA with a high degree of similarity across the HEIs examined. European HEIs face challenges more similar than anticipated. The HEI experience with implementing LA can be more easily transferred to other HEIs, suggesting standardized LA rather than tailor‐made solutions designed from scratch. , Lay Description What is currently known about this topic Implementing Learning Analytics is a promising but challenging task. 
SELAQ is a questionnaire that can be used to determine students' expectations of Learning Analytics. Several institutions in Europe applied SELAQ to implement Learning Analytics that meet the needs of their students. What this paper adds to this This study applies SELAQ for the first time at a German university and compares the results to studies from the UK, Spain, and the Netherlands. The participating students at the German university show relatively homogeneous expectations of Learning Analytics. The expectations towards Learning Analytics of students across Europe are comparable. The implications of study findings for practitioners Learning Analytics systems should be shared across institutions rather than being implemented separately as tailored solutions. More standardized or modular Learning Analytics systems are reasonable.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hybrid Models for Knowledge Tracing: a Systematic Literature Review.\n \n \n \n \n\n\n \n Zanellati, A.; Di Mitri, D; Gabbrielli, M.; and Levrini, O.\n\n\n \n\n\n\n IEEE Transactions on Learning Technologies,1–16. 2023.\n Conference Name: IEEE Transactions on Learning Technologies\n\n\n\n
\n\n\n\n \n \n \"HybridPaper\n  \n \n \n \"Hybrid paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{zanellati_hybrid_2023,\n\ttitle = {Hybrid {Models} for {Knowledge} {Tracing}: a {Systematic} {Literature} {Review}},\n\tcopyright = {All rights reserved},\n\tissn = {1939-1382},\n\tshorttitle = {Hybrid {Models} for {Knowledge} {Tracing}},\n\turl = {https://ieeexplore.ieee.org/document/10379123},\n\tdoi = {10.1109/TLT.2023.3348690},\n\tabstract = {Knowledge Tracing is a well-known problem in AI for Education, consisting of monitoring how the knowledge state of students changes during the learning process and accurately predicting their performance in future exercises.In recent years, many advances have been made thanks to various machine learning and deep learning techniques. Despite their satisfactory performances, they have some pitfalls, e.g. modeling one skill at a time, ignoring the relationships between different skills, or inconsistency for the predictions, i.e. sudden spikes and falls across time steps. For this reason, hybrid machine-learning techniques have also been explored. With this systematic literature review, we aim to illustrate the state of the art in this field. Specifically, we want to identify the potential and the frontiers in integrating prior knowledge sources in the traditional machine learning pipeline as a supplement to the normally considered data. We applied a qualitative analysis to distill a taxonomy with three dimensions: knowledge source, knowledge representation, and knowledge integration. Exploiting this taxonomy, we also conducted a quantitative analysis to detect the most common approaches.},\n\turldate = {2024-01-04},\n\tjournal = {IEEE Transactions on Learning Technologies},\n\tauthor = {Zanellati, Andrea and Di Mitri, D and Gabbrielli, Maurizio and Levrini, Olivia},\n\tyear = {2023},\n\tnote = {Conference Name: IEEE Transactions on Learning Technologies},\n\tpages = {1--16},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/M9I4XLTT/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Knowledge Tracing is a well-known problem in AI for Education, consisting of monitoring how the knowledge state of students changes during the learning process and accurately predicting their performance in future exercises.In recent years, many advances have been made thanks to various machine learning and deep learning techniques. Despite their satisfactory performances, they have some pitfalls, e.g. modeling one skill at a time, ignoring the relationships between different skills, or inconsistency for the predictions, i.e. sudden spikes and falls across time steps. For this reason, hybrid machine-learning techniques have also been explored. With this systematic literature review, we aim to illustrate the state of the art in this field. Specifically, we want to identify the potential and the frontiers in integrating prior knowledge sources in the traditional machine learning pipeline as a supplement to the normally considered data. We applied a qualitative analysis to distill a taxonomy with three dimensions: knowledge source, knowledge representation, and knowledge integration. Exploiting this taxonomy, we also conducted a quantitative analysis to detect the most common approaches.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Proceedings of the Doctoral Consortium of the 17th European Conference on Technology Enhanced Learning.\n \n \n \n \n\n\n \n Jivet, I.; Di Mitri, D.; Schneider, J.; Papamitsiou, Z.; and Fominykh, M.\n\n\n \n\n\n\n Volume 3292 of CEUR Workshop ProceedingsCEUR, Toulouse, France, September 2022.\n ISSN: 1613-0073\n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n \n \"Proceedings paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{jivet_proceedings_2022,\n\taddress = {Toulouse, France},\n\tseries = {{CEUR} {Workshop} {Proceedings}},\n\ttitle = {Proceedings of the {Doctoral} {Consortium} of the 17th {European} {Conference} on {Technology} {Enhanced} {Learning}},\n\tvolume = {3292},\n\tcopyright = {All rights reserved},\n\turl = {https://ceur-ws.org/Vol-3292/#xpreface},\n\tlanguage = {en},\n\turldate = {2022-12-01},\n\tpublisher = {CEUR},\n\tauthor = {Jivet, Ioana and Di Mitri, Daniele and Schneider, Jan and Papamitsiou, Zacharoula and Fominykh, Mikhail},\n\tmonth = sep,\n\tyear = {2022},\n\tnote = {ISSN: 1613-0073},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/LQ5C7QZG/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Coding energy knowledge in constructed responses with explainable NLP models.\n \n \n \n \n\n\n \n Gombert, S.; Di Mitri, D.; Karademir, O.; Kubsch, M.; Kolbe, H.; Tautz, S.; Grimm, A.; Bohm, I.; Neumann, K.; and Drachsler, H.\n\n\n \n\n\n\n Journal of Computer Assisted Learning. 2022.\n _eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jcal.12767\n\n\n\n
\n\n\n\n \n \n \"CodingPaper\n  \n \n \n \"Coding paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{gombert_coding_2022,\n\ttitle = {Coding energy knowledge in constructed responses with explainable {NLP} models},\n\tcopyright = {All rights reserved},\n\tissn = {1365-2729},\n\turl = {https://onlinelibrary.wiley.com/doi/abs/10.1111/jcal.12767},\n\tdoi = {10.1111/jcal.12767},\n\tabstract = {Background Formative assessments are needed to enable monitoring how student knowledge develops throughout a unit. Constructed response items which require learners to formulate their own free-text responses are well suited for testing their active knowledge. However, assessing such constructed responses in an automated fashion is a complex task and requires the application of natural language processing methodology. In this article, we implement and evaluate multiple machine learning models for coding energy knowledge in free-text responses of German K-12 students to items in formative science assessments which were conducted during synchronous online learning sessions. Dataset The dataset we collected for this purpose consists of German constructed responses from 38 different items dealing with aspects of energy such as manifestation and transformation. The units and items were implemented with the help of project-based pedagogy and evidence-centered design, and the responses were coded for seven core ideas concerning the manifestation and transformation of energy. The data was collected from students in seventh, eighth and ninth grade. Methodology We train various transformer- and feature-based models and compare their ability to recognize the respective ideas in students' writing. Moreover, as domain knowledge and its development can be formally modeled through knowledge networks, we evaluate how well the detection of the ideas within responses translated into accurate co-occurrence-based knowledge networks. 
Finally, in terms of the descriptive accuracy of our models, we inspect what features played a role for which prediction outcome and if the models pick up on undesired shortcuts. In addition to this, we analyze how much the models match human coders in what evidence within responses they consider important for their coding decisions. Results A model based on a modified GBERT-large can achieve the overall most promising results, although descriptive accuracy varies much more than predictive accuracy for the different ideas assessed. For reasons of comparability, we also evaluate the same machine learning architecture using the SciEntsBank 3-Way benchmark with an English RoBERTa-large model, where it achieves state-of-the-art results in two out of three evaluation categories.},\n\tlanguage = {en},\n\turldate = {2022-12-16},\n\tjournal = {Journal of Computer Assisted Learning},\n\tauthor = {Gombert, Sebastian and Di Mitri, Daniele and Karademir, Onur and Kubsch, Marcus and Kolbe, Hannah and Tautz, Simon and Grimm, Adrian and Bohm, Isabell and Neumann, Knut and Drachsler, Hendrik},\n\tyear = {2022},\n\tnote = {\\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jcal.12767},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/VE9BCRZ4/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Background Formative assessments are needed to enable monitoring how student knowledge develops throughout a unit. Constructed response items which require learners to formulate their own free-text responses are well suited for testing their active knowledge. However, assessing such constructed responses in an automated fashion is a complex task and requires the application of natural language processing methodology. In this article, we implement and evaluate multiple machine learning models for coding energy knowledge in free-text responses of German K-12 students to items in formative science assessments which were conducted during synchronous online learning sessions. Dataset The dataset we collected for this purpose consists of German constructed responses from 38 different items dealing with aspects of energy such as manifestation and transformation. The units and items were implemented with the help of project-based pedagogy and evidence-centered design, and the responses were coded for seven core ideas concerning the manifestation and transformation of energy. The data was collected from students in seventh, eighth and ninth grade. Methodology We train various transformer- and feature-based models and compare their ability to recognize the respective ideas in students' writing. Moreover, as domain knowledge and its development can be formally modeled through knowledge networks, we evaluate how well the detection of the ideas within responses translated into accurate co-occurrence-based knowledge networks. Finally, in terms of the descriptive accuracy of our models, we inspect what features played a role for which prediction outcome and if the models pick up on undesired shortcuts. In addition to this, we analyze how much the models match human coders in what evidence within responses they consider important for their coding decisions. 
Results A model based on a modified GBERT-large can achieve the overall most promising results, although descriptive accuracy varies much more than predictive accuracy for the different ideas assessed. For reasons of comparability, we also evaluate the same machine learning architecture using the SciEntsBank 3-Way benchmark with an English RoBERTa-large model, where it achieves state-of-the-art results in two out of three evaluation categories.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Introduction to Multimodal Learning Analytics.\n \n \n \n \n\n\n \n Giannakos, M.; Spikol, D.; Di Mitri, D.; Sharma, K.; Ochoa, X.; and Hammad, R.\n\n\n \n\n\n\n In . September 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Introduction paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{giannakos_introduction_2022,\n\ttitle = {Introduction to {Multimodal} {Learning} {Analytics}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-08075-3},\n\tabstract = {This chapter provides an introduction and an overview of this edited book on Multimodal Learning Analytics (MMLA). The goal of this book is to introduce the reader to the field of MMLA and provide a comprehensive overview of contemporary MMLA research. The contributions come from diverse contexts to support different objectives and stakeholders (e.g., learning scientists, policymakers, technologists). In this first introductory chapter, we present the history of MMLA and the various ongoing challenges, giving a brief overview of the contributions of the book, and conclude by highlighting the potential emerging technologies and practices connected with MMLA.},\n\tauthor = {Giannakos, Michail and Spikol, Daniel and Di Mitri, Daniele and Sharma, Kshitij and Ochoa, Xavier and Hammad, Rawad},\n\tmonth = sep,\n\tyear = {2022},\n\tdoi = {10.1007/978-3-031-08076-0_1},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/MCD8ZQW5/file/view}\n}\n\n\n\n
\n
\n\n\n
\n This chapter provides an introduction and an overview of this edited book on Multimodal Learning Analytics (MMLA). The goal of this book is to introduce the reader to the field of MMLA and provide a comprehensive overview of contemporary MMLA research. The contributions come from diverse contexts to support different objectives and stakeholders (e.g., learning scientists, policymakers, technologists). In this first introductory chapter, we present the history of MMLA and the various ongoing challenges, giving a brief overview of the contributions of the book, and conclude by highlighting the potential emerging technologies and practices connected with MMLA.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Privacy-Preserving and Scalable Affect Detection in Online Synchronous Learning.\n \n \n \n \n\n\n \n Böttger, F.; Cetinkaya, U.; Di Mitri, D.; Gombert, S.; Shingjergji, K.; Iren, D.; and Klemke, R.\n\n\n \n\n\n\n In Hilliger, I.; Muñoz-Merino, P. J.; De Laet, T.; Ortega-Arranz, A.; and Farrell, T., editor(s), Educating for a New Future: Making Sense of Technology-Enhanced Learning Adoption, volume 13450, of Lecture Notes in Computer Science, pages 45–58, Cham, 2022. Springer International Publishing\n Series Title: Lecture Notes in Computer Science\n\n\n\n
\n\n\n\n \n \n \"Privacy-PreservingPaper\n  \n \n \n \"Privacy-Preserving paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hilliger_privacy-preserving_2022,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} in {Computer} {Science}},\n\ttitle = {Privacy-{Preserving} and {Scalable} {Affect} {Detection} in {Online} {Synchronous} {Learning}},\n\tvolume = {13450},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-16289-3},\n\turl = {https://link.springer.com/10.1007/978-3-031-16290-9_4},\n\tdoi = {10.1007/978-3-031-16290-9_4},\n\tlanguage = {en},\n\turldate = {2024-09-14},\n\tbooktitle = {Educating for a {New} {Future}: {Making} {Sense} of {Technology}-{Enhanced} {Learning} {Adoption}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Böttger, Felix and Cetinkaya, Ufuk and Di Mitri, Daniele and Gombert, Sebastian and Shingjergji, Krist and Iren, Deniz and Klemke, Roland},\n\teditor = {Hilliger, Isabel and Muñoz-Merino, Pedro J. and De Laet, Tinne and Ortega-Arranz, Alejandro and Farrell, Tracie},\n\tyear = {2022},\n\tnote = {Series Title: Lecture Notes in Computer Science},\n\tpages = {45--58},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/AUS7PPAM/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal Immersive Learning Systems 2022.\n \n \n \n \n\n\n \n Sanusi, K. A. M.; Limbu, B.; Schneider, J.; Di Mitri, D.; and Klemke, R.,\n editors.\n \n\n\n \n\n\n\n Volume 3247 of CEUR Workshop ProceedingsCEUR, Toulouse, France, September 2022.\n ISSN: 1613-0073\n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n \n \"Multimodal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{sanusi_multimodal_2022,\n\taddress = {Toulouse, France},\n\tseries = {{CEUR} {Workshop} {Proceedings}},\n\ttitle = {Multimodal {Immersive} {Learning} {Systems} 2022},\n\tvolume = {3247},\n\tcopyright = {All rights reserved},\n\tshorttitle = {Preface {Summary}},\n\turl = {http://ceur-ws.org/Vol-3247/#preface},\n\tlanguage = {en},\n\turldate = {2022-10-24},\n\tpublisher = {CEUR},\n\teditor = {Sanusi, Khaleel Asyraaf Mat and Limbu, Bibeg and Schneider, Jan and Di Mitri, Daniele and Klemke, Roland},\n\tmonth = sep,\n\tyear = {2022},\n\tnote = {ISSN: 1613-0073},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/X8U5ZQCD/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Preface of MILeS 2022.\n \n \n \n \n\n\n \n Sanusi, K. A. M.; Limbu, B.; Schneider, J.; Di Mitri, D.; and Klemke, R.\n\n\n \n\n\n\n In CEUR Workshop Proceedings, volume 3247. CEUR-WS, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"PrefacePaper\n  \n \n \n \"Preface paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{sanusi_preface_2022,\n\ttitle = {Preface of {MILeS} 2022},\n\tvolume = {3247},\n\tcopyright = {All rights reserved},\n\turl = {https://research.tudelft.nl/files/136096658/preface.pdf},\n\turldate = {2024-09-14},\n\tbooktitle = {{CEUR} {Workshop} {Proceedings}},\n\tpublisher = {CEUR-WS},\n\tauthor = {Sanusi, Khaleel Asyraaf Mat and Limbu, Bibeg and Schneider, Jan and Di Mitri, Daniele and Klemke, Roland},\n\tyear = {2022},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/6P4GQNEF/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Multimodal Learning Analytics Handbook.\n \n \n \n \n\n\n \n Giannakos, M.; Spikol, D.; Di Mitri, D.; Sharma, K.; Ochoa, X.; and Hammad, R.,\n editors.\n \n\n\n \n\n\n\n Springer, S.l., 1st ed. 2022 edition edition, October 2022.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{giannakos_multimodal_2022,\n\taddress = {S.l.},\n\tedition = {1st ed. 2022 edition},\n\ttitle = {The {Multimodal} {Learning} {Analytics} {Handbook}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-08075-3},\n\tlanguage = {English},\n\tpublisher = {Springer},\n\teditor = {Giannakos, Michail and Spikol, Daniel and Di Mitri, Daniele and Sharma, Kshitij and Ochoa, Xavier and Hammad, Rawad},\n\tmonth = oct,\n\tyear = {2022},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/8KT93HN5/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reflecting on the Actionable Components of a Model for Augmented Feedback.\n \n \n \n \n\n\n \n Di Mitri, D.; Gombert, S.; and Karademir, O.\n\n\n \n\n\n\n In Sanusi, K. A. M.; Limbu, B.; Schneider, J.; Di Mitri, D.; and Klemke, R., editor(s), Proceedings of the Second International Workshop on Multimodal Immersive Learning Systems (MILeS 2022), volume 3247, of CEUR Workshop Proceedings, pages 45–50, Toulouse, France, September 2022. CEUR\n ISSN: 1613-0073\n\n\n\n
\n\n\n\n \n \n \"ReflectingPaper\n  \n \n \n \"Reflecting paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_reflecting_2022,\n\taddress = {Toulouse, France},\n\tseries = {{CEUR} {Workshop} {Proceedings}},\n\ttitle = {Reflecting on the {Actionable} {Components} of a {Model} for {Augmented} {Feedback}},\n\tvolume = {3247},\n\tcopyright = {Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC-BY-NC-ND)},\n\turl = {http://ceur-ws.org/Vol-3247/#paper8},\n\tlanguage = {en},\n\turldate = {2022-10-24},\n\tbooktitle = {Proceedings of the {Second} {International} {Workshop} on {Multimodal} {Immersive} {Learning} {Systems} ({MILeS} 2022)},\n\tpublisher = {CEUR},\n\tauthor = {Di Mitri, Daniele and Gombert, Sebastian and Karademir, Onur},\n\teditor = {Sanusi, Khaleel Asyraaf Mat and Limbu, Bibeg and Schneider, Jan and Di Mitri, Daniele and Klemke, Roland},\n\tmonth = sep,\n\tyear = {2022},\n\tnote = {ISSN: 1613-0073},\n\tpages = {45--50},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/DKHT26ZY/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal Learning Experience for Deliberate Practice.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Limbu, B.; Mat Sanusi, K. A.; and Klemke, R.\n\n\n \n\n\n\n In Giannakos, M.; Spikol, D.; Di Mitri, D.; Sharma, K.; Ochoa, X.; and Hammad, R., editor(s), The Multimodal Learning Analytics Handbook, pages 183–204. Springer International Publishing, Cham, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n \n \"Multimodal paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{di_mitri_multimodal_2022,\n\taddress = {Cham},\n\ttitle = {Multimodal {Learning} {Experience} for {Deliberate} {Practice}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-08076-0},\n\turl = {https://doi.org/10.1007/978-3-031-08076-0_8},\n\tabstract = {While digital education technologies have improved to make educational resources more available, the modes of interaction they implement remain largely unnatural for the learner. Modern sensor-enabled computer systems allow extending human-computer interfaces for multimodal communication. Advances in Artificial Intelligence allow interpreting the data collected from multimodal and multi-sensor devices. These insights can be used to support deliberate practice with personalised feedback and adaptation through Multimodal Learning Experiences (MLX). This chapter elaborates on the approaches, architectures, and methodologies in five different use cases that use multimodal learning analytics applications for deliberate practice.},\n\tlanguage = {en},\n\turldate = {2022-11-12},\n\tbooktitle = {The {Multimodal} {Learning} {Analytics} {Handbook}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Limbu, Bibeg and Mat Sanusi, Khaleel Asyraaf and Klemke, Roland},\n\teditor = {Giannakos, Michail and Spikol, Daniel and Di Mitri, Daniele and Sharma, Kshitij and Ochoa, Xavier and Hammad, Rawad},\n\tyear = {2022},\n\tdoi = {10.1007/978-3-031-08076-0_8},\n\tpages = {183--204},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/U6QWHCM6/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n While digital education technologies have improved to make educational resources more available, the modes of interaction they implement remain largely unnatural for the learner. Modern sensor-enabled computer systems allow extending human-computer interfaces for multimodal communication. Advances in Artificial Intelligence allow interpreting the data collected from multimodal and multi-sensor devices. These insights can be used to support deliberate practice with personalised feedback and adaptation through Multimodal Learning Experiences (MLX). This chapter elaborates on the approaches, architectures, and methodologies in five different use cases that use multimodal learning analytics applications for deliberate practice.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Keep Me in the Loop: Real-Time Feedback with Multimodal Data.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; and Drachsler, H.\n\n\n \n\n\n\n International Journal of Artificial Intelligence in Education, 32(4): 1093–1118. December 2022.\n \n\n\n\n
\n\n\n\n \n \n \"KeepPaper\n  \n \n \n \"Keep paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{di_mitri_keep_2022,\n\ttitle = {Keep {Me} in the {Loop}: {Real}-{Time} {Feedback} with {Multimodal} {Data}},\n\tvolume = {32},\n\tcopyright = {All rights reserved},\n\tissn = {1560-4292, 1560-4306},\n\tshorttitle = {Keep {Me} in the {Loop}},\n\turl = {https://link.springer.com/10.1007/s40593-021-00281-z},\n\tdoi = {10.1007/s40593-021-00281-z},\n\tabstract = {Abstract\n            This paper describes the CPR Tutor, a real-time multimodal feedback system for cardiopulmonary resuscitation (CPR) training. The CPR Tutor detects training mistakes using recurrent neural networks. The CPR Tutor automatically recognises and assesses the quality of the chest compressions according to five CPR performance indicators. It detects training mistakes in real-time by analysing a multimodal data stream consisting of kinematic and electromyographic data. Based on this assessment, the CPR Tutor provides audio feedback to correct the most critical mistakes and improve the CPR performance. The mistake detection models of the CPR Tutor were trained using a dataset from 10 experts. Hence, we tested the validity of the CPR Tutor and the impact of its feedback functionality in a user study involving additional 10 participants. The CPR Tutor pushes forward the current state of the art of real-time multimodal tutors by providing: (1) an architecture design, (2) a methodological approach for delivering real-time feedback using multimodal data and (3) a field study on real-time feedback for CPR training. 
This paper details the results of a field study by quantitatively measuring the impact of the CPR Tutor feedback on the performance indicators and qualitatively analysing the participants’ questionnaire answers.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2024-09-14},\n\tjournal = {International Journal of Artificial Intelligence in Education},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Drachsler, Hendrik},\n\tmonth = dec,\n\tyear = {2022},\n\tpages = {1093--1118},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/WIDP8RKE/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Abstract This paper describes the CPR Tutor, a real-time multimodal feedback system for cardiopulmonary resuscitation (CPR) training. The CPR Tutor detects training mistakes using recurrent neural networks. The CPR Tutor automatically recognises and assesses the quality of the chest compressions according to five CPR performance indicators. It detects training mistakes in real-time by analysing a multimodal data stream consisting of kinematic and electromyographic data. Based on this assessment, the CPR Tutor provides audio feedback to correct the most critical mistakes and improve the CPR performance. The mistake detection models of the CPR Tutor were trained using a dataset from 10 experts. Hence, we tested the validity of the CPR Tutor and the impact of its feedback functionality in a user study involving additional 10 participants. The CPR Tutor pushes forward the current state of the art of real-time multimodal tutors by providing: (1) an architecture design, (2) a methodological approach for delivering real-time feedback using multimodal data and (3) a field study on real-time feedback for CPR training. This paper details the results of a field study by quantitatively measuring the impact of the CPR Tutor feedback on the performance indicators and qualitatively analysing the participants’ questionnaire answers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Impact of Intelligent Tutoring Systems on Mathematics Achievement of Underachieving Students.\n \n \n \n \n\n\n \n Khazanchi, R.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n In Langran, E., editor(s), Proceedings of Society for Information Technology & Teacher Education International Conference 2022, pages 1524–1534, San Diego, CA, United States, April 2022. Association for the Advancement of Computing in Education (AACE)\n \n\n\n\n
\n\n\n\n \n \n \"ImpactPaper\n  \n \n \n \"Impact paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{khazanchi_impact_2022,\n\taddress = {San Diego, CA, United States},\n\ttitle = {Impact of {Intelligent} {Tutoring} {Systems} on {Mathematics} {Achievement} of {Underachieving} {Students}},\n\tcopyright = {All rights reserved},\n\turl = {https://www.learntechlib.org/p/220916},\n\tabstract = {The educational landscape has changed considerably in the past decade. The focus has shifted from using traditional teaching methods to integrating technology in today’s curricula. Much of the emphasis is on fostering 21st-century skills by using technology in new and innovative ways. Student engagement is a crucial component of learning. Intelligent Tutoring Systems (ITSs) are computer software that uses artificial intelligence to provide adaptive, customized learning based on students’ needs. This quasi-experimental research study explored the effect of ITSs, such as ALEKS, on underachieving students’ mathematics achievement among 8th-grade students. This research study compared the results of pretest and posttest between teacher-led instructions versus ALEKS -led instructions from the two consecutive years. During the first year, only McGraw’s curriculum “Reveal” was used with no use of ALEKS. In the second year...},\n\tbooktitle = {Proceedings of {Society} for {Information} {Technology} \\& {Teacher} {Education} {International} {Conference} 2022},\n\tpublisher = {Association for the Advancement of Computing in Education (AACE)},\n\tauthor = {Khazanchi, Rashmi and Di Mitri, Daniele and Drachsler, Hendrik},\n\teditor = {Langran, Elizabeth},\n\tmonth = apr,\n\tyear = {2022},\n\tpages = {1524--1534},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/APT27RJ2/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n The educational landscape has changed considerably in the past decade. The focus has shifted from using traditional teaching methods to integrating technology in today’s curricula. Much of the emphasis is on fostering 21st-century skills by using technology in new and innovative ways. Student engagement is a crucial component of learning. Intelligent Tutoring Systems (ITSs) are computer software that uses artificial intelligence to provide adaptive, customized learning based on students’ needs. This quasi-experimental research study explored the effect of ITSs, such as ALEKS, on underachieving students’ mathematics achievement among 8th-grade students. This research study compared the results of pretest and posttest between teacher-led instructions versus ALEKS -led instructions from the two consecutive years. During the first year, only McGraw’s curriculum “Reveal” was used with no use of ALEKS. In the second year...\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Rise of Multimodal Tutors in Education: Insights from Recent Research.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; and Drachsler, H.\n\n\n \n\n\n\n In Handbook of Open, Distance and Digital Education, pages 1–20. Springer Nature Singapore, Singapore, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{di_mitri_rise_2022,\n\taddress = {Singapore},\n\ttitle = {The {Rise} of {Multimodal} {Tutors} in {Education}: {Insights} from {Recent} {Research}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-981-19035-1-9},\n\tshorttitle = {The {Rise} of {Multimodal} {Tutors} in {Education}},\n\turl = {https://link.springer.com/10.1007/978-981-19-0351-9_58-1},\n\tabstract = {Abstract\n            This chapter describes the insights derived by the design and development of the Multimodal Tutor, a system that uses artificial intelligence for providing digital feedback and to support psychomotor skills acquisition. In this chapter, we discuss the insights which we gained from eight studies: (1) an exploratory study combining physiological data and learning performance (Learning Pulse); (2) a literature survey on multimodal data for learning and a conceptual model (the Multimodal Learning Analytics Model); (3) an analysis of the technical challenges of Multimodal Learning Analytics (the Big Five Challenges); (4) a technological framework for using multimodal data for learning (the Multimodal Pipeline); (5) a data collection and storing system for multimodal data (the Learning Hub); (6) a data annotation tool for multimodal data (the Visual Inspection Tool); (7) a case study in Cardiopulmonary Resuscitation training (CPR Tutor) consisting of a feasibility study for detecting CPR mistakes; and (8) a real-time feedback study.},\n\tlanguage = {en},\n\turldate = {2024-09-14},\n\tbooktitle = {Handbook of {Open}, {Distance} and {Digital} {Education}},\n\tpublisher = {Springer Nature Singapore},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Drachsler, Hendrik},\n\tyear = {2022},\n\tdoi = {10.1007/978-981-19-0351-9_58-1},\n\tpages = {1--20},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/9KC26S7T/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Abstract This chapter describes the insights derived by the design and development of the Multimodal Tutor, a system that uses artificial intelligence for providing digital feedback and to support psychomotor skills acquisition. In this chapter, we discuss the insights which we gained from eight studies: (1) an exploratory study combining physiological data and learning performance (Learning Pulse); (2) a literature survey on multimodal data for learning and a conceptual model (the Multimodal Learning Analytics Model); (3) an analysis of the technical challenges of Multimodal Learning Analytics (the Big Five Challenges); (4) a technological framework for using multimodal data for learning (the Multimodal Pipeline); (5) a data collection and storing system for multimodal data (the Learning Hub); (6) a data annotation tool for multimodal data (the Visual Inspection Tool); (7) a case study in Cardiopulmonary Resuscitation training (CPR Tutor) consisting of a feasibility study for detecting CPR mistakes; and (8) a real-time feedback study.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CROSSMMLA & SLE Workshop: Learning Analytics for Smart Learning Environments Crossing Physical and Virtual Learning Spaces.\n \n \n \n \n\n\n \n Spikol, D.; Lorenzo, M. L. B.; Cukurova, M.; Lavoue, E.; Giannakos, M.; Ochoa, X.; Di Mitri, D.; and Hernandez-Leo, D.\n\n\n \n\n\n\n Companion Proceedings of the 12th,173. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"CROSSMMLAPaper\n  \n \n \n \"CROSSMMLA paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{spikol_crossmmla_2022,\n\ttitle = {{CROSSMMLA} \& {SLE} {Workshop}: {Learning} {Analytics} for {Smart} {Learning} {Environments} {Crossing} {Physical} and {Virtual} {Learning} {Spaces}},\n\tcopyright = {All rights reserved},\n\tshorttitle = {{CROSSMMLA} \& {SLE} {Workshop}},\n\turl = {https://www.zora.uzh.ch/id/eprint/218604/1/LAK22_CompanionProceedings.pdf#page=184},\n\turldate = {2024-09-14},\n\tjournal = {Companion Proceedings of the 12th},\n\tauthor = {Spikol, Daniel and Lorenzo, Miguel L. Bote and Cukurova, Mutlu and Lavoue, Elise and Giannakos, Michail and Ochoa, Xavier and Di Mitri, Daniele and Hernandez-Leo, Davinia},\n\tyear = {2022},\n\tpages = {173},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/WB6B294L/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the Second International Workshop on Multimodal Immersive Learning Systems (MILeS 2022).\n \n \n \n \n\n\n \n Sanusi, K. A. M.; Limbu, B.; Schneider, J.; Di Mitri, D.; and Klemke, R.\n\n\n \n\n\n\n Volume 3247 of CEUR Workshop ProceedingsCEUR, Toulouse, France, September 2022.\n ISSN: 1613-0073\n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n \n \"Proceedings paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{sanusi_proceedings_2022,\n\taddress = {Toulouse, France},\n\tseries = {{CEUR} {Workshop} {Proceedings}},\n\ttitle = {Proceedings of the {Second} {International} {Workshop} on {Multimodal} {Immersive} {Learning} {Systems} ({MILeS} 2022)},\n\tvolume = {3247},\n\tcopyright = {All rights reserved},\n\turl = {https://ceur-ws.org/Vol-3247/},\n\tlanguage = {en},\n\turldate = {2022-11-30},\n\tpublisher = {CEUR},\n\tauthor = {Sanusi, Khaleel Asyraaf Mat and Limbu, Bibeg and Schneider, Jan and Di Mitri, Daniele and Klemke, Roland},\n\tmonth = sep,\n\tyear = {2022},\n\tnote = {ISSN: 1613-0073},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/FPNW9SZC/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Superpowers in the Classroom: Hyperchalk is an Online Whiteboard for Learning Analytics Data Collection.\n \n \n \n\n\n \n Menzel, L.; Gombert, S.; Di Mitri, D.; and Drachsler, H.\n\n\n \n\n\n\n In Hilliger, I.; Muñoz-Merino, P. J.; De Laet, T.; Ortega-Arranz, A.; and Farrell, T., editor(s), Educating for a New Future: Making Sense of Technology-Enhanced Learning Adoption, of Lecture Notes in Computer Science, pages 463–469, Cham, 2022. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{menzel_superpowers_2022,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} in {Computer} {Science}},\n\ttitle = {Superpowers in the {Classroom}: {Hyperchalk} is an {Online} {Whiteboard} for {Learning} {Analytics} {Data} {Collection}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-16290-9},\n\tshorttitle = {Superpowers in the {Classroom}},\n\tdoi = {10.1007/978-3-031-16290-9_37},\n\tabstract = {In e-learning, collaborative online whiteboards have become a popular choice for implementing collaborative learning. However, existing solutions fail to deliver data in a way that allows for the application of learning analytics in this field. While the market offers several solutions, most of them cannot be integrated with existing learning management systems and do not provide data that can be used for learning analytics. To overcome this, we implement a digital collaborative whiteboard based on the open source Excalidraw and a custom back-end. The whiteboard can be self-hosted, it collects rich log data appropriate for learning analytics purposes and it integrates with learning management systems – such as Moodle – using the LTI 1.3 Advantage standard.},\n\tlanguage = {en},\n\tbooktitle = {Educating for a {New} {Future}: {Making} {Sense} of {Technology}-{Enhanced} {Learning} {Adoption}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Menzel, Lukas and Gombert, Sebastian and Di Mitri, Daniele and Drachsler, Hendrik},\n\teditor = {Hilliger, Isabel and Muñoz-Merino, Pedro J. and De Laet, Tinne and Ortega-Arranz, Alejandro and Farrell, Tracie},\n\tyear = {2022},\n\tpages = {463--469},\n}\n\n\n\n
\n
\n\n\n
\n In e-learning, collaborative online whiteboards have become a popular choice for implementing collaborative learning. However, existing solutions fail to deliver data in a way that allows for the application of learning analytics in this field. While the market offers several solutions, most of them cannot be integrated with existing learning management systems and do not provide data that can be used for learning analytics. To overcome this, we implement a digital collaborative whiteboard based on the open source Excalidraw and a custom back-end. The whiteboard can be self-hosted, it collects rich log data appropriate for learning analytics purposes and it integrates with learning management systems – such as Moodle – using the LTI 1.3 Advantage standard.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Designing the Learning Analytics Cockpit - A Dashboard that Enables Interventions.\n \n \n \n \n\n\n \n Karademir, O.; Ahmad, A.; Schneider, J.; Di Mitri, D.; Jivet, I.; and Drachsler, H.\n\n\n \n\n\n\n In De la Prieta, F.; Gennari, R.; Temperini, M.; Di Mascio, T.; Vittorini, P.; Kubincova, Z.; Popescu, E.; Rua Carneiro, D.; Lancia, L.; and Addone, A., editor(s), Methodologies and Intelligent Systems for Technology Enhanced Learning, 11th International Conference, of Lecture Notes in Networks and Systems, pages 95–104, Cham, 2022. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"Designing paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{karademir_designing_2022,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} in {Networks} and {Systems}},\n\ttitle = {Designing the {Learning} {Analytics} {Cockpit} - {A} {Dashboard} that {Enables} {Interventions}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-030-86618-1},\n\tdoi = {10.1007/978-3-030-86618-1_10},\n\tabstract = {This paper presents results from our design and evaluation studies of the Learning Analytics Cockpit (LA Cockpit) for a quiz app, which aims to provide lecturers with important information about students’ knowledge levels. We define a LA Cockpit as a tool for instructors that enables them to steer students’ learning process by providing a LA Dashboard which visualizes students’ learning indicators and an intervention feature enabling instructors to give feedback based on students’ knowledge levels. To address the needs of lecturers we applied the Double Diamond (DD) design process model which consists of four stages: discover, define, develop \\& refine. Following the DD process, we first conducted a qualitative study by interviewing four lecturers and student teachers to discover their needs. Results from the interviews allowed us to define requirements of the lecturers. We used these results to develop the first version of the tool where we refined it through informal feedback by the interviewed teachers. In preparation for a larger effectiveness-study, we evaluated the LA Cockpit in terms of usefulness and usability in a preliminary study with 16 university lecturers. Results from this qualitative study indicate that the LA Cockpit can measure the students’ knowledge level and supports self-reflection for lecturers. 
Moreover, results show that the LA Cockpit enables lecturers to address knowledge gaps and provide interventions to students before the exams.},\n\tlanguage = {en},\n\tbooktitle = {Methodologies and {Intelligent} {Systems} for {Technology} {Enhanced} {Learning}, 11th {International} {Conference}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Karademir, Onur and Ahmad, Atezaz and Schneider, Jan and Di Mitri, Daniele and Jivet, Ioana and Drachsler, Hendrik},\n\teditor = {De la Prieta, Fernando and Gennari, Rosella and Temperini, Marco and Di Mascio, Tania and Vittorini, Pierpaolo and Kubincova, Zuzana and Popescu, Elvira and Rua Carneiro, Davide and Lancia, Loreto and Addone, Agnese},\n\tyear = {2022},\n\tpages = {95--104},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/BWR2DNLN/file/view}\n}\n\n\n\n
\n
\n\n\n
\n This paper presents results from our design and evaluation studies of the Learning Analytics Cockpit (LA Cockpit) for a quiz app, which aims to provide lecturers with important information about students’ knowledge levels. We define a LA Cockpit as a tool for instructors that enables them to steer students’ learning process by providing a LA Dashboard which visualizes students’ learning indicators and an intervention feature enabling instructors to give feedback based on students’ knowledge levels. To address the needs of lecturers we applied the Double Diamond (DD) design process model which consists of four stages: discover, define, develop & refine. Following the DD process, we first conducted a qualitative study by interviewing four lecturers and student teachers to discover their needs. Results from the interviews allowed us to define requirements of the lecturers. We used these results to develop the first version of the tool where we refined it through informal feedback by the interviewed teachers. In preparation for a larger effectiveness-study, we evaluated the LA Cockpit in terms of usefulness and usability in a preliminary study with 16 university lecturers. Results from this qualitative study indicate that the LA Cockpit can measure the students’ knowledge level and supports self-reflection for lecturers. Moreover, results show that the LA Cockpit enables lecturers to address knowledge gaps and provide interventions to students before the exams.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Proceedings of the First International Workshop on Multimodal Artificial Intelligence in Education (MAIED 2021).\n \n \n \n \n\n\n \n Di Mitri, D.; Martinez-Maldonado, R.; Santos, O. C.; Schneider, J.; Sanusi, K. A.; Cukurova, M.; Spikol, D.; Molenaar, I.; Giannakos, M.; Klemke, R.; and Azevedo, R.\n\n\n \n\n\n\n In volume Vol-2902, pages 112. CEUR Workshop Proceedings, Aachen, Germany, July 2021.\n \n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n \n \"Proceedings paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{di_mitri_proceedings_2021,\n\taddress = {Aachen, Germany},\n\ttitle = {Proceedings of the {First} {International} {Workshop} on {Multimodal} {Artificial} {Intelligence} in {Education} ({MAIED} 2021)},\n\tvolume = {Vol-2902},\n\tcopyright = {Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC-BY-NC-ND)},\n\tisbn = {ISSN 1613-0073},\n\turl = {http://ceur-ws.org/Vol-2902/},\n\turldate = {2021-07-09},\n\tpublisher = {CEUR Workshop Proceedings},\n\tauthor = {Di Mitri, Daniele and Martinez-Maldonado, Roberto and Santos, Olga C. and Schneider, Jan and Sanusi, Khaleel Asyraaf and Cukurova, Mutlu and Spikol, Daniel and Molenaar, Inge and Giannakos, Michail and Klemke, Roland and Azevedo, Roger},\n\tmonth = jul,\n\tyear = {2021},\n\tpages = {112},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/L33XMIAN/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CROSSMMLA Futures: Collecting and analysing multimodal data across the physical and the virtual.\n \n \n \n \n\n\n \n Spikol, D.; Ochoa, X.; Worsley, M.; Di Mitri, D.; Cukurova, M.; Martinez-Maldonado, R.; and Schneider, J.\n\n\n \n\n\n\n In https://www. solaresearch. org/wp-content/uploads/2021/04/LAK21_CompanionProceedings. pdf, volume 2021. 11th International Learning Analytics and Knowledge Conference (LAK’21), 2021.\n \n\n\n\n
\n\n\n\n \n \n \"CROSSMMLAPaper\n  \n \n \n \"CROSSMMLA paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{spikol_crossmmla_2021,\n\ttitle = {{CROSSMMLA} {Futures}: {Collecting} and analysing multimodal data across the physical and the virtual},\n\tvolume = {2021},\n\tcopyright = {All rights reserved},\n\tshorttitle = {{CROSSMMLA} {Futures}},\n\turl = {https://discovery.ucl.ac.uk/id/eprint/10126482/},\n\turldate = {2024-09-14},\n\tbooktitle = {https://www. solaresearch. org/wp-content/uploads/2021/04/{LAK21}\\_CompanionProceedings. pdf},\n\tpublisher = {11th International Learning Analytics and Knowledge Conference (LAK’21)},\n\tauthor = {Spikol, Daniel and Ochoa, Xavier and Worsley, M. and Di Mitri, Daniele and Cukurova, Mutlu and Martinez-Maldonado, Roberto and Schneider, Jan},\n\tyear = {2021},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/GD38UJ3X/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MOBIUS: Smart Mobility Tracking with Smartphone Sensors.\n \n \n \n \n\n\n \n Di Mitri, D.; Asyraaf Mat Sanusi, K.; Trebing, K.; and Bromuri, S.\n\n\n \n\n\n\n In Paiva, S.; Lopes, S. I.; Zitouni, R.; Gupta, N.; Lopes, S. F.; and Yonezawa, T., editor(s), Science and Technologies for Smart Cities, of Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering, pages 462–475, Cham, 2021. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"MOBIUS: paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_mobius:_2021,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} of the {Institute} for {Computer} {Sciences}, {Social} {Informatics} and {Telecommunications} {Engineering}},\n\ttitle = {{MOBIUS}: {Smart} {Mobility} {Tracking} with {Smartphone} {Sensors}},\n\tcopyright = {Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC-BY-NC-ND)},\n\tisbn = {978-3-030-76063-2},\n\tshorttitle = {{MOBIUS}},\n\tdoi = {10.1007/978-3-030-76063-2_31},\n\tabstract = {In this paper we introduce MOBIUS, a smartphone-based system for remote tracking of citizens’ movements. By collecting smartphone’s sensor data such as accelerometer and gyroscope, along with self-report data, the MOBIUS system allows to classify the users’ mode of transportation. With the MOBIUS app the users can also activate GPS tracking to visualise their journeys and travelling speed on a map. The MOBIUS app is an example of a tracing app which can provide more insights into how people move around in an urban area. In this paper, we introduce the motivation, the architectural design and development of the MOBIUS app. To further test its validity, we run a user study collecting data from multiple users. The collected data are used to train a deep convolutional neural network architecture which classifies the transportation modes using with a mean accuracy of 89\\%.},\n\tlanguage = {en},\n\tbooktitle = {Science and {Technologies} for {Smart} {Cities}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Di Mitri, Daniele and Asyraaf Mat Sanusi, Khaleel and Trebing, Kevin and Bromuri, Stefano},\n\teditor = {Paiva, Sara and Lopes, Sérgio Ivan and Zitouni, Rafik and Gupta, Nishu and Lopes, Sérgio F. and Yonezawa, Takuro},\n\tyear = {2021},\n\tpages = {462--475},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/E9FHSZAE/file/view}\n}\n\n\n\n
\n
\n\n\n
\n In this paper we introduce MOBIUS, a smartphone-based system for remote tracking of citizens’ movements. By collecting smartphone’s sensor data such as accelerometer and gyroscope, along with self-report data, the MOBIUS system allows to classify the users’ mode of transportation. With the MOBIUS app the users can also activate GPS tracking to visualise their journeys and travelling speed on a map. The MOBIUS app is an example of a tracing app which can provide more insights into how people move around in an urban area. In this paper, we introduce the motivation, the architectural design and development of the MOBIUS app. To further test its validity, we run a user study collecting data from multiple users. The collected data are used to train a deep convolutional neural network architecture which classifies the transportation modes with a mean accuracy of 89%.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Table Tennis Tutor: Forehand Strokes Classification Based on Multimodal Data and Neural Networks.\n \n \n \n \n\n\n \n Mat Sanusi, K. A.; Di Mitri, D.; Limbu, B.; and Klemke, R.\n\n\n \n\n\n\n Sensors, 21(9): 3121. January 2021.\n Number: 9 Publisher: Multidisciplinary Digital Publishing Institute\n\n\n\n
\n\n\n\n \n \n \"TablePaper\n  \n \n \n \"Table paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{mat_sanusi_table_2021,\n\ttitle = {Table {Tennis} {Tutor}: {Forehand} {Strokes} {Classification} {Based} on {Multimodal} {Data} and {Neural} {Networks}},\n\tvolume = {21},\n\tcopyright = {http://creativecommons.org/licenses/by/3.0/},\n\tshorttitle = {Table {Tennis} {Tutor}},\n\turl = {https://www.mdpi.com/1424-8220/21/9/3121},\n\tdoi = {10.3390/s21093121},\n\tabstract = {Beginner table-tennis players require constant real-time feedback while learning the fundamental techniques. However, due to various constraints such as the mentor’s inability to be around all the time, expensive sensors and equipment for sports training, beginners are unable to get the immediate real-time feedback they need during training. Sensors have been widely used to train beginners and novices for various skills development, including psychomotor skills. Sensors enable the collection of multimodal data which can be utilised with machine learning to classify training mistakes, give feedback, and further improve the learning outcomes. In this paper, we introduce the Table Tennis Tutor (T3), a multi-sensor system consisting of a smartphone device with its built-in sensors for collecting motion data and a Microsoft Kinect for tracking body position. We focused on the forehand stroke mistake detection. We collected a dataset recording an experienced table tennis player performing 260 short forehand strokes (correct) and mimicking 250 long forehand strokes (mistake). We analysed and annotated the multimodal data for training a recurrent neural network that classifies correct and incorrect strokes. To investigate the accuracy level of the aforementioned sensors, three combinations were validated in this study: smartphone sensors only, the Kinect only, and both devices combined. The results of the study show that smartphone sensors alone perform sub-par than the Kinect, but similar with better precision together with the Kinect. 
To further strengthen T3’s potential for training, an expert interview session was held virtually with a table tennis coach to investigate the coach’s perception of having a real-time feedback system to assist beginners during training sessions. The outcome of the interview shows positive expectations and provided more inputs that can be beneficial for the future implementations of the T3.},\n\tlanguage = {en},\n\tnumber = {9},\n\turldate = {2021-05-12},\n\tjournal = {Sensors},\n\tauthor = {Mat Sanusi, Khaleel Asyraaf and Di Mitri, Daniele and Limbu, Bibeg and Klemke, Roland},\n\tmonth = jan,\n\tyear = {2021},\n\tnote = {Number: 9\nPublisher: Multidisciplinary Digital Publishing Institute},\n\tpages = {3121},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/FUR447F4/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Beginner table-tennis players require constant real-time feedback while learning the fundamental techniques. However, due to various constraints such as the mentor’s inability to be around all the time, expensive sensors and equipment for sports training, beginners are unable to get the immediate real-time feedback they need during training. Sensors have been widely used to train beginners and novices for various skills development, including psychomotor skills. Sensors enable the collection of multimodal data which can be utilised with machine learning to classify training mistakes, give feedback, and further improve the learning outcomes. In this paper, we introduce the Table Tennis Tutor (T3), a multi-sensor system consisting of a smartphone device with its built-in sensors for collecting motion data and a Microsoft Kinect for tracking body position. We focused on the forehand stroke mistake detection. We collected a dataset recording an experienced table tennis player performing 260 short forehand strokes (correct) and mimicking 250 long forehand strokes (mistake). We analysed and annotated the multimodal data for training a recurrent neural network that classifies correct and incorrect strokes. To investigate the accuracy level of the aforementioned sensors, three combinations were validated in this study: smartphone sensors only, the Kinect only, and both devices combined. The results of the study show that smartphone sensors alone perform sub-par than the Kinect, but similar with better precision together with the Kinect. To further strengthen T3’s potential for training, an expert interview session was held virtually with a table tennis coach to investigate the coach’s perception of having a real-time feedback system to assist beginners during training sessions. The outcome of the interview shows positive expectations and provided more inputs that can be beneficial for the future implementations of the T3.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mobile Sensing with Smart Wearables of the Physical Context of Distance Learning Students to Consider Its Effects on Learning.\n \n \n \n \n\n\n \n Ciordas-Hertel, G.; Rödling, S.; Schneider, J.; Di Mitri, D.; Weidlich, J.; and Drachsler, H.\n\n\n \n\n\n\n Sensors, 21(19): 6649. January 2021.\n Number: 19 Publisher: Multidisciplinary Digital Publishing Institute\n\n\n\n
\n\n\n\n \n \n \"MobilePaper\n  \n \n \n \"Mobile paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ciordas-hertel_mobile_2021,\n\ttitle = {Mobile {Sensing} with {Smart} {Wearables} of the {Physical} {Context} of {Distance} {Learning} {Students} to {Consider} {Its} {Effects} on {Learning}},\n\tvolume = {21},\n\tcopyright = {http://creativecommons.org/licenses/by/3.0/},\n\tissn = {1424-8220},\n\turl = {https://www.mdpi.com/1424-8220/21/19/6649},\n\tdoi = {10.3390/s21196649},\n\tabstract = {Research shows that various contextual factors can have an impact on learning. Some of these factors can originate from the physical learning environment (PLE) in this regard. When learning from home, learners have to organize their PLE by themselves. This paper is concerned with identifying, measuring, and collecting factors from the PLE that may affect learning using mobile sensing. More specifically, this paper first investigates which factors from the PLE can affect distance learning. The results identify nine types of factors from the PLE associated with cognitive, physiological, and affective effects on learning. Subsequently, this paper examines which instruments can be used to measure the investigated factors. The results highlight several methods involving smart wearables (SWs) to measure these factors from PLEs successfully. Third, this paper explores how software infrastructure can be designed to measure, collect, and process the identified multimodal data from and about the PLE by utilizing mobile sensing. The design and implementation of the Edutex software infrastructure described in this paper will enable learning analytics stakeholders to use data from and about the learners’ physical contexts. Edutex achieves this by utilizing sensor data from smartphones and smartwatches, in addition to response data from experience samples and questionnaires from learners’ smartwatches. Finally, this paper evaluates to what extent the developed infrastructure can provide relevant information about the learning context in a field study with 10 participants. 
The evaluation demonstrates how the software infrastructure can contextualize multimodal sensor data, such as lighting, ambient noise, and location, with user responses in a reliable, efficient, and protected manner.},\n\tlanguage = {en},\n\tnumber = {19},\n\turldate = {2022-05-09},\n\tjournal = {Sensors},\n\tauthor = {Ciordas-Hertel, George-Petru and Rödling, Sebastian and Schneider, Jan and Di Mitri, Daniele and Weidlich, Joshua and Drachsler, Hendrik},\n\tmonth = jan,\n\tyear = {2021},\n\tnote = {Number: 19\nPublisher: Multidisciplinary Digital Publishing Institute},\n\tpages = {6649},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/5JEW3Q35/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Research shows that various contextual factors can have an impact on learning. Some of these factors can originate from the physical learning environment (PLE) in this regard. When learning from home, learners have to organize their PLE by themselves. This paper is concerned with identifying, measuring, and collecting factors from the PLE that may affect learning using mobile sensing. More specifically, this paper first investigates which factors from the PLE can affect distance learning. The results identify nine types of factors from the PLE associated with cognitive, physiological, and affective effects on learning. Subsequently, this paper examines which instruments can be used to measure the investigated factors. The results highlight several methods involving smart wearables (SWs) to measure these factors from PLEs successfully. Third, this paper explores how software infrastructure can be designed to measure, collect, and process the identified multimodal data from and about the PLE by utilizing mobile sensing. The design and implementation of the Edutex software infrastructure described in this paper will enable learning analytics stakeholders to use data from and about the learners’ physical contexts. Edutex achieves this by utilizing sensor data from smartphones and smartwatches, in addition to response data from experience samples and questionnaires from learners’ smartwatches. Finally, this paper evaluates to what extent the developed infrastructure can provide relevant information about the learning context in a field study with 10 participants. The evaluation demonstrates how the software infrastructure can contextualize multimodal sensor data, such as lighting, ambient noise, and location, with user responses in a reliable, efficient, and protected manner.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Are We There Yet? - A Systematic Literature Review on Chatbots in Education.\n \n \n \n \n\n\n \n Wollny, S.; Schneider, J.; Di Mitri, D.; Weidlich, J.; Rittberger, M.; and Drachsler, H.\n\n\n \n\n\n\n Frontiers in Artificial Intelligence, 4. 2021.\n Publisher: Frontiers\n\n\n\n
\n\n\n\n \n \n \"ArePaper\n  \n \n \n \"Are paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{wollny_are_2021,\n\ttitle = {Are {We} {There} {Yet}? - {A} {Systematic} {Literature} {Review} on {Chatbots} in {Education}},\n\tvolume = {4},\n\tcopyright = {All rights reserved},\n\tissn = {2624-8212},\n\tshorttitle = {Are {We} {There} {Yet}?},\n\turl = {https://www.frontiersin.org/articles/10.3389/frai.2021.654924/full?&utm_source=Email_to_authors_&utm_medium=Email&utm_content=T1_11.5e1_author&utm_campaign=Email_publication&field=&journalName=Frontiers_in_Artificial_Intelligence&id=654924},\n\tdoi = {10.3389/frai.2021.654924},\n\tabstract = {Chatbots are a promising technology with the potential to enhance workplaces and everyday life. In terms of scalability and accessibility, they also offer unique possibilities as communication and information tools for digital learning. In this paper, we present a systematic literature review investigating the areas of education where chatbots have already been applied, explore the pedagogical roles of chatbots, the use of chatbots for mentoring purposes, and their potential to personalize education. We conducted a preliminary analysis of 2678 publications to perform this literature review, which allowed us to identify 74 relevant publications for chatbots’ application in education. Through this, we address five research questions that, together, allow us to explore the current state-of-the-art of this educational technology. We conclude our systematic review by pointing to three main research challenges: 1. Aligning chatbot evaluations with implementation objectives, 2. Exploring the potential of chatbots for mentoring students, and 3. Exploring and leveraging adaptation capabilities of chatbots. 
For all three challenges, we discuss opportunities for future research.},\n\tlanguage = {English},\n\turldate = {2021-07-15},\n\tjournal = {Frontiers in Artificial Intelligence},\n\tauthor = {Wollny, Sebastian and Schneider, Jan and Di Mitri, Daniele and Weidlich, Joshua and Rittberger, Marc and Drachsler, Hendrik},\n\tyear = {2021},\n\tnote = {Publisher: Frontiers},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/E4HAWUCM/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Chatbots are a promising technology with the potential to enhance workplaces and everyday life. In terms of scalability and accessibility, they also offer unique possibilities as communication and information tools for digital learning. In this paper, we present a systematic literature review investigating the areas of education where chatbots have already been applied, explore the pedagogical roles of chatbots, the use of chatbots for mentoring purposes, and their potential to personalize education. We conducted a preliminary analysis of 2678 publications to perform this literature review, which allowed us to identify 74 relevant publications for chatbots’ application in education. Through this, we address five research questions that, together, allow us to explore the current state-of-the-art of this educational technology. We conclude our systematic review by pointing to three main research challenges: 1. Aligning chatbot evaluations with implementation objectives, 2. Exploring the potential of chatbots for mentoring students, and 3. Exploring and leveraging adaptation capabilities of chatbots. For all three challenges, we discuss opportunities for future research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A short history, emerging challenges and co-operation structures for Artificial Intelligence in Education.\n \n \n \n \n\n\n \n Mavrikis, M.; Cukurova, M.; Di Mitri, D.; Schneider, J.; and Drachsler, H.\n\n\n \n\n\n\n In Bildung und Erziehung, volume 74, pages 249–263. Vandenhoeck & Ruprecht, August 2021.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{mavrikis_short_2021,\n\ttitle = {A short history, emerging challenges and co-operation structures for {Artificial} {Intelligence} in {Education}},\n\tvolume = {74},\n\tcopyright = {All rights reserved},\n\turl = {https://www.vr-elibrary.de/doi/abs/10.13109/buer.2021.74.3.249},\n\tabstract = {To accompany the special issue in Artificial Intelligence and Education, this article presents a short history of research in the field and summarises emerging challenges. We highlight key paradigm shifts that are becoming possible but also the need to pay attention to theory, implementation and pedagogy while adhering to ethical principles. We conclude by drawing attention to international co-operation structures in the field that can support the interdisciplinary perspectives and methods required to undertake research in the area.},\n\turldate = {2021-09-16},\n\tbooktitle = {Bildung und {Erziehung}},\n\tpublisher = {Vandenhoeck \\& Ruprecht},\n\tauthor = {Mavrikis, Manolis and Cukurova, Mutlu and Di Mitri, Daniele and Schneider, Jan and Drachsler, Hendrik},\n\tmonth = aug,\n\tyear = {2021},\n\tpages = {249--263},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/KHTNGTJ4/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n To accompany the special issue in Artificial Intelligence and Education, this article presents a short history of research in the field and summarises emerging challenges. We highlight key paradigm shifts that are becoming possible but also the need to pay attention to theory, implementation and pedagogy while adhering to ethical principles. We conclude by drawing attention to international co-operation structures in the field that can support the interdisciplinary perspectives and methods required to undertake research in the area.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the First International Workshop on Multimodal Immersive Learning Systems (MILeS 2021).\n \n \n \n \n\n\n \n Klemke, R.; Sanusi, K. A. M.; Majonica, D.; Richert, A.; Varney, V.; Keller, T.; Schneider, J.; Mitri, D. D.; Ciordas-Hertel, G.; Cardenas-Hernandez, F. P.; Romano, G.; Kravčík, M.; Paaßen, B.; Klamma, R.; Slupczynski, M.; Klatt, S.; Geisen, M.; Baumgartner, T.; and Riedl, N.\n\n\n \n\n\n\n Volume 2979 of CEUR Workshop ProceedingsCEUR, Online (Bozen-Bolzano, Italy), September 2021.\n ISSN: 1613-0073\n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n \n \"Proceedings paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{klemke_proceedings_2021,\n\taddress = {Online (Bozen-Bolzano, Italy)},\n\tseries = {{CEUR} {Workshop} {Proceedings}},\n\ttitle = {Proceedings of the {First} {International} {Workshop} on {Multimodal} {Immersive} {Learning} {Systems} ({MILeS} 2021)},\n\tvolume = {2979},\n\tcopyright = {All rights reserved},\n\turl = {https://ceur-ws.org/Vol-2979/},\n\tlanguage = {en},\n\turldate = {2022-11-30},\n\tpublisher = {CEUR},\n\tauthor = {Klemke, Roland and Sanusi, Khaleel Asyraaf Mat and Majonica, Daniel and Richert, Anja and Varney, Valérie and Keller, Tobias and Schneider, Jan and Mitri, Daniele Di and Ciordas-Hertel, George-Petru and Cardenas-Hernandez, Fernando P. and Romano, Gianluca and Kravčík, Miloš and Paaßen, Benjamin and Klamma, Ralf and Slupczynski, Michal and Klatt, Stefanie and Geisen, Mai and Baumgartner, Tobias and Riedl, Nina},\n\tmonth = sep,\n\tyear = {2021},\n\tnote = {ISSN: 1613-0073},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/JRLM5XY8/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analysis of the “D’oh!” Moments. Physiological Markers of Performance in Cognitive Switching Tasks.\n \n \n \n \n\n\n \n Buraha, T.; Schneider, J.; Di Mitri, D.; and Schiffner, D.\n\n\n \n\n\n\n In De Laet, T.; Klemke, R.; Alario-Hoyos, C.; Hilliger, I.; and Ortega-Arranz, A., editor(s), Technology-Enhanced Learning for a Free, Safe, and Sustainable World, of Lecture Notes in Computer Science, pages 137–148, Cham, 2021. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"Analysis paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{buraha_analysis_2021,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} in {Computer} {Science}},\n\ttitle = {Analysis of the “{D}’oh!” {Moments}. {Physiological} {Markers} of {Performance} in {Cognitive} {Switching} {Tasks}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-030-86436-1},\n\tdoi = {10.1007/978-3-030-86436-1_11},\n\tabstract = {The link between the body and mind has fascinated philosophers and scientists for ages. The increasing availability of sensor technologies has enabled the possibility to explore this link even deeper, providing some evidence that certain physiological measurements such as galvanic skin response can have in the performance of different learning activities. In this paper, we explore the link between learners’ performance of cognitive tasks and their physiological state with the use of Multimodal Learning Analytics (MMLA). We used MMLA tools and techniques to collect, annotate, and analyse physiological data from 16 participants wearing an Empatica E4 wristband while engaging in task-switching cognitive exercises. The collected data include temperature, blood volume pulse, heart rate variability, galvanic skin response, and screen recording from each participant while performing the exercises. 
To examine the link between cognitive performance we applied a preliminary qualitative analysis to galvanic skin response and tested different Artificial Intelligence techniques to differentiate between productive and unproductive performance.},\n\tlanguage = {en},\n\tbooktitle = {Technology-{Enhanced} {Learning} for a {Free}, {Safe}, and {Sustainable} {World}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Buraha, Tetiana and Schneider, Jan and Di Mitri, Daniele and Schiffner, Daniel},\n\teditor = {De Laet, Tinne and Klemke, Roland and Alario-Hoyos, Carlos and Hilliger, Isabel and Ortega-Arranz, Alejandro},\n\tyear = {2021},\n\tpages = {137--148},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/BKBWMYI5/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n The link between the body and mind has fascinated philosophers and scientists for ages. The increasing availability of sensor technologies has enabled the possibility to explore this link even deeper, providing some evidence that certain physiological measurements such as galvanic skin response can have in the performance of different learning activities. In this paper, we explore the link between learners’ performance of cognitive tasks and their physiological state with the use of Multimodal Learning Analytics (MMLA). We used MMLA tools and techniques to collect, annotate, and analyse physiological data from 16 participants wearing an Empatica E4 wristband while engaging in task-switching cognitive exercises. The collected data include temperature, blood volume pulse, heart rate variability, galvanic skin response, and screen recording from each participant while performing the exercises. To examine the link between cognitive performance we applied a preliminary qualitative analysis to galvanic skin response and tested different Artificial Intelligence techniques to differentiate between productive and unproductive performance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Incorporating Social–Emotional Learning to Build Positive Behaviors.\n \n \n \n\n\n \n Khazanchi, R.; Khazanchi, P.; Mehta, V.; and Tuli, N.\n\n\n \n\n\n\n Kappa Delta Pi Record, 57: 11–17. January 2021.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{khazanchi_incorporating_2021,\n\ttitle = {Incorporating {Social}–{Emotional} {Learning} to {Build} {Positive} {Behaviors}},\n\tvolume = {57},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1080/00228958.2021.1851581},\n\tabstract = {The authors explore the role of social–emotional learning in developing positive behaviors in students and the related challenges faced by teachers in the United States and India.},\n\tjournal = {Kappa Delta Pi Record},\n\tauthor = {Khazanchi, Rashmi and Khazanchi, Pankaj and Mehta, Vinita and Tuli, Neetu},\n\tmonth = jan,\n\tyear = {2021},\n\tpages = {11--17},\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n The authors explore the role of social–emotional learning in developing positive behaviors in students and the related challenges faced by teachers in the United States and India.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2020\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Der multimodale Lern-Hub: Ein Werkzeug zur Erfassung individualisierbarer und sensorgestützter multimodaler Lernerfahrungen.\n \n \n \n \n\n\n \n Schneider, J.; Di Mitri, D.; Limbu, B.; and Drachsler, H.\n\n\n \n\n\n\n In Fürst, R. A., editor(s), Digitale Bildung und Künstliche Intelligenz in Deutschland, pages 537–557. Springer Fachmedien Wiesbaden, Wiesbaden, 2020.\n \n\n\n\n
\n\n\n\n \n \n \"DerPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{furst_multimodale_2020,\n\taddress = {Wiesbaden},\n\ttitle = {Der multimodale {Lern}-{Hub}: {Ein} {Werkzeug} zur {Erfassung} individualisierbarer und sensorgestützter multimodaler {Lernerfahrungen}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-658-30524-6},\n\tshorttitle = {Der multimodale {Lern}-{Hub}},\n\turl = {http://link.springer.com/10.1007/978-3-658-30525-3_23},\n\tlanguage = {de},\n\turldate = {2024-09-14},\n\tbooktitle = {Digitale {Bildung} und {Künstliche} {Intelligenz} in {Deutschland}},\n\tpublisher = {Springer Fachmedien Wiesbaden},\n\tauthor = {Schneider, Jan and Di Mitri, Daniele and Limbu, Bibeg and Drachsler, Hendrik},\n\teditor = {Fürst, Ronny Alexander},\n\tyear = {2020},\n\tdoi = {10.1007/978-3-658-30525-3_23},\n\tpages = {537--557},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Preface: CrossMMLA in practice: Collecting, annotating and analyzing multimodal data across spaces.\n \n \n \n \n\n\n \n Giannakos, M.; Spikol, D.; Molenaar, I.; Di Mitri, D.; Sharma, K.; Ochoa, X.; and Hammad, R.\n\n\n \n\n\n\n In CEUR Workshop Proceedings, volume 2610, 2020. CEUR Workshop Proceedings\n \n\n\n\n
\n\n\n\n \n \n \"Preface:Paper\n  \n \n \n \"Preface: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{giannakos_preface:_2020,\n\ttitle = {Preface: {CrossMMLA} in practice: {Collecting}, annotating and analyzing multimodal data across spaces},\n\tvolume = {2610},\n\tcopyright = {All rights reserved},\n\turl = {http://ceur-ws.org/Vol-2610/xpreface.pdf},\n\turldate = {2024-09-14},\n\tbooktitle = {{CEUR} {Workshop} {Proceedings}},\n\tpublisher = {CEUR Workshop Proceedings},\n\tauthor = {Giannakos, M. and Spikol, D. and Molenaar, I. and Di Mitri, D. and Sharma, K. and Ochoa, X. and Hammad, R.},\n\tyear = {2020},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/MCMRUH9G/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of CrossMMLA in practice: Collecting, annotating and analyzing multimodal data across spaces co-located with 10th International Learning and Analytics Conference (LAK 2020).\n \n \n \n \n\n\n \n Giannakos, M. N.; Spikol, D.; Molenaar, I.; Di Mitri, D.; Sharma, K.; Ochoa, X.; and Hammad, R.\n\n\n \n\n\n\n 2020.\n Accepted: 2020-08-31T20:29:36Z Publisher: Aachen, Germany : CEUR-WS\n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n \n \"Proceedings paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{giannakos_proceedings_2020,\n\ttitle = {Proceedings of {CrossMMLA} in practice: {Collecting}, annotating and analyzing multimodal data across spaces co-located with 10th {International} {Learning} and {Analytics} {Conference} ({LAK} 2020)},\n\tcopyright = {Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC-BY-NC-ND)},\n\tshorttitle = {Proceedings of {CrossMMLA} in practice},\n\turl = {http://ceur-ws.org/Vol-2610/},\n\tabstract = {CrossMMLA in practice (24 March 2020)},\n\tlanguage = {English (eng)},\n\turldate = {2021-02-16},\n\tauthor = {Giannakos, M. N. and Spikol, D. and Molenaar, I. and Di Mitri, D. and Sharma, K. and Ochoa, X. and Hammad, R.},\n\tyear = {2020},\n\tnote = {Accepted: 2020-08-31T20:29:36Z\nPublisher: Aachen, Germany : CEUR-WS},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/QML3JSSD/file/view}\n}\n\n\n\n
\n
\n\n\n
\n CrossMMLA in practice (24 March 2020)\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Multimodal Tutor: Adaptive Feedback from Multimodal Experiences.\n \n \n \n \n\n\n \n Di Mitri, D.\n\n\n \n\n\n\n Ph.D. Thesis, The Open University of The Netherlands, Heerlen, The Netherlands, September 2020.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{di_mitri_multimodal_2020,\n\taddress = {Heerlen, The Netherlands},\n\ttitle = {The {Multimodal} {Tutor}: {Adaptive} {Feedback} from {Multimodal} {Experiences}},\n\tcopyright = {All rights reserved},\n\tshorttitle = {The {Multimodal} {Tutor}},\n\turl = {https://research.ou.nl/en/publications/the-multimodal-tutor-adaptive-feedback-from-multimodal-experience},\n\tabstract = {This doctoral thesis describes the journey of ideation, prototyping and empirical testing of the Multimodal Tutor, a system designed for providing digital feedback that supports psychomotor skills acquisition using learning and multimodal data capturing. The feedback is given in real-time with machine-driven assessment of the learner's task execution. The predictions are tailored by supervised machine learning models trained with human annotated samples. The main contributions of this thesis are: a literature survey on multimodal data for learning, a conceptual model (the Multimodal Learning Analytics Model), a technological framework (the Multimodal Pipeline), a data annotation tool (the Visual Inspection Tool) and a case study in Cardiopulmonary Resuscitation training (CPR Tutor). The CPR Tutor generates real-time, adaptive feedback using kinematic and myographic data and neural networks.},\n\tlanguage = {English},\n\turldate = {2021-01-08},\n\tschool = {The Open University of The Netherlands},\n\tauthor = {Di Mitri, Daniele},\n\tmonth = sep,\n\tyear = {2020},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/S6VVK3XR/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n This doctoral thesis describes the journey of ideation, prototyping and empirical testing of the Multimodal Tutor, a system designed for providing digital feedback that supports psychomotor skills acquisition using learning and multimodal data capturing. The feedback is given in real-time with machine-driven assessment of the learner's task execution. The predictions are tailored by supervised machine learning models trained with human annotated samples. The main contributions of this thesis are: a literature survey on multimodal data for learning, a conceptual model (the Multimodal Learning Analytics Model), a technological framework (the Multimodal Pipeline), a data annotation tool (the Visual Inspection Tool) and a case study in Cardiopulmonary Resuscitation training (CPR Tutor). The CPR Tutor generates real-time, adaptive feedback using kinematic and myographic data and neural networks.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Real-Time Multimodal Feedback with the CPR Tutor.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Trebing, K.; Sopka, S.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n In Bittencourt, I.; Cukurova, M.; and Muldner, K., editor(s), Artificial Intelligence in Education (AIED'2020), pages 141–152, Cham, Switzerland, July 2020. Springer, Cham\n \n\n\n\n
\n\n\n\n \n \n \"Real-TimePaper\n  \n \n \n \"Real-Time paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_real-time_2020,\n\taddress = {Cham, Switzerland},\n\ttitle = {Real-{Time} {Multimodal} {Feedback} with the {CPR} {Tutor}},\n\tcopyright = {All rights reserved},\n\turl = {https://doi.org/10.1007/978-3-030-52237-7_12},\n\tdoi = {10.1007/978-3-030-52237-7_12},\n\tabstract = {We developed the CPR Tutor, a real-time multimodal feedback system for cardiopulmonary resuscitation (CPR) training. The CPR Tutor detects mistakes using recurrent neural networks for real-time time-series classification. From a multimodal data stream consisting of kinematic and electromyographic data, the CPR Tutor system automatically detects the chest compressions, which are then classified and assessed according to five performance indicators. Based on this assessment, the CPR Tutor provides audio feedback to correct the most critical mistakes and improve the CPR performance. To test the validity of the CPR Tutor, we first collected the data corpus from 10 experts used for model training. Hence, to test the impact of the feedback functionality, we ran a user study involving 10 participants. The CPR Tutor pushes forward the current state of the art of real-time multimodal tutors by providing: 1) an architecture design, 2) a methodological approach to design multimodal feedback and 3) a field study on real-time feedback for CPR training.},\n\tbooktitle = {Artificial {Intelligence} in {Education} ({AIED}'2020)},\n\tpublisher = {Springer, Cham},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Trebing, Kevin and Sopka, Sasa and Specht, Marcus and Drachsler, Hendrik},\n\teditor = {Bittencourt, I.I. and Cukurova, M. and Muldner, K.},\n\tmonth = jul,\n\tyear = {2020},\n\tpages = {141--152},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/2HF8PXAD/file/view}\n}\n
\n
\n\n\n
\n We developed the CPR Tutor, a real-time multimodal feedback system for cardiopulmonary resuscitation (CPR) training. The CPR Tutor detects mistakes using recurrent neural networks for real-time time-series classification. From a multimodal data stream consisting of kinematic and electromyographic data, the CPR Tutor system automatically detects the chest compressions, which are then classified and assessed according to five performance indicators. Based on this assessment, the CPR Tutor provides audio feedback to correct the most critical mistakes and improve the CPR performance. To test the validity of the CPR Tutor, we first collected the data corpus from 10 experts used for model training. Hence, to test the impact of the feedback functionality, we ran a user study involving 10 participants. The CPR Tutor pushes forward the current state of the art of real-time multimodal tutors by providing: 1) an architecture design, 2) a methodological approach to design multimodal feedback and 3) a field study on real-time feedback for CPR training.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Read Between the Lines: An Annotation Tool for Multimodal Data for Learning.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Klemke, R.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n In Proceedings of the 9th International Conference on Learning Analytics & Knowledge, pages 51–60, Tempe AZ USA, March 2019. ACM\n \n\n\n\n
\n\n\n\n \n \n \"ReadPaper\n  \n \n \n \"Read paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_read_2019,\n\taddress = {Tempe AZ USA},\n\ttitle = {Read {Between} the {Lines}: {An} {Annotation} {Tool} for {Multimodal} {Data} for {Learning}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4503-6256-6},\n\tshorttitle = {Read {Between} the {Lines}},\n\turl = {https://dl.acm.org/doi/10.1145/3303772.3303776},\n\tdoi = {10.1145/3303772.3303776},\n\tlanguage = {en},\n\turldate = {2024-09-14},\n\tbooktitle = {Proceedings of the 9th {International} {Conference} on {Learning} {Analytics} \\& {Knowledge}},\n\tpublisher = {ACM},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Klemke, Roland and Specht, Marcus and Drachsler, Hendrik},\n\tmonth = mar,\n\tyear = {2019},\n\tpages = {51--60},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/2CYXPV5R/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal Pipeline: A generic approach for handling multimodal data for supporting learning.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n In AIMA4EDU Workshop in IJCAI 2019 AI-based Multimodal Analytics for Understanding Human Learning in Real-world Educational Contexts, pages 2–4, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Multimodal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_multimodal_2019,\n\ttitle = {Multimodal {Pipeline}: {A} generic approach for handling multimodal data for supporting learning},\n\tcopyright = {All rights reserved},\n\tabstract = {In this demo paper, we introduce the Multimodal Pipeline, a prototypical approach for the collection, storing, annotation, processing and exploitation of multimodal data for supporting learning. At the current stage of development, the Multimodal Pipeline consists of two relevant prototypes: 1) Multimodal Learning Hub for the collection and storing of sensor data from multiple applications and 2) the Visual Inspection Tool for visualisation and annotation of the recorded sessions. The Multimodal Pipeline is designed to be a flexible system useful for supporting psychomotor skills in a variety of learning scenarios such as presentation skills, medical simulation with patient manikins or calligraphy learning. The Multimodal Pipeline can be configured to serve different support strategies, including detecting mistakes and prompting live feedback in an intelligent tutoring system or stimulating self-reflection through a learning analytics dashboard.},\n\tbooktitle = {{AIMA4EDU} {Workshop} in {IJCAI} 2019 {AI}-based {Multimodal} {Analytics} for {Understanding} {Human} {Learning} in {Real}-world {Educational} {Contexts}},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Specht, Marcus and Drachsler, Hendrik},\n\tyear = {2019},\n\tpages = {2--4},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/T33UWAQD/file/view}\n}\n\n\n\n
\n
\n\n\n
\n In this demo paper, we introduce the Multimodal Pipeline, a prototypical approach for the collection, storing, annotation, processing and exploitation of multimodal data for supporting learning. At the current stage of development, the Multimodal Pipeline consists of two relevant prototypes: 1) Multimodal Learning Hub for the collection and storing of sensor data from multiple applications and 2) the Visual Inspection Tool for visualisation and annotation of the recorded sessions. The Multimodal Pipeline is designed to be a flexible system useful for supporting psychomotor skills in a variety of learning scenarios such as presentation skills, medical simulation with patient manikins or calligraphy learning. The Multimodal Pipeline can be configured to serve different support strategies, including detecting mistakes and prompting live feedback in an intelligent tutoring system or stimulating self-reflection through a learning analytics dashboard.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detecting mistakes in CPR training with multimodal data and neural networks.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n Sensors (Switzerland), 19(14): 1–20. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Detecting paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{di_mitri_detecting_2019,\n\ttitle = {Detecting mistakes in {CPR} training with multimodal data and neural networks},\n\tvolume = {19},\n\tcopyright = {All rights reserved},\n\tdoi = {10.3390/s19143099},\n\tabstract = {This study investigated to what extent multimodal data can be used to detect mistakes during Cardiopulmonary Resuscitation (CPR) training. We complemented the Laerdal QCPR ResusciAnne manikin with the Multimodal Tutor for CPR, a multi-sensor system consisting of a Microsoft Kinect for tracking body position and a Myo armband for collecting electromyogram information. We collected multimodal data from 11 medical students, each of them performing two sessions of two-minute chest compressions (CCs). We gathered in total 5254 CCs that were all labelled according to five performance indicators, corresponding to common CPR training mistakes. Three out of five indicators, CC rate, CC depth and CC release, were assessed automatically by the ReusciAnne manikin. The remaining two, related to arms and body position, were annotated manually by the research team. We trained five neural networks for classifying each of the five indicators. The results of the experiment show that multimodal data can provide accurate mistake detection as compared to the ResusciAnne manikin baseline. We also show that the Multimodal Tutor for CPR can detect additional CPR training mistakes such as the correct use of arms and body weight. Thus far, these mistakes were identified only by human instructors. Finally, to investigate user feedback in the future implementations of the Multimodal Tutor for CPR, we conducted a questionnaire to collect valuable feedback aspects of CPR training.},\n\tnumber = {14},\n\tjournal = {Sensors (Switzerland)},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Specht, Marcus and Drachsler, Hendrik},\n\tyear = {2019},\n\tpages = {1--20},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/8DPBHQ49/file/view}\n}\n\n\n\n
\n
\n\n\n
\n This study investigated to what extent multimodal data can be used to detect mistakes during Cardiopulmonary Resuscitation (CPR) training. We complemented the Laerdal QCPR ResusciAnne manikin with the Multimodal Tutor for CPR, a multi-sensor system consisting of a Microsoft Kinect for tracking body position and a Myo armband for collecting electromyogram information. We collected multimodal data from 11 medical students, each of them performing two sessions of two-minute chest compressions (CCs). We gathered in total 5254 CCs that were all labelled according to five performance indicators, corresponding to common CPR training mistakes. Three out of five indicators, CC rate, CC depth and CC release, were assessed automatically by the ReusciAnne manikin. The remaining two, related to arms and body position, were annotated manually by the research team. We trained five neural networks for classifying each of the five indicators. The results of the experiment show that multimodal data can provide accurate mistake detection as compared to the ResusciAnne manikin baseline. We also show that the Multimodal Tutor for CPR can detect additional CPR training mistakes such as the correct use of arms and body weight. Thus far, these mistakes were identified only by human instructors. Finally, to investigate user feedback in the future implementations of the Multimodal Tutor for CPR, we conducted a questionnaire to collect valuable feedback aspects of CPR training.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Detecting medical simulation errors with machine learning and multimodal data.\n \n \n \n \n\n\n \n Di Mitri, D.\n\n\n \n\n\n\n In 17th conference on artificial intelligence in medicine, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"DetectingPaper\n  \n \n \n \"Detecting paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_detecting_2019,\n\ttitle = {Detecting medical simulation errors with machine learning and multimodal data},\n\tcopyright = {All rights reserved},\n\turl = {https://research.ou.nl/en/publications/detecting-medical-simulation-errors-with-machine-learning-and-mul},\n\turldate = {2024-09-14},\n\tbooktitle = {17th conference on artificial intelligence in medicine},\n\tauthor = {Di Mitri, Daniele},\n\tyear = {2019},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/FDRM9F2A/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Architecture and Design Patterns for Distributed, Scalable Augmented Reality and Wearable Technology Systems.\n \n \n \n \n\n\n \n Guest, W.; Wild, F.; Mitri, D. D.; Klemke, R.; Karjalainen, J.; and Helin, K.\n\n\n \n\n\n\n In 2019 IEEE International Conference on Engineering, Technology and Education (TALE), pages 1–8, December 2019. \n ISSN: 2470-6698\n\n\n\n
\n\n\n\n \n \n \"Architecture paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{guest_architecture_2019,\n\ttitle = {Architecture and {Design} {Patterns} for {Distributed}, {Scalable} {Augmented} {Reality} and {Wearable} {Technology} {Systems}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/TALE48000.2019.9225855},\n\tabstract = {This paper presents a novel reference software architecture and supporting pattern language for an augmented reality authoring and training system. Industry-based augmented reality training is considered an essential element of the next techno-industrial revolution. These next generation learning environments allow a trainee to offload complexity and giving them live (or on-demand) feedback on their progress and performance through workplace augmentation is already being taken up by industry forerunners. This reference architecture - for wearable experience for knowledge intensive training - incorporates head-mounted augmented vision, an array of wearable sensors that monitor movement and physiological signals, a data-layer managing sensor data and a cloud-based repository for storing information about the activity and workplace. Moreover, this architecture has been tested in a range of knowledge intensive workplaces, in the aeronautic, medical and space industries. Two iterations of the architecture were developed and validated, together with over 500 participants, producing datasets on activity performance, physiological state and assessment of the platform. The components and links of the architecture are presented here as generalizable design patterns to support wider development. 
We then propose a pattern language for augmented reality training applications.},\n\tbooktitle = {2019 {IEEE} {International} {Conference} on {Engineering}, {Technology} and {Education} ({TALE})},\n\tauthor = {Guest, Will and Wild, Fridolin and Mitri, Daniele Di and Klemke, Roland and Karjalainen, Jaakko and Helin, Kaj},\n\tmonth = dec,\n\tyear = {2019},\n\tnote = {ISSN: 2470-6698},\n\tpages = {1--8},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/2T5QW3B3/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n This paper presents a novel reference software architecture and supporting pattern language for an augmented reality authoring and training system. Industry-based augmented reality training is considered an essential element of the next techno-industrial revolution. These next generation learning environments allow a trainee to offload complexity and giving them live (or on-demand) feedback on their progress and performance through workplace augmentation is already being taken up by industry forerunners. This reference architecture - for wearable experience for knowledge intensive training - incorporates head-mounted augmented vision, an array of wearable sensors that monitor movement and physiological signals, a data-layer managing sensor data and a cloud-based repository for storing information about the activity and workplace. Moreover, this architecture has been tested in a range of knowledge intensive workplaces, in the aeronautic, medical and space industries. Two iterations of the architecture were developed and validated, together with over 500 participants, producing datasets on activity performance, physiological state and assessment of the platform. The components and links of the architecture are presented here as generalizable design patterns to support wider development. We then propose a pattern language for augmented reality training applications.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Multimodal Learning Analytics Pipeline.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n In 4th International Conference on AI+ Adaptive Education, pages 1–2, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_multimodal_2019,\n\ttitle = {The {Multimodal} {Learning} {Analytics} {Pipeline}},\n\tcopyright = {All rights reserved},\n\turl = {https://www.researchgate.net/profile/Daniele-Di-Mitri/publication/333967598_The_Multimodal_Learning_Analytics_Pipeline/links/5d0f98ad299bf1547c793139/The-Multimodal-Learning-Analytics-Pipeline.pdf},\n\turldate = {2024-09-14},\n\tbooktitle = {4th {International} {Conference} on {AI}+ {Adaptive} {Education}},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Specht, Marcus and Drachsler, Hendrik},\n\tyear = {2019},\n\tpages = {1--2},\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal Learning Analytics Runtime Framework.\n \n \n \n \n\n\n \n Schneider, J.; Di Mitri, D.; Drachsler, H.; and Specht, M.\n\n\n \n\n\n\n In Proceedings of the Third Multimodal Learning Analytics Across (Physical and Digital) Spaces (CrossMMLA)., pages 1–6, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Multimodal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{schneider_multimodal_2019,\n\ttitle = {Multimodal {Learning} {Analytics} {Runtime} {Framework}},\n\tcopyright = {All rights reserved},\n\tbooktitle = {Proceedings of the {Third} {Multimodal} {Learning} {Analytics} {Across} ({Physical} and {Digital}) {Spaces} ({CrossMMLA}).},\n\tauthor = {Schneider, Jan and Di Mitri, Daniele and Drachsler, Hendrik and Specht, Marcus},\n\tyear = {2019},\n\tpages = {1--6},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/TRCQHUWB/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n The Big Five: Addressing Recurrent Multimodal Learning Data Challenges.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n In Martinez-Maldonado Roberto, editor(s), Proceedings of the Second Multimodal Learning Analytics Across (Physical and Digital) Spaces (CrossMMLA), pages 6–6, Aachen, 2018. CEUR Workshop Proceedings\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_big_2018,\n\taddress = {Aachen},\n\ttitle = {The {Big} {Five}: {Addressing} {Recurrent} {Multimodal} {Learning} {Data} {Challenges}},\n\tcopyright = {All rights reserved},\n\turl = {http://ceur-ws.org/Vol-2163/paper6.pdf},\n\tabstract = {The analysis of multimodal data in learning is a growing field of research, which has led to the development of different analytics solutions. However, there is no standardised approach to handle multimodal data. In this paper, we describe and outline a solution for five recurrent challenges in the analysis of multimodal data: the data collection, storing, annotation, processing and exploitation. For each of these challenges, we envision possible solutions. The prototypes for some of the proposed solutions will be discussed during the Multimodal Challenge of the fourth Learning Analytics \\& Knowledge Hackathon, a two-day hands-on workshop in which the authors will open up the prototypes for trials, validation and feedback.},\n\tbooktitle = {Proceedings of the {Second} {Multimodal} {Learning} {Analytics} {Across} ({Physical} and {Digital}) {Spaces} ({CrossMMLA})},\n\tpublisher = {CEUR Workshop Proceedings},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Specht, Marcus and Drachsler, Hendrik},\n\teditor = {{Martinez-Maldonado Roberto}},\n\tyear = {2018},\n\tpages = {6--6},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/HR8R8MUT/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n The analysis of multimodal data in learning is a growing field of research, which has led to the development of different analytics solutions. However, there is no standardised approach to handle multimodal data. In this paper, we describe and outline a solution for five recurrent challenges in the analysis of multimodal data: the data collection, storing, annotation, processing and exploitation. For each of these challenges, we envision possible solutions. The prototypes for some of the proposed solutions will be discussed during the Multimodal Challenge of the fourth Learning Analytics & Knowledge Hackathon, a two-day hands-on workshop in which the authors will open up the prototypes for trials, validation and feedback.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal Challenge: Analytics Beyond User-computer Interaction Data.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n In undefined, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"MultimodalPaper\n  \n \n \n \"Multimodal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_multimodal_2018,\n\ttitle = {Multimodal {Challenge}: {Analytics} {Beyond} {User}-computer {Interaction} {Data}},\n\tcopyright = {All rights reserved},\n\tshorttitle = {Multimodal {Challenge}},\n\turl = {https://www.semanticscholar.org/paper/Multimodal-Challenge%3A-Analytics-Beyond-Interaction-Mitri-Schneider/6a374d340738bbe530c07cfbb18c1787c78c8d87},\n\tabstract = {This contribution describes one the challenges explored in the Fourth LAK Hackathon, aimed at shifting the focus from learning situations which can be easily traced through user-computer interactions data and concentrate more on user-world interactions events, typical of co-located and practice-based learning experiences. This contribution describes one the challenges explored in the Fourth LAK Hackathon. This challenge aims at shifting the focus from learning situations which can be easily traced through user-computer interactions data and concentrate more on user-world interactions events, typical of co-located and practice-based learning experiences. This mission, pursued by the multimodal learning analytics (MMLA) community, seeks to bridge \nthe gap between digital and physical learning spaces. The “multimodal” approach consists in combining learners’ motoric actions with physiological responses and data about the learning contexts. These data can be collected through multiple wearable sensors and Internet of Things (IoT) devices. This Hackathon table will confront with three main challenges arising from the analysis and valorisation of multimodal datasets: 1) the data \ncollection and storing, 2) the data annotation, 3) the data processing and exploitation. Some research questions which will be considered in this Hackathon challenge are the following: how to process the raw sensor data streams and extract relevant features? which data mining and machine learning techniques can be applied? how can we compare two action recordings? 
How to combine sensor data with Experience API (xAPI)? what are meaningful visualisations for these data?},\n\tlanguage = {en},\n\turldate = {2022-11-30},\n\tbooktitle = {undefined},\n\tauthor = {Di Mitri, Daniele and Schneider, J. and Specht, M. and Drachsler, H.},\n\tyear = {2018},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/RXVK5CI9/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n This contribution describes one the challenges explored in the Fourth LAK Hackathon, aimed at shifting the focus from learning situations which can be easily traced through user-computer interactions data and concentrate more on user-world interactions events, typical of co-located and practice-based learning experiences. This contribution describes one the challenges explored in the Fourth LAK Hackathon. This challenge aims at shifting the focus from learning situations which can be easily traced through user-computer interactions data and concentrate more on user-world interactions events, typical of co-located and practice-based learning experiences. This mission, pursued by the multimodal learning analytics (MMLA) community, seeks to bridge the gap between digital and physical learning spaces. The “multimodal” approach consists in combining learners’ motoric actions with physiological responses and data about the learning contexts. These data can be collected through multiple wearable sensors and Internet of Things (IoT) devices. This Hackathon table will confront with three main challenges arising from the analysis and valorisation of multimodal datasets: 1) the data collection and storing, 2) the data annotation, 3) the data processing and exploitation. Some research questions which will be considered in this Hackathon challenge are the following: how to process the raw sensor data streams and extract relevant features? which data mining and machine learning techniques can be applied? how can we compare two action recordings? How to combine sensor data with Experience API (xAPI)? what are meaningful visualisations for these data?\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From signals to knowledge: A conceptual model for multimodal learning analytics.\n \n \n \n \n\n\n \n Di Mitri, D.; Schneider, J.; Specht, M.; and Drachsler, H.\n\n\n \n\n\n\n Journal of Computer Assisted Learning, 34(4): 338–349. August 2018.\n \n\n\n\n
\n\n\n\n \n \n \"FromPaper\n  \n \n \n \"From paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{di_mitri_signals_2018,\n\ttitle = {From signals to knowledge: {A} conceptual model for multimodal learning analytics},\n\tvolume = {34},\n\tcopyright = {All rights reserved},\n\tissn = {0266-4909, 1365-2729},\n\tshorttitle = {From signals to knowledge},\n\turl = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12288},\n\tdoi = {10.1111/jcal.12288},\n\tabstract = {Abstract\n            Multimodality in learning analytics and learning science is under the spotlight. The landscape of sensors and wearable trackers that can be used for learning support is evolving rapidly, as well as data collection and analysis methods. Multimodal data can now be collected and processed in real time at an unprecedented scale. With sensors, it is possible to capture observable events of the learning process such as learner's behaviour and the learning context. The learning process, however, consists also of latent attributes, such as the learner's cognitions or emotions. These attributes are unobservable to sensors and need to be elicited by human‐driven interpretations. We conducted a literature survey of experiments using multimodal data to frame the young research field of multimodal learning analytics. The survey explored the multimodal data used in related studies (the input space) and the learning theories selected (the hypothesis space). 
The survey led to the formulation of the Multimodal Learning Analytics Model whose main objectives are of (O1) mapping the use of multimodal data to enhance the feedback in a learning context; (O2) showing how to combine machine learning with multimodal data; and (O3) aligning the terminology used in the field of machine learning and learning science.\n          , \n            Lay Description\n            \n              What is already known about this topic:\n              \n                \n                  Multimodal data can capture fine‐grained measurements of educational traces.\n                \n                \n                  Many sensors can be now used in the domain of education to collect data.\n                \n                \n                  These data are records of learning and can be used to investigate it.\n                \n                \n                  Learning happens across physical and digital spaces.\n                \n              \n            \n            \n              What this paper adds:\n              \n                \n                  It reports the results of a literature survey in the field of multimodal learning analytics.\n                \n                \n                  It provides a taxonomy to organize for the first time the different modalities in learning from a sensor perspective.\n                \n                \n                  It introduces the concept of observability line.\n                \n                \n                  It explains how machine learning can be used on multimodal data to improve learning.\n                \n                \n                  It aligns the terminologies used by the learning science and the machine learning communities.\n                \n              \n            \n            \n              Implications for practice and/or policy:\n              \n                \n                  The model proposed can be used in future multimodal 
learning analytics research to enhance feedback for learners.\n                \n                \n                  The feedback provided to the learner can become more adaptive and therefore make the learning more effective.\n                \n                \n                  The multimodal learning analytics community can profit to a shared understanding on how to use multimodal data for learning.\n                \n                \n                  The model supports continuous assessment, which can in the future replace future examination.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2024-09-14},\n\tjournal = {Journal of Computer Assisted Learning},\n\tauthor = {Di Mitri, Daniele and Schneider, Jan and Specht, Marcus and Drachsler, Hendrik},\n\tmonth = aug,\n\tyear = {2018},\n\tpages = {338--349},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/RLCZ799F/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Abstract Multimodality in learning analytics and learning science is under the spotlight. The landscape of sensors and wearable trackers that can be used for learning support is evolving rapidly, as well as data collection and analysis methods. Multimodal data can now be collected and processed in real time at an unprecedented scale. With sensors, it is possible to capture observable events of the learning process such as learner's behaviour and the learning context. The learning process, however, consists also of latent attributes, such as the learner's cognitions or emotions. These attributes are unobservable to sensors and need to be elicited by human‐driven interpretations. We conducted a literature survey of experiments using multimodal data to frame the young research field of multimodal learning analytics. The survey explored the multimodal data used in related studies (the input space) and the learning theories selected (the hypothesis space). The survey led to the formulation of the Multimodal Learning Analytics Model whose main objectives are of (O1) mapping the use of multimodal data to enhance the feedback in a learning context; (O2) showing how to combine machine learning with multimodal data; and (O3) aligning the terminology used in the field of machine learning and learning science. , Lay Description What is already known about this topic: Multimodal data can capture fine‐grained measurements of educational traces. Many sensors can be now used in the domain of education to collect data. These data are records of learning and can be used to investigate it. Learning happens across physical and digital spaces. What this paper adds: It reports the results of a literature survey in the field of multimodal learning analytics. It provides a taxonomy to organize for the first time the different modalities in learning from a sensor perspective. It introduces the concept of observability line. 
It explains how machine learning can be used on multimodal data to improve learning. It aligns the terminologies used by the learning science and the machine learning communities. Implications for practice and/or policy: The model proposed can be used in future multimodal learning analytics research to enhance feedback for learners. The feedback provided to the learner can become more adaptive and therefore make the learning more effective. The multimodal learning analytics community can profit to a shared understanding on how to use multimodal data for learning. The model supports continuous assessment, which can in the future replace future examination.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multimodal Learning Hub: A Tool for Capturing Customizable Multimodal Learning Experiences.\n \n \n \n \n\n\n \n Schneider, J.; Di Mitri, D.; Limbu, B.; and Drachsler, H.\n\n\n \n\n\n\n In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 11082 LNCS, pages 45–58, Cham, Switzerland, 2018. Springer\n \n\n\n\n
\n\n\n\n \n \n \"Multimodal paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{schneider_multimodal_2018,\n\taddress = {Cham, Switzerland},\n\ttitle = {Multimodal {Learning} {Hub}: {A} {Tool} for {Capturing} {Customizable} {Multimodal} {Learning} {Experiences}},\n\tvolume = {11082 LNCS},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-319-98571-8},\n\tdoi = {10.1007/978-3-319-98572-5_4},\n\tabstract = {Systematic reviews can provide up-to-date syntheses of reliable evidence on “what works” to help policymakers, practitioners, and people who use services make well-informed decisions about social and behavioral interventions. However, systematic reviews of social and behavioral interventions do not typically include evidence on resource use and costs, critical dimensions for decision makers to consider when faced with limited resources and constrained budgets. This paper builds on existing recommendations for including evidence for resource use and costs in systematic reviews by illustrating the development and use of an instrument to code resource use and cost data from an existing systematic review on the effects of adolescent depression prevention programs and applying that instrument to 46 studies included in that review. We demonstrate that resource use and cost data are relatively sparsely reported for treatment conditions in reports of included studies and even more so for comparison conditions, although the reporting of the most important cost drivers is reasonably frequent for treatment conditions. To allow for better integration of resource use and cost data into systematic reviews, future studies that aim to inform decision making should report more detail about program resource use and costs required for implementation, perhaps using the template provided in this paper. 
Copyright © 2012 John Wiley \\& Sons, Ltd.},\n\tbooktitle = {Lecture {Notes} in {Computer} {Science} (including subseries {Lecture} {Notes} in {Artificial} {Intelligence} and {Lecture} {Notes} in {Bioinformatics})},\n\tpublisher = {Springer},\n\tauthor = {Schneider, Jan and Di Mitri, Daniele and Limbu, Bibeg and Drachsler, Hendrik},\n\tyear = {2018},\n\tpages = {45--58},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/E6FCCSMH/file/view}\n}\n\n\n\n
\n
\n\n\n
\n Systematic reviews can provide up-to-date syntheses of reliable evidence on “what works” to help policymakers, practitioners, and people who use services make well-informed decisions about social and behavioral interventions. However, systematic reviews of social and behavioral interventions do not typically include evidence on resource use and costs, critical dimensions for decision makers to consider when faced with limited resources and constrained budgets. This paper builds on existing recommendations for including evidence for resource use and costs in systematic reviews by illustrating the development and use of an instrument to code resource use and cost data from an existing systematic review on the effects of adolescent depression prevention programs and applying that instrument to 46 studies included in that review. We demonstrate that resource use and cost data are relatively sparsely reported for treatment conditions in reports of included studies and even more so for comparison conditions, although the reporting of the most important cost drivers is reasonably frequent for treatment conditions. To allow for better integration of resource use and cost data into systematic reviews, future studies that aim to inform decision making should report more detail about program resource use and costs required for implementation, perhaps using the template provided in this paper. Copyright © 2012 John Wiley & Sons, Ltd.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Affordances for Capturing and Re-enacting Expert Performance with Wearables.\n \n \n \n\n\n \n Guest, W.; Wild, F.; Vovk, A.; Fominykh, M.; Limbu, B.; Klemke, R.; Sharma, P.; Karjalainen, J.; Smith, C.; Rasool, J.; Aswat, S.; Helin, K.; Di Mitri, D.; and Schneider, J.\n\n\n \n\n\n\n In Lavoué, É.; Drachsler, H.; Verbert, K.; Broisin, J.; and Pérez-Sanagustín, M., editor(s), Data Driven Approaches in Digital Education, pages 403–409, Cham, 2017. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{guest_affordances_2017,\n\taddress = {Cham},\n\ttitle = {Affordances for {Capturing} and {Re}-enacting {Expert} {Performance} with {Wearables}},\n\tisbn = {978-3-319-66610-5},\n\tdoi = {10.1007/978-3-319-66610-5_34},\n\tabstract = {The WEKIT.one prototype is a platform for immersive procedural training with wearable sensors and Augmented Reality. Focusing on capture and re-enactment of human expertise, this work looks at the unique affordances of suitable hard- and software technologies. The practical challenges of interpreting expertise, using suitable sensors for its capture and specifying the means to describe and display to the novice are of central significance here. We link affordances with hardware devices, discussing their alternatives, including Microsoft Hololens, Thalmic Labs MYO, Alex Posture sensor, MyndPlay EEG headband, and a heart rate sensor. Following the selection of sensors, we describe integration and communication requirements for the prototype. We close with thoughts on the wider possibilities for implementation and next steps.},\n\tlanguage = {en},\n\tbooktitle = {Data {Driven} {Approaches} in {Digital} {Education}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Guest, Will and Wild, Fridolin and Vovk, Alla and Fominykh, Mikhail and Limbu, Bibeg and Klemke, Roland and Sharma, Puneet and Karjalainen, Jaakko and Smith, Carl and Rasool, Jazz and Aswat, Soyeb and Helin, Kaj and Di Mitri, Daniele and Schneider, Jan},\n\teditor = {Lavoué, Élise and Drachsler, Hendrik and Verbert, Katrien and Broisin, Julien and Pérez-Sanagustín, Mar},\n\tyear = {2017},\n\tpages = {403--409},\n}\n\n\n\n
\n
\n\n\n
\n The WEKIT.one prototype is a platform for immersive procedural training with wearable sensors and Augmented Reality. Focusing on capture and re-enactment of human expertise, this work looks at the unique affordances of suitable hard- and software technologies. The practical challenges of interpreting expertise, using suitable sensors for its capture and specifying the means to describe and display to the novice are of central significance here. We link affordances with hardware devices, discussing their alternatives, including Microsoft Hololens, Thalmic Labs MYO, Alex Posture sensor, MyndPlay EEG headband, and a heart rate sensor. Following the selection of sensors, we describe integration and communication requirements for the prototype. We close with thoughts on the wider possibilities for implementation and next steps.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning pulse: a machine learning approach for predicting performance in self-regulated learning using multimodal data.\n \n \n \n \n\n\n \n Di Mitri, D.; Scheffel, M.; Drachsler, H.; Börner, D.; Ternier, S.; and Specht, M.\n\n\n \n\n\n\n In Proceedings of the Seventh International Learning Analytics & Knowledge Conference, pages 188–197, Vancouver British Columbia Canada, March 2017. ACM\n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n \n \"Learning paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_learning_2017,\n\taddress = {Vancouver British Columbia Canada},\n\ttitle = {Learning pulse: a machine learning approach for predicting performance in self-regulated learning using multimodal data},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4503-4870-6},\n\tshorttitle = {Learning pulse},\n\turl = {https://dl.acm.org/doi/10.1145/3027385.3027447},\n\tdoi = {10.1145/3027385.3027447},\n\tlanguage = {en},\n\turldate = {2024-09-14},\n\tbooktitle = {Proceedings of the {Seventh} {International} {Learning} {Analytics} \\& {Knowledge} {Conference}},\n\tpublisher = {ACM},\n\tauthor = {Di Mitri, Daniele and Scheffel, Maren and Drachsler, Hendrik and Börner, Dirk and Ternier, Stefaan and Specht, Marcus},\n\tmonth = mar,\n\tyear = {2017},\n\tpages = {188--197},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/9QBZITQM/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Digital Learning Projection.\n \n \n \n \n\n\n \n Di Mitri, D.\n\n\n \n\n\n\n In André, E.; Baker, R.; Hu, X.; Rodrigo, M. M. T.; and du Boulay, B., editor(s), Artificial Intelligence in Education, of Lecture Notes in Computer Science, pages 609–612, Cham, 2017. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"Digital paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{di_mitri_digital_2017,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} in {Computer} {Science}},\n\ttitle = {Digital {Learning} {Projection}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-319-61425-0},\n\tdoi = {10.1007/978-3-319-61425-0_75},\n\tabstract = {Multiple modalities of the learning process can now be captured on real-time through wearable and contextual sensors. By annotating these multimodal data (the input space) by expert assessments or self-reports (the output space), machine learning models can be trained to predict the learning performance. This can lead to continuous formative assessment and feedback generation, which can be used to personalise and contextualise content, improve awareness and support informed decisions about learning.},\n\tlanguage = {en},\n\tbooktitle = {Artificial {Intelligence} in {Education}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Di Mitri, Daniele},\n\teditor = {André, Elisabeth and Baker, Ryan and Hu, Xiangen and Rodrigo, Ma. Mercedes T. and du Boulay, Benedict},\n\tyear = {2017},\n\tpages = {609--612},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/GV5N7SH6/file/view}\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Multiple modalities of the learning process can now be captured on real-time through wearable and contextual sensors. By annotating these multimodal data (the input space) by expert assessments or self-reports (the output space), machine learning models can be trained to predict the learning performance. This can lead to continuous formative assessment and feedback generation, which can be used to personalise and contextualise content, improve awareness and support informed decisions about learning.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Software Prototype with Sensor Fusion API Specification and Usage Description: WEKIT project deliverable D3. 3.\n \n \n \n \n\n\n \n Klemke, R.; Di Mitri, D.; Limbu, B.; Schneider, J.; Sharma, P.; Wild, F.; and Azam, T.\n\n\n \n\n\n\n . 2016.\n \n\n\n\n
\n\n\n\n \n \n \"SoftwarePaper\n  \n \n \n \"Software paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{klemke_software_2016,\n\ttitle = {Software {Prototype} with {Sensor} {Fusion} {API} {Specification} and {Usage} {Description}: {WEKIT} project deliverable {D3}. 3},\n\tcopyright = {All rights reserved},\n\tshorttitle = {Software {Prototype} with {Sensor} {Fusion} {API} {Specification} and {Usage} {Description}},\n\turl = {https://research.ou.nl/en/publications/software-prototype-with-sensor-fusion-api-specification-and-usage},\n\turldate = {2024-09-14},\n\tauthor = {Klemke, Roland and Di Mitri, Daniele and Limbu, Bibeg and Schneider, Jan and Sharma, Puneet and Wild, Fridolin and Azam, Tre},\n\tyear = {2016},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/K7F49NQ2/file/view}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Learning Analytics per la valutazione formativa negli ambienti di apprendimento a distanza.\n \n \n \n \n\n\n \n Di Mitri, D.\n\n\n \n\n\n\n Ph.D. Thesis, 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{di_mitri_learning_2013,\n\ttitle = {Learning {Analytics} per la valutazione formativa negli ambienti di apprendimento a distanza},\n\tcopyright = {All rights reserved},\n\tauthor = {Di Mitri, Daniele},\n\tyear = {2013},\n\turl_paper={https://api.zotero.org/users/7275239/publications/items/2KPFGVQ6/file/view}\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);